1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at
9 * http://www.opensource.org/licenses/cddl1.txt.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2004-2012 Emulex. All rights reserved.
24 * Use is subject to license terms.
25 * Copyright 2018 OmniOS Community Edition (OmniOSce) Association.
26 * Copyright 2020 RackTop Systems, Inc.
27 */
28
29 #include <emlxs.h>
30
31
32 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
33 EMLXS_MSG_DEF(EMLXS_SLI4_C);
34
/*
 * Forward declarations for the static SLI4 support routines defined in
 * this file.  A subset of these is exported through the emlxs_sli4_api
 * function vector below; the rest are internal helpers.
 */

/* Resource-extent and bootstrap-mailbox initialization */
static int emlxs_sli4_init_extents(emlxs_hba_t *hba,
			MAILBOXQ *mbq);
static uint32_t emlxs_sli4_read_status(emlxs_hba_t *hba);

static int emlxs_init_bootstrap_mb(emlxs_hba_t *hba);

/* Low-level register accessors: semaphore and doorbell registers */
static uint32_t emlxs_sli4_read_sema(emlxs_hba_t *hba);

static uint32_t emlxs_sli4_read_mbdb(emlxs_hba_t *hba);

static void emlxs_sli4_write_mbdb(emlxs_hba_t *hba, uint64_t phys,
		boolean_t high);

/* Per-queue doorbell writers (work, mailbox, receive, event, completion) */
static void emlxs_sli4_write_wqdb(emlxs_hba_t *hba, uint16_t qid,
		uint_t posted, uint_t index);

static void emlxs_sli4_write_mqdb(emlxs_hba_t *hba, uint16_t qid,
		uint_t count);

static void emlxs_sli4_write_rqdb(emlxs_hba_t *hba, uint16_t qid,
		uint_t count);

static void emlxs_sli4_write_eqdb(emlxs_hba_t *hba, uint16_t qid,
		uint32_t count, boolean_t arm);
static void emlxs_sli4_write_cqdb(emlxs_hba_t *hba, uint16_t qid,
		uint32_t count, boolean_t arm);

/* Queue creation and SGL/header template posting (mailbox based) */
static int emlxs_sli4_create_queues(emlxs_hba_t *hba,
		MAILBOXQ *mbq);
static int emlxs_sli4_post_hdr_tmplates(emlxs_hba_t *hba,
		MAILBOXQ *mbq);
static int emlxs_sli4_post_sgl_pages(emlxs_hba_t *hba,
		MAILBOXQ *mbq);

static int emlxs_sli4_read_eq(emlxs_hba_t *hba, EQ_DESC_t *eq);

/* Hardware map/unmap and adapter bring-up/tear-down entry points */
static int emlxs_sli4_map_hdw(emlxs_hba_t *hba);

static void emlxs_sli4_unmap_hdw(emlxs_hba_t *hba);

static int32_t emlxs_sli4_online(emlxs_hba_t *hba);

static void emlxs_sli4_offline(emlxs_hba_t *hba,
		uint32_t reset_requested);

static uint32_t emlxs_sli4_hba_reset(emlxs_hba_t *hba, uint32_t restart,
		uint32_t skip_post, uint32_t quiesce);
static void emlxs_sli4_hba_kill(emlxs_hba_t *hba);

static uint32_t emlxs_sli4_hba_init(emlxs_hba_t *hba);

/* I/O preparation and submission */
static uint32_t emlxs_sli4_bde_setup(emlxs_port_t *port,
		emlxs_buf_t *sbp);

static void emlxs_sli4_issue_iocb_cmd(emlxs_hba_t *hba,
		CHANNEL *cp, IOCBQ *iocb_cmd);
static uint32_t emlxs_sli4_issue_mbox_cmd(emlxs_hba_t *hba,
		MAILBOXQ *mbq, int32_t flg, uint32_t tmo);
static uint32_t emlxs_sli4_issue_mbox_cmd4quiesce(emlxs_hba_t *hba,
		MAILBOXQ *mbq, int32_t flg, uint32_t tmo);
#ifdef SFCT_SUPPORT
static uint32_t emlxs_sli4_prep_fct_iocb(emlxs_port_t *port,
		emlxs_buf_t *cmd_sbp, int channel);
static uint32_t emlxs_sli4_fct_bde_setup(emlxs_port_t *port,
		emlxs_buf_t *sbp);
#endif /* SFCT_SUPPORT */

static uint32_t emlxs_sli4_prep_fcp_iocb(emlxs_port_t *port,
		emlxs_buf_t *sbp, int ring);
static uint32_t emlxs_sli4_prep_ip_iocb(emlxs_port_t *port,
		emlxs_buf_t *sbp);
static uint32_t emlxs_sli4_prep_els_iocb(emlxs_port_t *port,
		emlxs_buf_t *sbp);
static uint32_t emlxs_sli4_prep_ct_iocb(emlxs_port_t *port,
		emlxs_buf_t *sbp);

/* Interrupt handling */
static void emlxs_sli4_poll_intr(emlxs_hba_t *hba);
static int32_t emlxs_sli4_intx_intr(char *arg);

#ifdef MSI_SUPPORT
static uint32_t emlxs_sli4_msi_intr(char *arg1, char *arg2);
#endif /* MSI_SUPPORT */

/* Resource (queue/XRI/buffer) allocation and release */
static void emlxs_sli4_resource_free(emlxs_hba_t *hba);

static int emlxs_sli4_resource_alloc(emlxs_hba_t *hba);
extern void emlxs_sli4_zero_queue_stat(emlxs_hba_t *hba);

static XRIobj_t *emlxs_sli4_alloc_xri(emlxs_port_t *port,
		emlxs_buf_t *sbp, RPIobj_t *rpip,
		uint32_t type);
static void emlxs_sli4_enable_intr(emlxs_hba_t *hba);

static void emlxs_sli4_disable_intr(emlxs_hba_t *hba, uint32_t att);

/* Timers, GPIO (LED) handling and error-attention polling */
static void emlxs_sli4_timer(emlxs_hba_t *hba);

static void emlxs_sli4_timer_check_mbox(emlxs_hba_t *hba);

static void emlxs_sli4_gpio_timer_start(emlxs_hba_t *hba);

static void emlxs_sli4_gpio_timer_stop(emlxs_hba_t *hba);

static void emlxs_sli4_gpio_timer(void *arg);

static void emlxs_sli4_check_gpio(emlxs_hba_t *hba);

static uint32_t emlxs_sli4_fix_gpio(emlxs_hba_t *hba,
		uint8_t *pin, uint8_t *pinval);

static uint32_t emlxs_sli4_fix_gpio_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq);

static void emlxs_sli4_poll_erratt(emlxs_hba_t *hba);

/* XRI reservation and node registration */
extern XRIobj_t *emlxs_sli4_reserve_xri(emlxs_port_t *port,
		RPIobj_t *rpip, uint32_t type, uint16_t rx_id);
static int emlxs_check_hdw_ready(emlxs_hba_t *);

static uint32_t emlxs_sli4_reg_did(emlxs_port_t *port,
		uint32_t did, SERV_PARM *param,
		emlxs_buf_t *sbp, fc_unsol_buf_t *ubp,
		IOCBQ *iocbq);

static uint32_t emlxs_sli4_unreg_node(emlxs_port_t *port,
		emlxs_node_t *node, emlxs_buf_t *sbp,
		fc_unsol_buf_t *ubp, IOCBQ *iocbq);

/* Asynchronous link-event completion handlers */
static void emlxs_sli4_handle_fc_link_att(emlxs_hba_t *hba,
		CQE_ASYNC_t *cqe);
static void emlxs_sli4_handle_fcoe_link_event(emlxs_hba_t *hba,
		CQE_ASYNC_t *cqe);


/* Queue-id to driver queue-array index translation */
static uint16_t emlxs_sli4_rqid_to_index(emlxs_hba_t *hba,
		uint16_t rqid);
static uint16_t emlxs_sli4_wqid_to_index(emlxs_hba_t *hba,
		uint16_t wqid);
static uint16_t emlxs_sli4_cqid_to_index(emlxs_hba_t *hba,
		uint16_t cqid);
/*
 * Define SLI4 API functions.
 *
 * This vector supplies the SLI4 implementations of the common emlxs
 * SLI entry points.  The entry ORDER is dictated by the emlxs_sli_api_t
 * structure (declared elsewhere) and must not be changed here.  When
 * SFCT (target mode) support is compiled out, the FCT prep slot is
 * filled with NULL.
 */
emlxs_sli_api_t emlxs_sli4_api = {
	emlxs_sli4_map_hdw,
	emlxs_sli4_unmap_hdw,
	emlxs_sli4_online,
	emlxs_sli4_offline,
	emlxs_sli4_hba_reset,
	emlxs_sli4_hba_kill,
	emlxs_sli4_issue_iocb_cmd,
	emlxs_sli4_issue_mbox_cmd,
#ifdef SFCT_SUPPORT
	emlxs_sli4_prep_fct_iocb,
#else
	NULL,			/* no FCT support compiled in */
#endif /* SFCT_SUPPORT */
	emlxs_sli4_prep_fcp_iocb,
	emlxs_sli4_prep_ip_iocb,
	emlxs_sli4_prep_els_iocb,
	emlxs_sli4_prep_ct_iocb,
	emlxs_sli4_poll_intr,
	emlxs_sli4_intx_intr,
	emlxs_sli4_msi_intr,
	emlxs_sli4_disable_intr,
	emlxs_sli4_timer,
	emlxs_sli4_poll_erratt,
	emlxs_sli4_reg_did,
	emlxs_sli4_unreg_node
};
202
203
204 /* ************************************************************************** */
205
206 static void
emlxs_sli4_set_default_params(emlxs_hba_t * hba)207 emlxs_sli4_set_default_params(emlxs_hba_t *hba)
208 {
209 emlxs_port_t *port = &PPORT;
210
211 bzero((char *)&hba->sli.sli4.param, sizeof (sli_params_t));
212
213 hba->sli.sli4.param.ProtocolType = 0x3; /* FC/FCoE */
214
215 hba->sli.sli4.param.SliHint2 = 0;
216 hba->sli.sli4.param.SliHint1 = 0;
217 hba->sli.sli4.param.IfType = 0;
218 hba->sli.sli4.param.SliFamily = 0;
219 hba->sli.sli4.param.Revision = 0x4; /* SLI4 */
220 hba->sli.sli4.param.FT = 0;
221
222 hba->sli.sli4.param.EqeCntMethod = 0x1; /* Bit pattern */
223 hba->sli.sli4.param.EqPageSize = 0x1; /* 4096 */
224 hba->sli.sli4.param.EqeSize = 0x1; /* 4 byte */
225 hba->sli.sli4.param.EqPageCnt = 8;
226 hba->sli.sli4.param.EqeCntMask = 0x1F; /* 256-4096 elements */
227
228 hba->sli.sli4.param.CqeCntMethod = 0x1; /* Bit pattern */
229 hba->sli.sli4.param.CqPageSize = 0x1; /* 4096 */
230 hba->sli.sli4.param.CQV = 0;
231 hba->sli.sli4.param.CqeSize = 0x3; /* 16 byte */
232 hba->sli.sli4.param.CqPageCnt = 4;
233 hba->sli.sli4.param.CqeCntMask = 0x70; /* 256-1024 elements */
234
235 hba->sli.sli4.param.MqeCntMethod = 0x1; /* Bit pattern */
236 hba->sli.sli4.param.MqPageSize = 0x1; /* 4096 */
237 hba->sli.sli4.param.MQV = 0;
238 hba->sli.sli4.param.MqPageCnt = 8;
239 hba->sli.sli4.param.MqeCntMask = 0x0F; /* 16-128 elements */
240
241 hba->sli.sli4.param.WqeCntMethod = 0; /* Page Count */
242 hba->sli.sli4.param.WqPageSize = 0x1; /* 4096 */
243 hba->sli.sli4.param.WQV = 0;
244 hba->sli.sli4.param.WqeSize = 0x5; /* 64 byte */
245 hba->sli.sli4.param.WqPageCnt = 4;
246 hba->sli.sli4.param.WqeCntMask = 0x10; /* 256 elements */
247
248 hba->sli.sli4.param.RqeCntMethod = 0; /* Page Count */
249 hba->sli.sli4.param.RqPageSize = 0x1; /* 4096 */
250 hba->sli.sli4.param.RQV = 0;
251 hba->sli.sli4.param.RqeSize = 0x2; /* 8 byte */
252 hba->sli.sli4.param.RqPageCnt = 8;
253 hba->sli.sli4.param.RqDbWin = 1;
254 hba->sli.sli4.param.RqeCntMask = 0x100; /* 4096 elements */
255
256 hba->sli.sli4.param.Loopback = 0xf; /* unsupported */
257 hba->sli.sli4.param.PHWQ = 0;
258 hba->sli.sli4.param.PHON = 0;
259 hba->sli.sli4.param.TRIR = 0;
260 hba->sli.sli4.param.TRTY = 0;
261 hba->sli.sli4.param.TCCA = 0;
262 hba->sli.sli4.param.MWQE = 0;
263 hba->sli.sli4.param.ASSI = 0;
264 hba->sli.sli4.param.TERP = 0;
265 hba->sli.sli4.param.TGT = 0;
266 hba->sli.sli4.param.AREG = 0;
267 hba->sli.sli4.param.FBRR = 0;
268 hba->sli.sli4.param.SGLR = 1;
269 hba->sli.sli4.param.HDRR = 1;
270 hba->sli.sli4.param.EXT = 0;
271 hba->sli.sli4.param.FCOE = 1;
272
273 hba->sli.sli4.param.SgeLength = (64 * 1024);
274 hba->sli.sli4.param.SglAlign = 0x7 /* 4096 */;
275 hba->sli.sli4.param.SglPageSize = 0x1; /* 4096 */
276 hba->sli.sli4.param.SglPageCnt = 2;
277
278 hba->sli.sli4.param.MinRqSize = 128;
279 hba->sli.sli4.param.MaxRqSize = 2048;
280
281 hba->sli.sli4.param.RPIMax = 0x3ff;
282 hba->sli.sli4.param.XRIMax = 0x3ff;
283 hba->sli.sli4.param.VFIMax = 0xff;
284 hba->sli.sli4.param.VPIMax = 0xff;
285
286 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
287 "Default SLI4 parameters set.");
288
289 } /* emlxs_sli4_set_default_params() */
290
291
292 /*
293 * emlxs_sli4_online()
294 *
295 * This routine will start initialization of the SLI4 HBA.
296 */
297 static int32_t
emlxs_sli4_online(emlxs_hba_t * hba)298 emlxs_sli4_online(emlxs_hba_t *hba)
299 {
300 emlxs_port_t *port = &PPORT;
301 emlxs_config_t *cfg;
302 emlxs_vpd_t *vpd;
303 MAILBOXQ *mbq = NULL;
304 MAILBOX4 *mb = NULL;
305 MATCHMAP *mp = NULL;
306 uint32_t i;
307 uint32_t j;
308 uint32_t rval = 0;
309 uint8_t *vpd_data;
310 uint32_t sli_mode;
311 uint8_t *outptr;
312 uint32_t status;
313 uint32_t fw_check;
314 uint32_t kern_update = 0;
315 emlxs_firmware_t hba_fw;
316 emlxs_firmware_t *fw;
317 uint16_t ssvid;
318 char buf[64];
319
320 cfg = &CFG;
321 vpd = &VPD;
322
323 sli_mode = EMLXS_HBA_SLI4_MODE;
324 hba->sli_mode = sli_mode;
325
326 /* Set the fw_check flag */
327 fw_check = cfg[CFG_FW_CHECK].current;
328
329 if ((fw_check & 0x04) ||
330 (hba->fw_flag & FW_UPDATE_KERNEL)) {
331 kern_update = 1;
332 }
333
334 hba->mbox_queue_flag = 0;
335 hba->fc_edtov = FF_DEF_EDTOV;
336 hba->fc_ratov = FF_DEF_RATOV;
337 hba->fc_altov = FF_DEF_ALTOV;
338 hba->fc_arbtov = FF_DEF_ARBTOV;
339
340 /* Networking not supported */
341 if (cfg[CFG_NETWORK_ON].current) {
342 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
343 "Networking is not supported in SLI4, turning it off");
344 cfg[CFG_NETWORK_ON].current = 0;
345 }
346
347 hba->chan_count = hba->intr_count * cfg[CFG_NUM_WQ].current;
348 if (hba->chan_count > MAX_CHANNEL) {
349 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
350 "Max channels exceeded, dropping num-wq from %d to 1",
351 cfg[CFG_NUM_WQ].current);
352 cfg[CFG_NUM_WQ].current = 1;
353 hba->chan_count = hba->intr_count * cfg[CFG_NUM_WQ].current;
354 }
355 hba->channel_fcp = 0; /* First channel */
356
357 /* Default channel for everything else is the last channel */
358 hba->channel_ip = hba->chan_count - 1;
359 hba->channel_els = hba->chan_count - 1;
360 hba->channel_ct = hba->chan_count - 1;
361
362 hba->fc_iotag = 1;
363 hba->io_count = 0;
364 hba->channel_tx_count = 0;
365
366 /* Specific to ATTO G5 boards */
367 if (hba->model_info.flags & EMLXS_GPIO_LEDS) {
368 /* Set hard-coded GPIO pins */
369 if (hba->pci_function_number) {
370 hba->gpio_pin[EMLXS_GPIO_PIN_LO] = 27;
371 hba->gpio_pin[EMLXS_GPIO_PIN_HI] = 28;
372 hba->gpio_pin[EMLXS_GPIO_PIN_ACT] = 29;
373 hba->gpio_pin[EMLXS_GPIO_PIN_LASER] = 8;
374 } else {
375 hba->gpio_pin[EMLXS_GPIO_PIN_LO] = 13;
376 hba->gpio_pin[EMLXS_GPIO_PIN_HI] = 25;
377 hba->gpio_pin[EMLXS_GPIO_PIN_ACT] = 26;
378 hba->gpio_pin[EMLXS_GPIO_PIN_LASER] = 12;
379 }
380 }
381
382 /* Initialize the local dump region buffer */
383 bzero(&hba->sli.sli4.dump_region, sizeof (MBUF_INFO));
384 hba->sli.sli4.dump_region.size = EMLXS_DUMP_REGION_SIZE;
385 hba->sli.sli4.dump_region.flags = FC_MBUF_DMA | FC_MBUF_SNGLSG;
386 hba->sli.sli4.dump_region.align = ddi_ptob(hba->dip, 1L);
387
388 (void) emlxs_mem_alloc(hba, &hba->sli.sli4.dump_region);
389
390 if (hba->sli.sli4.dump_region.virt == NULL) {
391 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
392 "Unable to allocate dump region buffer.");
393
394 return (ENOMEM);
395 }
396
397 /*
398 * Get a buffer which will be used repeatedly for mailbox commands
399 */
400 mbq = (MAILBOXQ *) kmem_zalloc((sizeof (MAILBOXQ)), KM_SLEEP);
401
402 mb = (MAILBOX4 *)mbq;
403
404 reset:
405 /* Reset & Initialize the adapter */
406 if (emlxs_sli4_hba_init(hba)) {
407 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
408 "Unable to init hba.");
409
410 rval = EIO;
411 goto failed1;
412 }
413
414 #ifdef FMA_SUPPORT
415 /* Access handle validation */
416 switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
417 case SLI_INTF_IF_TYPE_6:
418 if ((emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
419 != DDI_FM_OK) ||
420 (emlxs_fm_check_acc_handle(hba,
421 hba->sli.sli4.bar0_acc_handle) != DDI_FM_OK) ||
422 (emlxs_fm_check_acc_handle(hba,
423 hba->sli.sli4.bar1_acc_handle) != DDI_FM_OK)) {
424 EMLXS_MSGF(EMLXS_CONTEXT,
425 &emlxs_invalid_access_handle_msg, NULL);
426
427 rval = EIO;
428 goto failed1;
429 }
430 break;
431 case SLI_INTF_IF_TYPE_2:
432 if ((emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
433 != DDI_FM_OK) ||
434 (emlxs_fm_check_acc_handle(hba,
435 hba->sli.sli4.bar0_acc_handle) != DDI_FM_OK)) {
436 EMLXS_MSGF(EMLXS_CONTEXT,
437 &emlxs_invalid_access_handle_msg, NULL);
438
439 rval = EIO;
440 goto failed1;
441 }
442 break;
443 default :
444 if ((emlxs_fm_check_acc_handle(hba,
445 hba->pci_acc_handle) != DDI_FM_OK) ||
446 (emlxs_fm_check_acc_handle(hba,
447 hba->sli.sli4.bar1_acc_handle) != DDI_FM_OK) ||
448 (emlxs_fm_check_acc_handle(hba,
449 hba->sli.sli4.bar2_acc_handle) != DDI_FM_OK)) {
450 EMLXS_MSGF(EMLXS_CONTEXT,
451 &emlxs_invalid_access_handle_msg, NULL);
452
453 rval = EIO;
454 goto failed1;
455 }
456 break;
457 }
458 #endif /* FMA_SUPPORT */
459
460 /*
461 * Setup and issue mailbox READ REV command
462 */
463 vpd->opFwRev = 0;
464 vpd->postKernRev = 0;
465 vpd->sli1FwRev = 0;
466 vpd->sli2FwRev = 0;
467 vpd->sli3FwRev = 0;
468 vpd->sli4FwRev = 0;
469
470 vpd->postKernName[0] = 0;
471 vpd->opFwName[0] = 0;
472 vpd->sli1FwName[0] = 0;
473 vpd->sli2FwName[0] = 0;
474 vpd->sli3FwName[0] = 0;
475 vpd->sli4FwName[0] = 0;
476
477 vpd->opFwLabel[0] = 0;
478 vpd->sli1FwLabel[0] = 0;
479 vpd->sli2FwLabel[0] = 0;
480 vpd->sli3FwLabel[0] = 0;
481 vpd->sli4FwLabel[0] = 0;
482
483 EMLXS_STATE_CHANGE(hba, FC_INIT_REV);
484
485 emlxs_mb_get_sli4_params(hba, mbq);
486 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
487 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
488 "Unable to read parameters. Mailbox cmd=%x status=%x",
489 mb->mbxCommand, mb->mbxStatus);
490
491 /* Set param defaults */
492 emlxs_sli4_set_default_params(hba);
493
494 } else {
495 /* Save parameters */
496 bcopy((char *)&mb->un.varSLIConfig.payload,
497 (char *)&hba->sli.sli4.param, sizeof (sli_params_t));
498
499 emlxs_data_dump(port, "SLI_PARMS",
500 (uint32_t *)&hba->sli.sli4.param,
501 sizeof (sli_params_t), 0);
502 }
503
504 /* Reuse mbq from previous mbox */
505 bzero(mbq, sizeof (MAILBOXQ));
506
507 emlxs_mb_get_port_name(hba, mbq);
508 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
509 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
510 "Unable to get port names. Mailbox cmd=%x status=%x",
511 mb->mbxCommand, mb->mbxStatus);
512
513 bzero(hba->sli.sli4.port_name,
514 sizeof (hba->sli.sli4.port_name));
515 } else {
516 /* Save port names */
517 bcopy((char *)&mb->un.varSLIConfig.payload,
518 (char *)&hba->sli.sli4.port_name,
519 sizeof (hba->sli.sli4.port_name));
520 }
521
522 /* Reuse mbq from previous mbox */
523 bzero(mbq, sizeof (MAILBOXQ));
524
525 emlxs_mb_read_rev(hba, mbq, 0);
526 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
527 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
528 "Unable to read rev. Mailbox cmd=%x status=%x",
529 mb->mbxCommand, mb->mbxStatus);
530
531 rval = EIO;
532 goto failed1;
533
534 }
535
536 emlxs_data_dump(port, "RD_REV", (uint32_t *)mb, 18, 0);
537 if (mb->un.varRdRev4.sliLevel != 4) {
538 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
539 "Invalid read rev Version for SLI4: 0x%x",
540 mb->un.varRdRev4.sliLevel);
541
542 rval = EIO;
543 goto failed1;
544 }
545
546 switch (mb->un.varRdRev4.dcbxMode) {
547 case EMLXS_DCBX_MODE_CIN: /* Mapped to nonFIP mode */
548 hba->flag &= ~FC_FIP_SUPPORTED;
549 break;
550
551 case EMLXS_DCBX_MODE_CEE: /* Mapped to FIP mode */
552 hba->flag |= FC_FIP_SUPPORTED;
553 break;
554
555 default:
556 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
557 "Invalid read rev dcbx mode for SLI4: 0x%x",
558 mb->un.varRdRev4.dcbxMode);
559
560 rval = EIO;
561 goto failed1;
562 }
563
564 /* Set FC/FCoE mode */
565 if (mb->un.varRdRev4.FCoE) {
566 hba->sli.sli4.flag |= EMLXS_SLI4_FCOE_MODE;
567 } else {
568 hba->sli.sli4.flag &= ~EMLXS_SLI4_FCOE_MODE;
569 }
570
571 /* Save information as VPD data */
572 vpd->rBit = 1;
573
574 vpd->sli4FwRev = (mb->un.varRdRev4.ULPFwId);
575 bcopy((char *)mb->un.varRdRev4.ULPFwName, vpd->sli4FwName, 16);
576
577 vpd->opFwRev = (mb->un.varRdRev4.ULPFwId);
578 bcopy((char *)mb->un.varRdRev4.ULPFwName, vpd->opFwName, 16);
579
580 vpd->postKernRev = (mb->un.varRdRev4.ARMFwId);
581 bcopy((char *)mb->un.varRdRev4.ARMFwName, vpd->postKernName, 16);
582
583 vpd->biuRev = mb->un.varRdRev4.HwRev1;
584 vpd->fcphHigh = mb->un.varRdRev4.fcphHigh;
585 vpd->fcphLow = mb->un.varRdRev4.fcphLow;
586 vpd->feaLevelHigh = mb->un.varRdRev4.feaLevelHigh;
587 vpd->feaLevelLow = mb->un.varRdRev4.feaLevelLow;
588
589 /* Decode FW labels */
590 if ((hba->model_info.chip & EMLXS_LANCER_CHIPS) != 0) {
591 bcopy(vpd->postKernName, vpd->sli4FwName, 16);
592 }
593 emlxs_decode_label(vpd->sli4FwName, vpd->sli4FwName, 0,
594 sizeof (vpd->sli4FwName));
595 emlxs_decode_label(vpd->opFwName, vpd->opFwName, 0,
596 sizeof (vpd->opFwName));
597 emlxs_decode_label(vpd->postKernName, vpd->postKernName, 0,
598 sizeof (vpd->postKernName));
599
600 if (hba->model_info.chip == EMLXS_BE2_CHIP) {
601 (void) strlcpy(vpd->sli4FwLabel, "be2.ufi",
602 sizeof (vpd->sli4FwLabel));
603 } else if (hba->model_info.chip == EMLXS_BE3_CHIP) {
604 (void) strlcpy(vpd->sli4FwLabel, "be3.ufi",
605 sizeof (vpd->sli4FwLabel));
606 } else if (hba->model_info.chip == EMLXS_LANCER_CHIP) {
607 (void) strlcpy(vpd->sli4FwLabel, "xe201.grp",
608 sizeof (vpd->sli4FwLabel));
609 } else if (hba->model_info.chip == EMLXS_LANCERG6_CHIP) {
610 (void) strlcpy(vpd->sli4FwLabel, "xe501.grp",
611 sizeof (vpd->sli4FwLabel));
612 } else if (hba->model_info.chip == EMLXS_PRISMG7_CHIP) {
613 (void) strlcpy(vpd->sli4FwLabel, "xe601.grp",
614 sizeof (vpd->sli4FwLabel));
615 } else {
616 (void) strlcpy(vpd->sli4FwLabel, "sli4.fw",
617 sizeof (vpd->sli4FwLabel));
618 }
619
620 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
621 "VPD ULP:%08x %s ARM:%08x %s f:%d %d %d %d : dcbx %d",
622 vpd->opFwRev, vpd->opFwName, vpd->postKernRev, vpd->postKernName,
623 vpd->fcphHigh, vpd->fcphLow, vpd->feaLevelHigh, vpd->feaLevelLow,
624 mb->un.varRdRev4.dcbxMode);
625
626 /* No key information is needed for SLI4 products */
627
628 /* Get adapter VPD information */
629 vpd->port_index = (uint32_t)-1;
630
631 /* Reuse mbq from previous mbox */
632 bzero(mbq, sizeof (MAILBOXQ));
633
634 emlxs_mb_dump_vpd(hba, mbq, 0);
635 vpd_data = hba->sli.sli4.dump_region.virt;
636
637 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
638 MBX_SUCCESS) {
639 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
640 "No VPD found. status=%x", mb->mbxStatus);
641 } else {
642 EMLXS_MSGF(EMLXS_CONTEXT,
643 &emlxs_init_debug_msg,
644 "VPD dumped. rsp_cnt=%d status=%x",
645 mb->un.varDmp4.rsp_cnt, mb->mbxStatus);
646
647 if (mb->un.varDmp4.rsp_cnt) {
648 EMLXS_MPDATA_SYNC(hba->sli.sli4.dump_region.dma_handle,
649 0, mb->un.varDmp4.rsp_cnt, DDI_DMA_SYNC_FORKERNEL);
650
651 #ifdef FMA_SUPPORT
652 if (hba->sli.sli4.dump_region.dma_handle) {
653 if (emlxs_fm_check_dma_handle(hba,
654 hba->sli.sli4.dump_region.dma_handle)
655 != DDI_FM_OK) {
656 EMLXS_MSGF(EMLXS_CONTEXT,
657 &emlxs_invalid_dma_handle_msg,
658 "sli4_online: hdl=%p",
659 hba->sli.sli4.dump_region.
660 dma_handle);
661 rval = EIO;
662 goto failed1;
663 }
664 }
665 #endif /* FMA_SUPPORT */
666
667 }
668 }
669
670 if (vpd_data[0]) {
671 (void) emlxs_parse_vpd(hba, (uint8_t *)vpd_data,
672 mb->un.varDmp4.rsp_cnt);
673
674 /*
675 * If there is a VPD part number, and it does not
676 * match the current default HBA model info,
677 * replace the default data with an entry that
678 * does match.
679 *
680 * After emlxs_parse_vpd model holds the VPD value
681 * for V2 and part_num hold the value for PN. These
682 * 2 values are NOT necessarily the same.
683 */
684
685 rval = 0;
686 if ((vpd->model[0] != 0) &&
687 (strcmp(&vpd->model[0], hba->model_info.model) != 0)) {
688
689 /* First scan for a V2 match */
690
691 for (i = 1; i < emlxs_pci_model_count; i++) {
692 if (strcmp(&vpd->model[0],
693 emlxs_pci_model[i].model) == 0) {
694 bcopy(&emlxs_pci_model[i],
695 &hba->model_info,
696 sizeof (emlxs_model_t));
697 rval = 1;
698 break;
699 }
700 }
701 }
702
703 if (!rval && (vpd->part_num[0] != 0) &&
704 (strcmp(&vpd->part_num[0], hba->model_info.model) != 0)) {
705
706 /* Next scan for a PN match */
707
708 for (i = 1; i < emlxs_pci_model_count; i++) {
709 if (strcmp(&vpd->part_num[0],
710 emlxs_pci_model[i].model) == 0) {
711 bcopy(&emlxs_pci_model[i],
712 &hba->model_info,
713 sizeof (emlxs_model_t));
714 break;
715 }
716 }
717 }
718
719 /* HP CNA port indices start at 1 instead of 0 */
720 if (hba->model_info.chip & EMLXS_BE_CHIPS) {
721 ssvid = ddi_get16(hba->pci_acc_handle,
722 (uint16_t *)(hba->pci_addr + PCI_SSVID_REGISTER));
723
724 if ((ssvid == PCI_SSVID_HP) && (vpd->port_index > 0)) {
725 vpd->port_index--;
726 }
727 }
728
729 /*
730 * Now lets update hba->model_info with the real
731 * VPD data, if any.
732 */
733
734 /*
735 * Replace the default model description with vpd data
736 */
737 if (vpd->model_desc[0] != 0) {
738 (void) strncpy(hba->model_info.model_desc,
739 vpd->model_desc,
740 (sizeof (hba->model_info.model_desc)-1));
741 }
742
743 /* Replace the default model with vpd data */
744 if (vpd->model[0] != 0) {
745 (void) strncpy(hba->model_info.model, vpd->model,
746 (sizeof (hba->model_info.model)-1));
747 }
748
749 /* Replace the default program types with vpd data */
750 if (vpd->prog_types[0] != 0) {
751 emlxs_parse_prog_types(hba, vpd->prog_types);
752 }
753 }
754
755 /*
756 * Since the adapter model may have changed with the vpd data
757 * lets double check if adapter is not supported
758 */
759 if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) {
760 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
761 "Unsupported adapter found. "
762 "Id:%d Vendor id:0x%x Device id:0x%x SSDID:0x%x "
763 "Model:%s", hba->model_info.id, hba->model_info.vendor_id,
764 hba->model_info.device_id, hba->model_info.ssdid,
765 hba->model_info.model);
766
767 rval = EIO;
768 goto failed1;
769 }
770
771 (void) strncpy(vpd->boot_version, vpd->sli4FwName,
772 (sizeof (vpd->boot_version)-1));
773
774 /* Get fcode version property */
775 emlxs_get_fcode_version(hba);
776
777 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
778 "Firmware: kern=%08x stub=%08x sli1=%08x", vpd->postKernRev,
779 vpd->opFwRev, vpd->sli1FwRev);
780
781 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
782 "Firmware: sli2=%08x sli3=%08x sli4=%08x fl=%x", vpd->sli2FwRev,
783 vpd->sli3FwRev, vpd->sli4FwRev, vpd->feaLevelHigh);
784
785 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
786 "BIOS: boot=%s fcode=%s", vpd->boot_version, vpd->fcode_version);
787
788 /*
789 * If firmware checking is enabled and the adapter model indicates
790 * a firmware image, then perform firmware version check
791 */
792 hba->fw_flag = 0;
793 hba->fw_timer = 0;
794
795 if (((fw_check & 0x1) &&
796 (hba->model_info.flags & EMLXS_ORACLE_BRANDED) &&
797 hba->model_info.fwid) ||
798 ((fw_check & 0x2) && hba->model_info.fwid)) {
799
800 /* Find firmware image indicated by adapter model */
801 fw = NULL;
802 for (i = 0; i < emlxs_fw_count; i++) {
803 if (emlxs_fw_table[i].id == hba->model_info.fwid) {
804 fw = &emlxs_fw_table[i];
805 break;
806 }
807 }
808
809 /*
810 * If the image was found, then verify current firmware
811 * versions of adapter
812 */
813 if (fw) {
814 /* Obtain current firmware version info */
815 if (hba->model_info.chip & EMLXS_BE_CHIPS) {
816 (void) emlxs_be_read_fw_version(hba, &hba_fw);
817 } else {
818 hba_fw.kern = vpd->postKernRev;
819 hba_fw.stub = vpd->opFwRev;
820 hba_fw.sli1 = vpd->sli1FwRev;
821 hba_fw.sli2 = vpd->sli2FwRev;
822 hba_fw.sli3 = vpd->sli3FwRev;
823 hba_fw.sli4 = vpd->sli4FwRev;
824 }
825
826 if (!kern_update &&
827 ((fw->kern && (hba_fw.kern != fw->kern)) ||
828 (fw->stub && (hba_fw.stub != fw->stub)))) {
829
830 hba->fw_flag |= FW_UPDATE_NEEDED;
831
832 } else if ((fw->kern && (hba_fw.kern != fw->kern)) ||
833 (fw->stub && (hba_fw.stub != fw->stub)) ||
834 (fw->sli1 && (hba_fw.sli1 != fw->sli1)) ||
835 (fw->sli2 && (hba_fw.sli2 != fw->sli2)) ||
836 (fw->sli3 && (hba_fw.sli3 != fw->sli3)) ||
837 (fw->sli4 && (hba_fw.sli4 != fw->sli4))) {
838
839 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
840 "Firmware update needed. "
841 "Updating. id=%d fw=%d",
842 hba->model_info.id, hba->model_info.fwid);
843
844 #ifdef MODFW_SUPPORT
845 /*
846 * Load the firmware image now
847 * If MODFW_SUPPORT is not defined, the
848 * firmware image will already be defined
849 * in the emlxs_fw_table
850 */
851 emlxs_fw_load(hba, fw);
852 #endif /* MODFW_SUPPORT */
853
854 if (fw->image && fw->size) {
855 uint32_t rc;
856
857 rc = emlxs_fw_download(hba,
858 (char *)fw->image, fw->size, 0);
859 if ((rc != FC_SUCCESS) &&
860 (rc != EMLXS_REBOOT_REQUIRED)) {
861 EMLXS_MSGF(EMLXS_CONTEXT,
862 &emlxs_init_msg,
863 "Firmware update failed.");
864 hba->fw_flag |=
865 FW_UPDATE_NEEDED;
866 }
867 #ifdef MODFW_SUPPORT
868 /*
869 * Unload the firmware image from
870 * kernel memory
871 */
872 emlxs_fw_unload(hba, fw);
873 #endif /* MODFW_SUPPORT */
874
875 fw_check = 0;
876
877 goto reset;
878 }
879
880 hba->fw_flag |= FW_UPDATE_NEEDED;
881
882 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
883 "Firmware image unavailable.");
884 } else {
885 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
886 "Firmware update not needed.");
887 }
888 } else {
889 /*
890 * This means either the adapter database is not
891 * correct or a firmware image is missing from the
892 * compile
893 */
894 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
895 "Firmware image unavailable. id=%d fw=%d",
896 hba->model_info.id, hba->model_info.fwid);
897 }
898 }
899
900 /* Reuse mbq from previous mbox */
901 bzero(mbq, sizeof (MAILBOXQ));
902
903 emlxs_mb_dump_fcoe(hba, mbq, 0);
904
905 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
906 MBX_SUCCESS) {
907 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
908 "No FCOE info found. status=%x", mb->mbxStatus);
909 } else {
910 EMLXS_MSGF(EMLXS_CONTEXT,
911 &emlxs_init_debug_msg,
912 "FCOE info dumped. rsp_cnt=%d status=%x",
913 mb->un.varDmp4.rsp_cnt, mb->mbxStatus);
914 (void) emlxs_parse_fcoe(hba,
915 (uint8_t *)hba->sli.sli4.dump_region.virt,
916 mb->un.varDmp4.rsp_cnt);
917 }
918
919 /* Reuse mbq from previous mbox */
920 bzero(mbq, sizeof (MAILBOXQ));
921
922 status = 0;
923 if (port->flag & EMLXS_INI_ENABLED) {
924 status |= SLI4_FEATURE_FCP_INITIATOR;
925 }
926 if (port->flag & EMLXS_TGT_ENABLED) {
927 status |= SLI4_FEATURE_FCP_TARGET;
928 }
929 if (cfg[CFG_NPIV_ENABLE].current) {
930 status |= SLI4_FEATURE_NPIV;
931 }
932 if (cfg[CFG_RQD_MODE].current) {
933 status |= SLI4_FEATURE_RQD;
934 }
935 if (cfg[CFG_PERF_HINT].current) {
936 if (hba->sli.sli4.param.PHON) {
937 status |= SLI4_FEATURE_PERF_HINT;
938 }
939 }
940
941 emlxs_mb_request_features(hba, mbq, status);
942
943 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
944 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
945 "Unable to REQUEST_FEATURES. Mailbox cmd=%x status=%x",
946 mb->mbxCommand, mb->mbxStatus);
947
948 rval = EIO;
949 goto failed1;
950 }
951 emlxs_data_dump(port, "REQ_FEATURE", (uint32_t *)mb, 6, 0);
952
953 /* Check to see if we get the features we requested */
954 if (status != mb->un.varReqFeatures.featuresEnabled) {
955
956 /* Just report descrepencies, don't abort the attach */
957
958 outptr = (uint8_t *)emlxs_request_feature_xlate(
959 mb->un.varReqFeatures.featuresRequested);
960 (void) strlcpy(buf, (char *)outptr, sizeof (buf));
961
962 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
963 "REQUEST_FEATURES: wanted:%s got:%s",
964 &buf[0], emlxs_request_feature_xlate(
965 mb->un.varReqFeatures.featuresEnabled));
966
967 }
968
969 if ((port->flag & EMLXS_INI_ENABLED) &&
970 !(mb->un.varReqFeatures.featuresEnabled &
971 SLI4_FEATURE_FCP_INITIATOR)) {
972 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
973 "Initiator mode not supported by adapter.");
974
975 rval = EIO;
976
977 #ifdef SFCT_SUPPORT
978 /* Check if we can fall back to just target mode */
979 if ((hba->pm_state == EMLXS_PM_IN_ATTACH) &&
980 (mb->un.varReqFeatures.featuresEnabled &
981 SLI4_FEATURE_FCP_TARGET) &&
982 (cfg[CFG_DTM_ENABLE].current == 1) &&
983 (cfg[CFG_TARGET_MODE].current == 1)) {
984
985 cfg[CFG_DTM_ENABLE].current = 0;
986
987 EMLXS_MSGF(EMLXS_CONTEXT,
988 &emlxs_init_failed_msg,
989 "Disabling dynamic target mode. "
990 "Enabling target mode only.");
991
992 /* This will trigger the driver to reattach */
993 rval = EAGAIN;
994 }
995 #endif /* SFCT_SUPPORT */
996 goto failed1;
997 }
998
999 if ((port->flag & EMLXS_TGT_ENABLED) &&
1000 !(mb->un.varReqFeatures.featuresEnabled &
1001 SLI4_FEATURE_FCP_TARGET)) {
1002 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1003 "Target mode not supported by adapter.");
1004
1005 rval = EIO;
1006
1007 #ifdef SFCT_SUPPORT
1008 /* Check if we can fall back to just initiator mode */
1009 if ((hba->pm_state == EMLXS_PM_IN_ATTACH) &&
1010 (mb->un.varReqFeatures.featuresEnabled &
1011 SLI4_FEATURE_FCP_INITIATOR) &&
1012 (cfg[CFG_DTM_ENABLE].current == 1) &&
1013 (cfg[CFG_TARGET_MODE].current == 0)) {
1014
1015 cfg[CFG_DTM_ENABLE].current = 0;
1016
1017 EMLXS_MSGF(EMLXS_CONTEXT,
1018 &emlxs_init_failed_msg,
1019 "Disabling dynamic target mode. "
1020 "Enabling initiator mode only.");
1021
1022 /* This will trigger the driver to reattach */
1023 rval = EAGAIN;
1024 }
1025 #endif /* SFCT_SUPPORT */
1026 goto failed1;
1027 }
1028
1029 if (mb->un.varReqFeatures.featuresEnabled & SLI4_FEATURE_NPIV) {
1030 hba->flag |= FC_NPIV_ENABLED;
1031 }
1032
1033 if (mb->un.varReqFeatures.featuresEnabled & SLI4_FEATURE_PERF_HINT) {
1034 hba->sli.sli4.flag |= EMLXS_SLI4_PHON;
1035 if (hba->sli.sli4.param.PHWQ) {
1036 hba->sli.sli4.flag |= EMLXS_SLI4_PHWQ;
1037 }
1038 }
1039
1040 /* Reuse mbq from previous mbox */
1041 bzero(mbq, sizeof (MAILBOXQ));
1042
1043 emlxs_mb_read_config(hba, mbq);
1044 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
1045 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1046 "Unable to READ_CONFIG. Mailbox cmd=%x status=%x",
1047 mb->mbxCommand, mb->mbxStatus);
1048
1049 rval = EIO;
1050 goto failed1;
1051 }
1052 emlxs_data_dump(port, "READ_CONFIG4", (uint32_t *)mb, 18, 0);
1053
1054 /* Set default extents */
1055 hba->sli.sli4.XRICount = mb->un.varRdConfig4.XRICount;
1056 hba->sli.sli4.XRIExtCount = 1;
1057 hba->sli.sli4.XRIExtSize = hba->sli.sli4.XRICount;
1058 hba->sli.sli4.XRIBase[0] = mb->un.varRdConfig4.XRIBase;
1059
1060 hba->sli.sli4.RPICount = mb->un.varRdConfig4.RPICount;
1061 hba->sli.sli4.RPIExtCount = 1;
1062 hba->sli.sli4.RPIExtSize = hba->sli.sli4.RPICount;
1063 hba->sli.sli4.RPIBase[0] = mb->un.varRdConfig4.RPIBase;
1064
1065 hba->sli.sli4.VPICount = mb->un.varRdConfig4.VPICount;
1066 hba->sli.sli4.VPIExtCount = 1;
1067 hba->sli.sli4.VPIExtSize = hba->sli.sli4.VPICount;
1068 hba->sli.sli4.VPIBase[0] = mb->un.varRdConfig4.VPIBase;
1069
1070 hba->sli.sli4.VFICount = mb->un.varRdConfig4.VFICount;
1071 hba->sli.sli4.VFIExtCount = 1;
1072 hba->sli.sli4.VFIExtSize = hba->sli.sli4.VFICount;
1073 hba->sli.sli4.VFIBase[0] = mb->un.varRdConfig4.VFIBase;
1074
1075 hba->sli.sli4.FCFICount = mb->un.varRdConfig4.FCFICount;
1076
1077 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1078 "CONFIG: xri:%d rpi:%d vpi:%d vfi:%d fcfi:%d",
1079 hba->sli.sli4.XRICount,
1080 hba->sli.sli4.RPICount,
1081 hba->sli.sli4.VPICount,
1082 hba->sli.sli4.VFICount,
1083 hba->sli.sli4.FCFICount);
1084
1085 if ((hba->sli.sli4.XRICount == 0) ||
1086 (hba->sli.sli4.RPICount == 0) ||
1087 (hba->sli.sli4.VPICount == 0) ||
1088 (hba->sli.sli4.VFICount == 0) ||
1089 (hba->sli.sli4.FCFICount == 0)) {
1090 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1091 "Invalid extent value(s) - xri:%d rpi:%d vpi:%d "
1092 "vfi:%d fcfi:%d",
1093 hba->sli.sli4.XRICount,
1094 hba->sli.sli4.RPICount,
1095 hba->sli.sli4.VPICount,
1096 hba->sli.sli4.VFICount,
1097 hba->sli.sli4.FCFICount);
1098
1099 rval = EIO;
1100 goto failed1;
1101 }
1102
1103 if (mb->un.varRdConfig4.extents) {
1104 if (emlxs_sli4_init_extents(hba, mbq)) {
1105 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1106 "Unable to initialize extents.");
1107
1108 rval = EIO;
1109 goto failed1;
1110 }
1111 }
1112
1113 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1114 "CONFIG: port_name:%c %c %c %c",
1115 hba->sli.sli4.port_name[0],
1116 hba->sli.sli4.port_name[1],
1117 hba->sli.sli4.port_name[2],
1118 hba->sli.sli4.port_name[3]);
1119
1120 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1121 "CONFIG: ldv:%d link_type:%d link_number:%d",
1122 mb->un.varRdConfig4.ldv,
1123 mb->un.varRdConfig4.link_type,
1124 mb->un.varRdConfig4.link_number);
1125
1126 if (mb->un.varRdConfig4.ldv) {
1127 hba->sli.sli4.link_number = mb->un.varRdConfig4.link_number;
1128 } else {
1129 hba->sli.sli4.link_number = (uint32_t)-1;
1130 }
1131
1132 if (hba->sli.sli4.VPICount) {
1133 hba->vpi_max = min(hba->sli.sli4.VPICount, MAX_VPORTS) - 1;
1134 }
1135
1136 /* Set the max node count */
1137 if (cfg[CFG_NUM_NODES].current > 0) {
1138 hba->max_nodes =
1139 min(cfg[CFG_NUM_NODES].current,
1140 hba->sli.sli4.RPICount);
1141 } else {
1142 hba->max_nodes = hba->sli.sli4.RPICount;
1143 }
1144
1145 /* Set the io throttle */
1146 hba->io_throttle = hba->sli.sli4.XRICount - IO_THROTTLE_RESERVE;
1147
1148 /* Set max_iotag */
1149 /* We add 1 in case all XRI's are non-zero */
1150 hba->max_iotag = hba->sli.sli4.XRICount + 1;
1151
1152 if (cfg[CFG_NUM_IOTAGS].current) {
1153 hba->max_iotag = min(hba->max_iotag,
1154 (uint16_t)cfg[CFG_NUM_IOTAGS].current);
1155 }
1156
1157 /* Set out-of-range iotag base */
1158 hba->fc_oor_iotag = hba->max_iotag;
1159
1160 /* Save the link speed capabilities */
1161 vpd->link_speed = (uint16_t)mb->un.varRdConfig4.lmt;
1162 emlxs_process_link_speed(hba);
1163
1164 /*
1165 * Allocate some memory for buffers
1166 */
1167 if (emlxs_mem_alloc_buffer(hba) == 0) {
1168 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1169 "Unable to allocate memory buffers.");
1170
1171 rval = ENOMEM;
1172 goto failed1;
1173 }
1174
1175 if (emlxs_sli4_resource_alloc(hba)) {
1176 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1177 "Unable to allocate resources.");
1178
1179 rval = ENOMEM;
1180 goto failed2;
1181 }
1182 emlxs_data_dump(port, "XRIp", (uint32_t *)hba->sli.sli4.XRIp, 18, 0);
1183 emlxs_sli4_zero_queue_stat(hba);
1184
1185 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1186 if ((cfg[CFG_NPIV_ENABLE].current) && (hba->flag & FC_NPIV_ENABLED)) {
1187 hba->fca_tran->fca_num_npivports = hba->vpi_max;
1188 }
1189 #endif /* >= EMLXS_MODREV5 */
1190
1191 /* Reuse mbq from previous mbox */
1192 bzero(mbq, sizeof (MAILBOXQ));
1193
1194 if (emlxs_sli4_post_sgl_pages(hba, mbq)) {
1195 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1196 "Unable to post sgl pages.");
1197
1198 rval = EIO;
1199 goto failed3;
1200 }
1201
1202 /* Reuse mbq from previous mbox */
1203 bzero(mbq, sizeof (MAILBOXQ));
1204
1205 if (emlxs_sli4_post_hdr_tmplates(hba, mbq)) {
1206 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1207 "Unable to post header templates.");
1208
1209 rval = EIO;
1210 goto failed3;
1211 }
1212
1213 /*
1214 * Add our interrupt routine to kernel's interrupt chain & enable it
1215 * If MSI is enabled this will cause Solaris to program the MSI address
1216 * and data registers in PCI config space
1217 */
1218 if (EMLXS_INTR_ADD(hba) != DDI_SUCCESS) {
1219 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1220 "Unable to add interrupt(s).");
1221
1222 rval = EIO;
1223 goto failed3;
1224 }
1225
1226 /* Reuse mbq from previous mbox */
1227 bzero(mbq, sizeof (MAILBOXQ));
1228
1229 /* This MUST be done after EMLXS_INTR_ADD */
1230 if (emlxs_sli4_create_queues(hba, mbq)) {
1231 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1232 "Unable to create queues.");
1233
1234 rval = EIO;
1235 goto failed3;
1236 }
1237
1238 EMLXS_STATE_CHANGE(hba, FC_INIT_CFGPORT);
1239
1240 /* Get and save the current firmware version (based on sli_mode) */
1241 emlxs_decode_firmware_rev(hba, vpd);
1242
1243
1244 EMLXS_STATE_CHANGE(hba, FC_INIT_INITLINK);
1245
1246 if (SLI4_FC_MODE) {
1247 /* Reuse mbq from previous mbox */
1248 bzero(mbq, sizeof (MAILBOXQ));
1249
1250 emlxs_mb_config_link(hba, mbq);
1251 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
1252 MBX_SUCCESS) {
1253 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1254 "Unable to configure link. Mailbox cmd=%x "
1255 "status=%x",
1256 mb->mbxCommand, mb->mbxStatus);
1257
1258 rval = EIO;
1259 goto failed3;
1260 }
1261 }
1262
1263 /* Reuse mbq from previous mbox */
1264 bzero(mbq, sizeof (MAILBOXQ));
1265
1266 /*
1267 * We need to get login parameters for NID
1268 */
1269 (void) emlxs_mb_read_sparam(hba, mbq);
1270 mp = (MATCHMAP *)mbq->bp;
1271 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
1272 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1273 "Unable to read parameters. Mailbox cmd=%x status=%x",
1274 mb->mbxCommand, mb->mbxStatus);
1275
1276 rval = EIO;
1277 goto failed3;
1278 }
1279
1280 /* Free the buffer since we were polling */
1281 emlxs_mem_put(hba, MEM_BUF, (void *)mp);
1282 mp = NULL;
1283
1284 /* If no serial number in VPD data, then use the WWPN */
1285 if (vpd->serial_num[0] == 0) {
1286 outptr = (uint8_t *)&hba->wwpn.IEEE[0];
1287 for (i = 0; i < 12; i++) {
1288 status = *outptr++;
1289 j = ((status & 0xf0) >> 4);
1290 if (j <= 9) {
1291 vpd->serial_num[i] =
1292 (char)((uint8_t)'0' + (uint8_t)j);
1293 } else {
1294 vpd->serial_num[i] =
1295 (char)((uint8_t)'A' + (uint8_t)(j - 10));
1296 }
1297
1298 i++;
1299 j = (status & 0xf);
1300 if (j <= 9) {
1301 vpd->serial_num[i] =
1302 (char)((uint8_t)'0' + (uint8_t)j);
1303 } else {
1304 vpd->serial_num[i] =
1305 (char)((uint8_t)'A' + (uint8_t)(j - 10));
1306 }
1307 }
1308
1309 /*
1310 * Set port number and port index to zero
1311 * The WWN's are unique to each port and therefore port_num
1312 * must equal zero. This effects the hba_fru_details structure
1313 * in fca_bind_port()
1314 */
1315 vpd->port_num[0] = 0;
1316 vpd->port_index = 0;
1317
1318 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1319 "CONFIG: WWPN: port_index=0");
1320 }
1321
1322 /* Make final attempt to set a port index */
1323 if (vpd->port_index == (uint32_t)-1) {
1324 dev_info_t *p_dip;
1325 dev_info_t *c_dip;
1326
1327 p_dip = ddi_get_parent(hba->dip);
1328 c_dip = ddi_get_child(p_dip);
1329
1330 vpd->port_index = 0;
1331 while (c_dip && (hba->dip != c_dip)) {
1332 c_dip = ddi_get_next_sibling(c_dip);
1333
1334 if (strcmp(ddi_get_name(c_dip), "ethernet") == 0) {
1335 continue;
1336 }
1337
1338 vpd->port_index++;
1339 }
1340
1341 EMLXS_MSGF(EMLXS_CONTEXT,
1342 &emlxs_init_debug_msg,
1343 "CONFIG: Device tree: port_index=%d",
1344 vpd->port_index);
1345 }
1346
1347 if (vpd->port_num[0] == 0) {
1348 if (hba->model_info.channels == EMLXS_MULTI_CHANNEL) {
1349 (void) snprintf(vpd->port_num,
1350 (sizeof (vpd->port_num)-1),
1351 "%d", vpd->port_index);
1352 }
1353 }
1354
1355 if (vpd->id[0] == 0) {
1356 (void) snprintf(vpd->id, (sizeof (vpd->id)-1),
1357 "%s %d",
1358 hba->model_info.model_desc, vpd->port_index);
1359
1360 }
1361
1362 if (vpd->manufacturer[0] == 0) {
1363 (void) strncpy(vpd->manufacturer, hba->model_info.manufacturer,
1364 (sizeof (vpd->manufacturer)-1));
1365 }
1366
1367 if (vpd->part_num[0] == 0) {
1368 (void) strncpy(vpd->part_num, hba->model_info.model,
1369 (sizeof (vpd->part_num)-1));
1370 }
1371
1372 if (vpd->model_desc[0] == 0) {
1373 (void) snprintf(vpd->model_desc, (sizeof (vpd->model_desc)-1),
1374 "%s %d",
1375 hba->model_info.model_desc, vpd->port_index);
1376 }
1377
1378 if (vpd->model[0] == 0) {
1379 (void) strncpy(vpd->model, hba->model_info.model,
1380 (sizeof (vpd->model)-1));
1381 }
1382
1383 if (vpd->prog_types[0] == 0) {
1384 emlxs_build_prog_types(hba, vpd);
1385 }
1386
1387 /* Create the symbolic names */
1388 (void) snprintf(hba->snn, (sizeof (hba->snn)-1),
1389 "%s %s FV%s DV%s %s",
1390 hba->model_info.manufacturer, hba->model_info.model,
1391 hba->vpd.fw_version, emlxs_version,
1392 (char *)utsname.nodename);
1393
1394 (void) snprintf(hba->spn, (sizeof (hba->spn)-1),
1395 "%s PPN-%01x%01x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
1396 hba->model_info.manufacturer,
1397 hba->wwpn.nameType, hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb,
1398 hba->wwpn.IEEE[0], hba->wwpn.IEEE[1], hba->wwpn.IEEE[2],
1399 hba->wwpn.IEEE[3], hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
1400
1401
1402 EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN);
1403 emlxs_sli4_enable_intr(hba);
1404
1405 /* Check persist-linkdown */
1406 if (cfg[CFG_PERSIST_LINKDOWN].current) {
1407 EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN_PERSIST);
1408 goto done;
1409 }
1410
1411 #ifdef SFCT_SUPPORT
1412 if ((port->mode == MODE_TARGET) &&
1413 !(port->fct_flags & FCT_STATE_PORT_ONLINE)) {
1414 goto done;
1415 }
1416 #endif /* SFCT_SUPPORT */
1417
1418 /* Reuse mbq from previous mbox */
1419 bzero(mbq, sizeof (MAILBOXQ));
1420
1421 /*
1422 * Interupts are enabled, start the timeout timers now.
1423 */
1424 emlxs_timer_start(hba);
1425
1426 /*
1427 * Setup and issue mailbox INITIALIZE LINK command
1428 * At this point, the interrupt will be generated by the HW
1429 */
1430 emlxs_mb_init_link(hba, mbq,
1431 cfg[CFG_TOPOLOGY].current, cfg[CFG_LINK_SPEED].current);
1432
1433 rval = emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0);
1434 if ((rval != MBX_SUCCESS) && (rval != MBX_BUSY)) {
1435 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1436 "Unable to initialize link. "
1437 "Mailbox cmd=%x status=%x",
1438 mb->mbxCommand, mb->mbxStatus);
1439
1440 rval = EIO;
1441 goto failed4;
1442 }
1443
1444 /* Wait for link to come up */
1445 i = cfg[CFG_LINKUP_DELAY].current;
1446 while (i && (hba->state < FC_LINK_UP)) {
1447 /* Check for hardware error */
1448 if (hba->state == FC_ERROR) {
1449 EMLXS_MSGF(EMLXS_CONTEXT,
1450 &emlxs_init_failed_msg,
1451 "Adapter error.");
1452
1453 rval = EIO;
1454 goto failed4;
1455 }
1456
1457 BUSYWAIT_MS(1000);
1458 i--;
1459 }
1460 if (i == 0) {
1461 EMLXS_MSGF(EMLXS_CONTEXT,
1462 &emlxs_init_msg,
1463 "Link up timeout");
1464 }
1465
1466 done:
1467 /*
1468 * The leadville driver will now handle the FLOGI at the driver level
1469 */
1470
1471 if (mbq) {
1472 (void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ));
1473 mbq = NULL;
1474 mb = NULL;
1475 }
1476
1477 if (hba->model_info.flags & EMLXS_GPIO_LEDS)
1478 emlxs_sli4_gpio_timer_start(hba);
1479
1480 return (0);
1481
1482 failed4:
1483 emlxs_timer_stop(hba);
1484
1485 failed3:
1486 EMLXS_STATE_CHANGE(hba, FC_ERROR);
1487
1488 if (mp) {
1489 emlxs_mem_put(hba, MEM_BUF, (void *)mp);
1490 mp = NULL;
1491 }
1492
1493
1494 if (hba->intr_flags & EMLXS_MSI_ADDED) {
1495 (void) EMLXS_INTR_REMOVE(hba);
1496 }
1497
1498 emlxs_sli4_resource_free(hba);
1499
1500 failed2:
1501 (void) emlxs_mem_free_buffer(hba);
1502
1503 failed1:
1504 if (mbq) {
1505 (void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ));
1506 mbq = NULL;
1507 mb = NULL;
1508 }
1509
1510 if (hba->sli.sli4.dump_region.virt) {
1511 mutex_enter(&EMLXS_PORT_LOCK);
1512 (void) emlxs_mem_free(hba, &hba->sli.sli4.dump_region);
1513 mutex_exit(&EMLXS_PORT_LOCK);
1514 }
1515
1516 if (rval == 0) {
1517 rval = EIO;
1518 }
1519
1520 return (rval);
1521
1522 } /* emlxs_sli4_online() */
1523
1524
1525 static void
emlxs_sli4_offline(emlxs_hba_t * hba,uint32_t reset_requested)1526 emlxs_sli4_offline(emlxs_hba_t *hba, uint32_t reset_requested)
1527 {
1528 /* Reverse emlxs_sli4_online */
1529
1530 if (hba->model_info.flags & EMLXS_GPIO_LEDS)
1531 emlxs_sli4_gpio_timer_stop(hba);
1532
1533 mutex_enter(&EMLXS_PORT_LOCK);
1534 if (hba->flag & FC_INTERLOCKED) {
1535 mutex_exit(&EMLXS_PORT_LOCK);
1536 goto killed;
1537 }
1538 mutex_exit(&EMLXS_PORT_LOCK);
1539
1540 if (reset_requested) {
1541 (void) emlxs_sli4_hba_reset(hba, 0, 0, 0);
1542 }
1543
1544 /* Shutdown the adapter interface */
1545 emlxs_sli4_hba_kill(hba);
1546
1547 killed:
1548
1549 /* Free SLI shared memory */
1550 emlxs_sli4_resource_free(hba);
1551
1552 /* Free driver shared memory */
1553 (void) emlxs_mem_free_buffer(hba);
1554
1555 /* Free the host dump region buffer */
1556 mutex_enter(&EMLXS_PORT_LOCK);
1557 (void) emlxs_mem_free(hba, &hba->sli.sli4.dump_region);
1558 mutex_exit(&EMLXS_PORT_LOCK);
1559
1560 } /* emlxs_sli4_offline() */
1561
1562 static int
emlxs_map_g7_bars(emlxs_hba_t * hba)1563 emlxs_map_g7_bars(emlxs_hba_t *hba)
1564 {
1565 emlxs_port_t *port = &PPORT;
1566 dev_info_t *dip;
1567 ddi_device_acc_attr_t dev_attr = emlxs_dev_acc_attr;
1568 uint_t num_prop;
1569 pci_regspec_t *prop;
1570 int rnum, type, size, rcount, r;
1571
1572 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, hba->dip, 0,
1573 "reg", (int **)&prop, &num_prop) != DDI_PROP_SUCCESS) {
1574 return (0);
1575 }
1576
1577 dip = (dev_info_t *)hba->dip;
1578 rcount = num_prop * sizeof (int) / sizeof (pci_regspec_t);
1579 for (r = 0; r < rcount; r++) {
1580 rnum = PCI_REG_REG_G(prop[r].pci_phys_hi);
1581 type = PCI_ADDR_MASK &prop[r].pci_phys_hi;
1582 size = prop[r].pci_size_low;
1583 EMLXS_MSGF(EMLXS_CONTEXT,
1584 &emlxs_init_debug_msg,
1585 "PCI_BAR%x regaddr=%x type=%x size=%x",
1586 r, rnum, PCI_REG_ADDR_G(type), size);
1587 if (type < PCI_ADDR_MEM32) {
1588 /* config or IO reg address */
1589 continue;
1590 }
1591 /* MEM reg address */
1592 caddr_t addr;
1593 ddi_acc_handle_t handle;
1594 int status;
1595
1596 status = ddi_regs_map_setup(dip, r,
1597 (caddr_t *)&addr, 0, 0, &dev_attr,
1598 &handle);
1599 if (status != DDI_SUCCESS) {
1600 EMLXS_MSGF(EMLXS_CONTEXT,
1601 &emlxs_attach_failed_msg,
1602 "ddi_regs_map_setup BAR%d failed."
1603 " status=%x",
1604 r, status);
1605 ddi_prop_free((void *)prop);
1606 return (0);
1607 }
1608 switch (r-1) {
1609 case 0:
1610 if (hba->sli.sli4.bar0_acc_handle == 0) {
1611 hba->sli.sli4.bar0_addr = addr;
1612 hba->sli.sli4.bar0_acc_handle =
1613 handle;
1614 }
1615 break;
1616 case 1:
1617 if (hba->sli.sli4.bar1_acc_handle == 0) {
1618 hba->sli.sli4.bar1_addr = addr;
1619 hba->sli.sli4.bar1_acc_handle =
1620 handle;
1621 }
1622 break;
1623 case 2:
1624 if (hba->sli.sli4.bar2_acc_handle == 0) {
1625 hba->sli.sli4.bar2_addr = addr;
1626 hba->sli.sli4.bar2_acc_handle =
1627 handle;
1628 }
1629 break;
1630 }
1631 }
1632 ddi_prop_free((void *)prop);
1633 return (num_prop > 0);
1634 }
1635
/*
 * Map the PCI BAR register spaces needed to talk to the adapter and
 * compute the doorbell/status register addresses for the detected
 * SLI-4 interface type.  Also allocates the bootstrap mailbox DMA
 * buffer on first call.
 *
 * Returns 0 on success, or ENOMEM after unmapping anything that was
 * set up.
 */
/*ARGSUSED*/
static int
emlxs_sli4_map_hdw(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	dev_info_t *dip;
	ddi_device_acc_attr_t dev_attr;
	int status;

	dip = (dev_info_t *)hba->dip;
	dev_attr = emlxs_dev_acc_attr;

	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
	case SLI_INTF_IF_TYPE_0:

		/* Map in Hardware BAR pages that will be used for */
		/* communication with HBA. */
		if (hba->sli.sli4.bar1_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, PCI_BAR1_RINDEX,
			    (caddr_t *)&hba->sli.sli4.bar1_addr,
			    0, 0, &dev_attr, &hba->sli.sli4.bar1_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(PCI) ddi_regs_map_setup BAR1 failed. "
				    "stat=%d mem=%p attr=%p hdl=%p",
				    status, &hba->sli.sli4.bar1_addr, &dev_attr,
				    &hba->sli.sli4.bar1_acc_handle);
				goto failed;
			}
		}

		if (hba->sli.sli4.bar2_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, PCI_BAR2_RINDEX,
			    (caddr_t *)&hba->sli.sli4.bar2_addr,
			    0, 0, &dev_attr, &hba->sli.sli4.bar2_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "ddi_regs_map_setup BAR2 failed. status=%x",
				    status);
				goto failed;
			}
		}

		/* offset from beginning of register space */
		/* if_type 0: semaphore in BAR1, doorbells in BAR2 */
		hba->sli.sli4.MPUEPSemaphore_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar1_addr +
		    CSR_MPU_EP_SEMAPHORE_OFFSET);
		hba->sli.sli4.MBDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_MB_DB_OFFSET);
		hba->sli.sli4.CQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_CQ_DB_OFFSET);
		hba->sli.sli4.MQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_MQ_DB_OFFSET);
		hba->sli.sli4.WQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_WQ_DB_OFFSET);
		hba->sli.sli4.RQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_RQ_DB_OFFSET);

		/* if_type 0 has no SLIPORT status/control registers */
		hba->sli.sli4.STATUS_reg_addr = 0;
		hba->sli.sli4.CNTL_reg_addr = 0;

		/* Error registers live in PCI config space on if_type 0 */
		hba->sli.sli4.ERR1_reg_addr =
		    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_LO_OFFSET);
		hba->sli.sli4.ERR2_reg_addr =
		    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_HI_OFFSET);

		hba->sli.sli4.PHYSDEV_reg_addr = 0;
		break;

	case SLI_INTF_IF_TYPE_2:	/* Lancer FC */

		/* Map in Hardware BAR pages that will be used for */
		/* communication with HBA. */
		if (hba->sli.sli4.bar0_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, PCI_BAR0_RINDEX,
			    (caddr_t *)&hba->sli.sli4.bar0_addr,
			    0, 0, &dev_attr, &hba->sli.sli4.bar0_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(PCI) ddi_regs_map_setup BAR0 failed. "
				    "stat=%d mem=%p attr=%p hdl=%p",
				    status, &hba->sli.sli4.bar0_addr, &dev_attr,
				    &hba->sli.sli4.bar0_acc_handle);
				goto failed;
			}
		}

		/* offset from beginning of register space */
		/* if_type 2: everything is reached through BAR0 */
		hba->sli.sli4.MPUEPSemaphore_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    SLIPORT_SEMAPHORE_OFFSET);
		hba->sli.sli4.MBDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr + PD_MB_DB_OFFSET);
		hba->sli.sli4.CQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr + PD_CQ_DB_OFFSET);
		hba->sli.sli4.MQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr + PD_MQ_DB_OFFSET);
		hba->sli.sli4.WQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr + PD_WQ_DB_OFFSET);
		hba->sli.sli4.RQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr + PD_RQ_DB_OFFSET);

		hba->sli.sli4.STATUS_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    SLIPORT_STATUS_OFFSET);
		hba->sli.sli4.CNTL_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    SLIPORT_CONTROL_OFFSET);
		hba->sli.sli4.ERR1_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    SLIPORT_ERROR1_OFFSET);
		hba->sli.sli4.ERR2_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    SLIPORT_ERROR2_OFFSET);
		hba->sli.sli4.PHYSDEV_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    PHYSDEV_CONTROL_OFFSET);

		break;
	case SLI_INTF_IF_TYPE_6:
		/* Map in Hardware BAR pages that will be used for */
		/* communication with HBA. */
		if (!emlxs_map_g7_bars(hba))
			goto failed;
		/* offset from beginning of register space */
		/* if_type 6 (G7): control/status in BAR0, doorbells in BAR1 */
		hba->sli.sli4.MPUEPSemaphore_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    SLIPORT_SEMAPHORE_OFFSET);
		hba->sli.sli4.MBDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr + PD_MB_DB_OFFSET);
		hba->sli.sli4.EQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar1_addr + PD_IF6_EQ_DB_OFFSET);
		hba->sli.sli4.CQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar1_addr + PD_IF6_CQ_DB_OFFSET);
		hba->sli.sli4.MQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar1_addr + PD_IF6_MQ_DB_OFFSET);
		hba->sli.sli4.WQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar1_addr + PD_IF6_WQ_DB_OFFSET);
		hba->sli.sli4.RQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar1_addr + PD_IF6_RQ_DB_OFFSET);

		hba->sli.sli4.STATUS_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    SLIPORT_STATUS_OFFSET);
		hba->sli.sli4.CNTL_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    SLIPORT_CONTROL_OFFSET);
		hba->sli.sli4.ERR1_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    SLIPORT_ERROR1_OFFSET);
		hba->sli.sli4.ERR2_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    SLIPORT_ERROR2_OFFSET);
		hba->sli.sli4.PHYSDEV_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    PHYSDEV_CONTROL_OFFSET);

		break;
	case SLI_INTF_IF_TYPE_1:
	case SLI_INTF_IF_TYPE_3:
	default:
		EMLXS_MSGF(EMLXS_CONTEXT,
		    &emlxs_attach_failed_msg,
		    "Map hdw: Unsupported if_type %08x",
		    (hba->sli_intf & SLI_INTF_IF_TYPE_MASK));

		goto failed;
	}

	/* Allocate the bootstrap mailbox DMA buffer on first call */
	if (hba->sli.sli4.bootstrapmb.virt == 0) {
		MBUF_INFO *buf_info;
		MBUF_INFO bufinfo;

		buf_info = &bufinfo;

		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = EMLXS_BOOTSTRAP_MB_SIZE + MBOX_EXTENSION_SIZE;
		buf_info->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG;
		/* Align to one page (ddi_ptob converts 1 page to bytes) */
		buf_info->align = ddi_ptob(dip, 1L);

		(void) emlxs_mem_alloc(hba, buf_info);

		if (buf_info->virt == NULL) {
			goto failed;
		}

		hba->sli.sli4.bootstrapmb.virt = buf_info->virt;
		hba->sli.sli4.bootstrapmb.phys = buf_info->phys;
		hba->sli.sli4.bootstrapmb.size = EMLXS_BOOTSTRAP_MB_SIZE +
		    MBOX_EXTENSION_SIZE;
		hba->sli.sli4.bootstrapmb.data_handle = buf_info->data_handle;
		hba->sli.sli4.bootstrapmb.dma_handle = buf_info->dma_handle;
		/* Only the base mailbox area is zeroed, not the extension */
		bzero((char *)hba->sli.sli4.bootstrapmb.virt,
		    EMLXS_BOOTSTRAP_MB_SIZE);
	}

	hba->chan_count = MAX_CHANNEL;

	return (0);

failed:

	/* Undo any partial mappings/allocations */
	emlxs_sli4_unmap_hdw(hba);
	return (ENOMEM);


} /* emlxs_sli4_map_hdw() */
1846
1847
1848 /*ARGSUSED*/
1849 static void
emlxs_sli4_unmap_hdw(emlxs_hba_t * hba)1850 emlxs_sli4_unmap_hdw(emlxs_hba_t *hba)
1851 {
1852 MBUF_INFO bufinfo;
1853 MBUF_INFO *buf_info = &bufinfo;
1854
1855
1856 if (hba->sli.sli4.bar0_acc_handle) {
1857 ddi_regs_map_free(&hba->sli.sli4.bar0_acc_handle);
1858 hba->sli.sli4.bar0_acc_handle = 0;
1859 }
1860
1861 if (hba->sli.sli4.bar1_acc_handle) {
1862 ddi_regs_map_free(&hba->sli.sli4.bar1_acc_handle);
1863 hba->sli.sli4.bar1_acc_handle = 0;
1864 }
1865
1866 if (hba->sli.sli4.bar2_acc_handle) {
1867 ddi_regs_map_free(&hba->sli.sli4.bar2_acc_handle);
1868 hba->sli.sli4.bar2_acc_handle = 0;
1869 }
1870
1871 if (hba->sli.sli4.bootstrapmb.virt) {
1872 bzero(buf_info, sizeof (MBUF_INFO));
1873
1874 if (hba->sli.sli4.bootstrapmb.phys) {
1875 buf_info->phys = hba->sli.sli4.bootstrapmb.phys;
1876 buf_info->data_handle =
1877 hba->sli.sli4.bootstrapmb.data_handle;
1878 buf_info->dma_handle =
1879 hba->sli.sli4.bootstrapmb.dma_handle;
1880 buf_info->flags = FC_MBUF_DMA;
1881 }
1882
1883 buf_info->virt = hba->sli.sli4.bootstrapmb.virt;
1884 buf_info->size = hba->sli.sli4.bootstrapmb.size;
1885 emlxs_mem_free(hba, buf_info);
1886
1887 hba->sli.sli4.bootstrapmb.virt = NULL;
1888 }
1889
1890 return;
1891
1892 } /* emlxs_sli4_unmap_hdw() */
1893
1894
/*
 * Poll the adapter for up to 30 seconds waiting for it to report
 * ready after a reset.
 *
 * Returns:
 *	0 - adapter is ready
 *	1 - POST fatal/unrecoverable error (if_type 0), or the port
 *	    reports that another reset is needed (if_type 2/6)
 *	2 - if_type 2/6 port is in an unrecoverable error state
 *	3 - unsupported interface type, or ready timeout
 * Most failure paths also move the HBA state to FC_ERROR.
 */
static int
emlxs_check_hdw_ready(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	uint32_t status;
	uint32_t i = 0;
	uint32_t err1;
	uint32_t err2;

	/* Wait for reset completion */
	while (i < 30) {

		switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
		case SLI_INTF_IF_TYPE_0:
			/* if_type 0 reports POST progress in the */
			/* MPU EP semaphore register */
			status = emlxs_sli4_read_sema(hba);

			/* Check to see if any errors occurred during init */
			if (status & ARM_POST_FATAL) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_reset_failed_msg,
				    "SEMA Error: status=%x", status);

				EMLXS_STATE_CHANGE(hba, FC_ERROR);

				return (1);
			}

			if ((status & ARM_UNRECOVERABLE_ERROR) ==
			    ARM_UNRECOVERABLE_ERROR) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_reset_failed_msg,
				    "Unrecoverable Error: status=%x", status);

				EMLXS_STATE_CHANGE(hba, FC_ERROR);

				return (1);
			}

			if ((status & ARM_POST_MASK) == ARM_POST_READY) {
				/* ARM Ready !! */
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_sli_detail_msg,
				    "ARM Ready: status=%x", status);

				return (0);
			}
			break;

		case SLI_INTF_IF_TYPE_2:
		case SLI_INTF_IF_TYPE_6:
			/* if_type 2/6 report via the SLIPORT status reg */
			status = emlxs_sli4_read_status(hba);

			if (status & SLI_STATUS_READY) {
				if (!(status & SLI_STATUS_ERROR)) {
					/* ARM Ready !! */
					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_sli_detail_msg,
					    "ARM Ready: status=%x", status);

					return (0);
				}

				/* Ready but in error: gather details */
				err1 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
				    hba->sli.sli4.ERR1_reg_addr);
				err2 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
				    hba->sli.sli4.ERR2_reg_addr);

				/* Reset-needed is recoverable: return 1 */
				/* without forcing FC_ERROR */
				if (status & SLI_STATUS_RESET_NEEDED) {
					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_sli_detail_msg,
					    "ARM Ready (Reset Needed): "
					    "status=%x err1=%x "
					    "err2=%x",
					    status, err1, err2);

					return (1);
				}

				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_reset_failed_msg,
				    "Unrecoverable Error: status=%x err1=%x "
				    "err2=%x",
				    status, err1, err2);

				EMLXS_STATE_CHANGE(hba, FC_ERROR);

				return (2);
			}

			break;

		default:
			EMLXS_STATE_CHANGE(hba, FC_ERROR);

			return (3);
		}

		/* Not ready yet; wait a second and re-poll */
		BUSYWAIT_MS(1000);
		i++;
	}

	/* Timeout occurred */
	/* if_type 0 error registers are in PCI config space; */
	/* all others are in BAR0 */
	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
	case SLI_INTF_IF_TYPE_0:
		err1 = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR1_reg_addr);
		err2 = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR2_reg_addr);
		break;

	default:
		err1 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.ERR1_reg_addr);
		err2 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.ERR2_reg_addr);
		break;
	}

	/*
	 * NOTE(review): 'status' here holds the last value read in the
	 * polling loop.  For if_type 0 that is the semaphore register,
	 * so the SLI_STATUS_ERROR test below is only meaningful for
	 * if_type 2/6 -- confirm whether that is intentional.
	 */
	if (status & SLI_STATUS_ERROR) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
		    "Ready Timeout: Port Error: status=%x err1=%x err2=%x",
		    status, err1, err2);
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
		    "Ready Timeout: status=%x err1=%x err2=%x",
		    status, err1, err2);
	}

	EMLXS_STATE_CHANGE(hba, FC_ERROR);

	return (3);

} /* emlxs_check_hdw_ready() */
2028
2029
2030 static uint32_t
emlxs_sli4_read_status(emlxs_hba_t * hba)2031 emlxs_sli4_read_status(emlxs_hba_t *hba)
2032 {
2033 #ifdef FMA_SUPPORT
2034 emlxs_port_t *port = &PPORT;
2035 #endif /* FMA_SUPPORT */
2036 uint32_t status;
2037
2038 switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
2039 case SLI_INTF_IF_TYPE_2:
2040 case SLI_INTF_IF_TYPE_6:
2041 status = ddi_get32(hba->sli.sli4.bar0_acc_handle,
2042 hba->sli.sli4.STATUS_reg_addr);
2043 #ifdef FMA_SUPPORT
2044 /* Access handle validation */
2045 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar0_acc_handle);
2046 #endif /* FMA_SUPPORT */
2047 break;
2048 default:
2049 status = 0;
2050 break;
2051 }
2052
2053 return (status);
2054
2055 } /* emlxs_sli4_read_status() */
2056
2057
2058 static uint32_t
emlxs_sli4_read_sema(emlxs_hba_t * hba)2059 emlxs_sli4_read_sema(emlxs_hba_t *hba)
2060 {
2061 #ifdef FMA_SUPPORT
2062 emlxs_port_t *port = &PPORT;
2063 #endif /* FMA_SUPPORT */
2064 uint32_t status;
2065
2066 switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
2067 case SLI_INTF_IF_TYPE_0:
2068 status = ddi_get32(hba->sli.sli4.bar1_acc_handle,
2069 hba->sli.sli4.MPUEPSemaphore_reg_addr);
2070 #ifdef FMA_SUPPORT
2071 /* Access handle validation */
2072 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar1_acc_handle);
2073 #endif /* FMA_SUPPORT */
2074 break;
2075
2076 case SLI_INTF_IF_TYPE_2:
2077 case SLI_INTF_IF_TYPE_6:
2078 status = ddi_get32(hba->sli.sli4.bar0_acc_handle,
2079 hba->sli.sli4.MPUEPSemaphore_reg_addr);
2080 #ifdef FMA_SUPPORT
2081 /* Access handle validation */
2082 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar0_acc_handle);
2083 #endif /* FMA_SUPPORT */
2084 break;
2085 default:
2086 status = 0;
2087 break;
2088 }
2089
2090 return (status);
2091
2092 } /* emlxs_sli4_read_sema() */
2093
2094
2095 static uint32_t
emlxs_sli4_read_mbdb(emlxs_hba_t * hba)2096 emlxs_sli4_read_mbdb(emlxs_hba_t *hba)
2097 {
2098 #ifdef FMA_SUPPORT
2099 emlxs_port_t *port = &PPORT;
2100 #endif /* FMA_SUPPORT */
2101 uint32_t status;
2102
2103 switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
2104 case SLI_INTF_IF_TYPE_0:
2105 status = ddi_get32(hba->sli.sli4.bar2_acc_handle,
2106 hba->sli.sli4.MBDB_reg_addr);
2107
2108 #ifdef FMA_SUPPORT
2109 /* Access handle validation */
2110 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar2_acc_handle);
2111 #endif /* FMA_SUPPORT */
2112 break;
2113
2114 case SLI_INTF_IF_TYPE_2:
2115 case SLI_INTF_IF_TYPE_6:
2116 status = ddi_get32(hba->sli.sli4.bar0_acc_handle,
2117 hba->sli.sli4.MBDB_reg_addr);
2118 #ifdef FMA_SUPPORT
2119 /* Access handle validation */
2120 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar0_acc_handle);
2121 #endif /* FMA_SUPPORT */
2122 break;
2123 default:
2124 status = 0;
2125 break;
2126 }
2127
2128 return (status);
2129
2130 } /* emlxs_sli4_read_mbdb() */
2131
2132
2133 static void
emlxs_sli4_write_mbdb(emlxs_hba_t * hba,uint64_t phys,boolean_t high)2134 emlxs_sli4_write_mbdb(emlxs_hba_t *hba, uint64_t phys, boolean_t high)
2135 {
2136 uint32_t db;
2137 uint_t shift;
2138
2139 /*
2140 * The bootstrap mailbox is posted as 2 x 30 bit values.
2141 * It is required to be 16 bit aligned, and the 2 low order
2142 * bits are used as flags.
2143 */
2144 shift = high ? 32 : 2;
2145
2146 db = (uint32_t)(phys >> shift) & BMBX_ADDR;
2147
2148 if (high)
2149 db |= BMBX_ADDR_HI;
2150
2151 switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
2152 case SLI_INTF_IF_TYPE_0:
2153 ddi_put32(hba->sli.sli4.bar2_acc_handle,
2154 hba->sli.sli4.MBDB_reg_addr, db);
2155 break;
2156
2157 case SLI_INTF_IF_TYPE_2:
2158 case SLI_INTF_IF_TYPE_6:
2159 ddi_put32(hba->sli.sli4.bar0_acc_handle,
2160 hba->sli.sli4.MBDB_reg_addr, db);
2161 break;
2162 }
2163
2164 } /* emlxs_sli4_write_mbdb() */
2165
2166
2167 static void
emlxs_sli4_write_eqdb(emlxs_hba_t * hba,uint16_t qid,uint32_t count,boolean_t arm)2168 emlxs_sli4_write_eqdb(emlxs_hba_t *hba, uint16_t qid, uint32_t count,
2169 boolean_t arm)
2170 {
2171 emlxs_eqdb_u db;
2172 db.word = 0;
2173
2174 /*
2175 * Add the qid to the doorbell. It is split into a low and
2176 * high component.
2177 */
2178
2179 if ((hba->sli_intf & SLI_INTF_IF_TYPE_MASK) == SLI_INTF_IF_TYPE_6) {
2180 db.db6.Qid = qid;
2181 db.db6.NumPopped = count;
2182 db.db6.Rearm = arm;
2183 } else {
2184 /* Initialize with the low bits */
2185 db.db2.Qid = qid & EQ_DB_ID_LO_MASK;
2186
2187 /* Add the high bits */
2188 db.db2.Qid_hi = (qid >> EQ_ID_LO_BITS) & 0x1f;
2189
2190 /*
2191 * Include the number of entries to be popped.
2192 */
2193 db.db2.NumPopped = count;
2194
2195 /* The doorbell is for an event queue */
2196 db.db2.Event = B_TRUE;
2197
2198 /* Arm if asked to do so */
2199 if (arm)
2200 /* Clear only on not AutoValid EqAV */
2201 db.db2.Clear = B_TRUE;
2202 db.db2.Rearm = arm;
2203 }
2204
2205 #ifdef DEBUG_FASTPATH
2206 EMLXS_MSGF(&hba->port[0], _FILENO_, __LINE__, &emlxs_sli_detail_msg,
2207 "EQE: CLEAR db=%08x pops=%d", db, count);
2208 #endif /* DEBUG_FASTPATH */
2209
2210 switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
2211 case SLI_INTF_IF_TYPE_0:
2212 /* The CQDB_reg_addr is also use for EQs */
2213 ddi_put32(hba->sli.sli4.bar2_acc_handle,
2214 hba->sli.sli4.CQDB_reg_addr, db.word);
2215 break;
2216
2217 case SLI_INTF_IF_TYPE_2:
2218 /* The CQDB_reg_addr is also use for EQs */
2219 ddi_put32(hba->sli.sli4.bar0_acc_handle,
2220 hba->sli.sli4.CQDB_reg_addr, db.word);
2221 break;
2222
2223 case SLI_INTF_IF_TYPE_6:
2224 ddi_put32(hba->sli.sli4.bar1_acc_handle,
2225 hba->sli.sli4.EQDB_reg_addr, db.word);
2226 break;
2227
2228 }
2229 } /* emlxs_sli4_write_eqdb() */
2230
2231 static void
emlxs_sli4_write_cqdb(emlxs_hba_t * hba,uint16_t qid,uint32_t count,boolean_t arm)2232 emlxs_sli4_write_cqdb(emlxs_hba_t *hba, uint16_t qid, uint32_t count,
2233 boolean_t arm)
2234 {
2235 emlxs_cqdb_u db;
2236 db.word = 0;
2237
2238 /*
2239 * Add the qid to the doorbell. It is split into a low and
2240 * high component.
2241 */
2242
2243 if ((hba->sli_intf & SLI_INTF_IF_TYPE_MASK) == SLI_INTF_IF_TYPE_6) {
2244 db.db6.Qid = qid;
2245 db.db6.NumPopped = count;
2246 db.db6.Rearm = arm;
2247 } else {
2248 /* Initialize with the low bits */
2249 db.db2.Qid = qid & CQ_DB_ID_LO_MASK;
2250
2251 /* Add the high bits */
2252 db.db2.Qid_hi = (qid >> CQ_ID_LO_BITS) & 0x1f;
2253
2254 /*
2255 * Include the number of entries to be popped.
2256 */
2257 db.db2.NumPopped = count;
2258
2259 /* Arm if asked to do so */
2260 db.db2.Rearm = arm;
2261 }
2262 #ifdef DEBUG_FASTPATH
2263 EMLXS_MSGF(&hba->port[0], _FILENO_, __LINE__, &emlxs_sli_detail_msg,
2264 "CQE: db=%08x: pops=%d", db, count);
2265 #endif /* DEBUG_FASTPATH */
2266
2267 switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
2268 case SLI_INTF_IF_TYPE_0:
2269 ddi_put32(hba->sli.sli4.bar2_acc_handle,
2270 hba->sli.sli4.CQDB_reg_addr, db.word);
2271 break;
2272
2273 case SLI_INTF_IF_TYPE_2:
2274 ddi_put32(hba->sli.sli4.bar0_acc_handle,
2275 hba->sli.sli4.CQDB_reg_addr, db.word);
2276 break;
2277
2278 case SLI_INTF_IF_TYPE_6:
2279 ddi_put32(hba->sli.sli4.bar1_acc_handle,
2280 hba->sli.sli4.CQDB_reg_addr, db.word);
2281 break;
2282 }
2283 } /* emlxs_sli4_write_cqdb() */
2284
2285
2286 static void
emlxs_sli4_write_rqdb(emlxs_hba_t * hba,uint16_t qid,uint_t count)2287 emlxs_sli4_write_rqdb(emlxs_hba_t *hba, uint16_t qid, uint_t count)
2288 {
2289 emlxs_rqdbu_t rqdb;
2290
2291 rqdb.word = 0;
2292 rqdb.db.Qid = qid;
2293 rqdb.db.NumPosted = count;
2294
2295 switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
2296 case SLI_INTF_IF_TYPE_0:
2297 ddi_put32(hba->sli.sli4.bar2_acc_handle,
2298 hba->sli.sli4.RQDB_reg_addr, rqdb.word);
2299 break;
2300
2301 case SLI_INTF_IF_TYPE_2:
2302 ddi_put32(hba->sli.sli4.bar0_acc_handle,
2303 hba->sli.sli4.RQDB_reg_addr, rqdb.word);
2304 break;
2305
2306 case SLI_INTF_IF_TYPE_6:
2307 ddi_put32(hba->sli.sli4.bar1_acc_handle,
2308 hba->sli.sli4.RQDB_reg_addr, rqdb.word);
2309 break;
2310
2311 }
2312
2313 } /* emlxs_sli4_write_rqdb() */
2314
2315
2316 static void
emlxs_sli4_write_mqdb(emlxs_hba_t * hba,uint16_t qid,uint_t count)2317 emlxs_sli4_write_mqdb(emlxs_hba_t *hba, uint16_t qid, uint_t count)
2318 {
2319 uint32_t db;
2320
2321 db = qid;
2322 db |= (count << MQ_DB_POP_SHIFT) & MQ_DB_POP_MASK;
2323
2324 switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
2325 case SLI_INTF_IF_TYPE_0:
2326 ddi_put32(hba->sli.sli4.bar2_acc_handle,
2327 hba->sli.sli4.MQDB_reg_addr, db);
2328 break;
2329
2330 case SLI_INTF_IF_TYPE_2:
2331 ddi_put32(hba->sli.sli4.bar0_acc_handle,
2332 hba->sli.sli4.MQDB_reg_addr, db);
2333 break;
2334 case SLI_INTF_IF_TYPE_6:
2335 ddi_put32(hba->sli.sli4.bar1_acc_handle,
2336 hba->sli.sli4.MQDB_reg_addr, db);
2337 break;
2338 }
2339
2340 } /* emlxs_sli4_write_mqdb() */
2341
2342
2343 static void
emlxs_sli4_write_wqdb(emlxs_hba_t * hba,uint16_t qid,uint_t posted,uint_t index)2344 emlxs_sli4_write_wqdb(emlxs_hba_t *hba, uint16_t qid, uint_t posted,
2345 uint_t index)
2346 {
2347 uint32_t db;
2348
2349 db = qid;
2350 db |= (posted << WQ_DB_POST_SHIFT) & WQ_DB_POST_MASK;
2351
2352 switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
2353 case SLI_INTF_IF_TYPE_0:
2354 db |= (index << WQ_DB_IDX_SHIFT) & WQ_DB_IDX_MASK;
2355 ddi_put32(hba->sli.sli4.bar2_acc_handle,
2356 hba->sli.sli4.WQDB_reg_addr, db);
2357 break;
2358
2359 case SLI_INTF_IF_TYPE_2:
2360 db |= (index << WQ_DB_IDX_SHIFT) & WQ_DB_IDX_MASK;
2361 ddi_put32(hba->sli.sli4.bar0_acc_handle,
2362 hba->sli.sli4.WQDB_reg_addr, db);
2363 break;
2364
2365 case SLI_INTF_IF_TYPE_6:
2366 ddi_put32(hba->sli.sli4.bar1_acc_handle,
2367 hba->sli.sli4.WQDB_reg_addr, db);
2368 break;
2369
2370 }
2371
2372 #ifdef DEBUG_FASTPATH
2373 EMLXS_MSGF(&hba->port[0], _FILENO_, __LINE__, &emlxs_sli_detail_msg,
2374 "WQ RING: %08x", db);
2375 #endif /* DEBUG_FASTPATH */
2376 } /* emlxs_sli4_write_wqdb() */
2377
2378
/*
 * Poll the bootstrap mailbox doorbell until the port reports BMBX_READY.
 *
 * tmo is a countdown in 10ms ticks.  Returns the number of ticks
 * remaining (non-zero) on success.  On timeout, logs the port error
 * registers, moves the adapter to the FC_ERROR state, and returns 0.
 */
static uint32_t
emlxs_check_bootstrap_ready(emlxs_hba_t *hba, uint32_t tmo)
{
	emlxs_port_t *port = &PPORT;
	uint32_t status = 0;
	uint32_t err1;
	uint32_t err2;

	/* Wait for reset completion, tmo is in 10ms ticks */
	while (tmo) {
		status = emlxs_sli4_read_mbdb(hba);

		/* Check to see if any errors occurred during init */
		if (status & BMBX_READY) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "BMBX Ready: status=0x%x", status);

			return (tmo);
		}

		BUSYWAIT_MS(10);
		tmo--;
	}

	/* Timed out: the error registers live in a per-if_type location */
	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
	case SLI_INTF_IF_TYPE_0:
		err1 = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR1_reg_addr);
		err2 = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR2_reg_addr);
		break;

	default: /* IF_TYPE_2 and IF_TYPE_6 */
		err1 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.ERR1_reg_addr);
		err2 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.ERR2_reg_addr);
		break;
	}

	/* Timeout occurred */
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
	    "Timeout waiting for BMailbox: status=%x err1=%x err2=%x",
	    status, err1, err2);

	EMLXS_STATE_CHANGE(hba, FC_ERROR);

	return (0);

} /* emlxs_check_bootstrap_ready() */
2429
2430
/*
 * Execute the mailbox command already staged in the bootstrap mailbox.
 *
 * The mailbox physical address is posted to the doorbell in two writes
 * (high half first, then low half), waiting for BMBX_READY after each.
 * tmo is a countdown in 10ms ticks; returns the ticks remaining, or 0
 * on timeout (emlxs_check_bootstrap_ready has then already moved the
 * adapter to FC_ERROR).
 */
static uint32_t
emlxs_issue_bootstrap_mb(emlxs_hba_t *hba, uint32_t tmo)
{
	emlxs_port_t *port = &PPORT;
	uint32_t *iptr;

	/*
	 * This routine assumes the bootstrap mbox is loaded
	 * with the mailbox command to be executed.
	 *
	 * First, load the high 30 bits of bootstrap mailbox
	 */
	emlxs_sli4_write_mbdb(hba, hba->sli.sli4.bootstrapmb.phys, B_TRUE);

	tmo = emlxs_check_bootstrap_ready(hba, tmo);
	if (tmo == 0) {
		return (0);
	}

	/* Load the low 30 bits of bootstrap mailbox */
	emlxs_sli4_write_mbdb(hba, hba->sli.sli4.bootstrapmb.phys, B_FALSE);

	tmo = emlxs_check_bootstrap_ready(hba, tmo);
	if (tmo == 0) {
		return (0);
	}

	/* Log the first words of the completed mailbox for diagnosis */
	iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "BootstrapMB: %p Completed %08x %08x %08x",
	    hba->sli.sli4.bootstrapmb.virt,
	    *iptr, *(iptr+1), *(iptr+2));

	return (tmo);

} /* emlxs_issue_bootstrap_mb() */
2468
2469
2470 static int
emlxs_init_bootstrap_mb(emlxs_hba_t * hba)2471 emlxs_init_bootstrap_mb(emlxs_hba_t *hba)
2472 {
2473 #ifdef FMA_SUPPORT
2474 emlxs_port_t *port = &PPORT;
2475 #endif /* FMA_SUPPORT */
2476 uint32_t *iptr;
2477 uint32_t tmo;
2478
2479 if (emlxs_check_hdw_ready(hba)) {
2480 return (1);
2481 }
2482
2483 if (hba->flag & FC_BOOTSTRAPMB_INIT) {
2484 return (0); /* Already initialized */
2485 }
2486
2487 /* NOTE: tmo is in 10ms ticks */
2488 tmo = emlxs_check_bootstrap_ready(hba, 3000);
2489 if (tmo == 0) {
2490 return (1);
2491 }
2492
2493 /* Issue FW_INITIALIZE command */
2494
2495 /* Special words to initialize bootstrap mbox MUST be little endian */
2496 iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
2497 *iptr = LE_SWAP32(FW_INITIALIZE_WORD0);
2498 *(iptr+1) = LE_SWAP32(FW_INITIALIZE_WORD1);
2499
2500 EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
2501 MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORDEV);
2502
2503 emlxs_data_dump(port, "FW_INIT", (uint32_t *)iptr, 6, 0);
2504 if (!emlxs_issue_bootstrap_mb(hba, tmo)) {
2505 return (1);
2506 }
2507
2508 #ifdef FMA_SUPPORT
2509 if (emlxs_fm_check_dma_handle(hba, hba->sli.sli4.bootstrapmb.dma_handle)
2510 != DDI_FM_OK) {
2511 EMLXS_MSGF(EMLXS_CONTEXT,
2512 &emlxs_invalid_dma_handle_msg,
2513 "init_bootstrap_mb: hdl=%p",
2514 hba->sli.sli4.bootstrapmb.dma_handle);
2515 return (1);
2516 }
2517 #endif
2518 hba->flag |= FC_BOOTSTRAPMB_INIT;
2519 return (0);
2520
2521 } /* emlxs_init_bootstrap_mb() */
2522
2523
2524
2525
/*
 * Bring the SLI-4 adapter to an initialized state: reset the port,
 * attach each channel to its work queue, initialize the per-VPI port
 * objects, establish the maximum node count, initialize the bootstrap
 * mailbox, and (if_type 0 only) cache the UE mask registers.
 *
 * Returns 0 on success, non-zero on reset or bootstrap-mailbox failure.
 */
static uint32_t
emlxs_sli4_hba_init(emlxs_hba_t *hba)
{
	int rc;
	uint16_t i;
	emlxs_port_t *vport;
	emlxs_config_t *cfg = &CFG;
	CHANNEL *cp;
	VPIobj_t *vpip;

	/* Restart the adapter */
	if (emlxs_sli4_hba_reset(hba, 1, 0, 0)) {
		return (1);
	}

	/* Point each channel at its work queue (the I/O fast path) */
	for (i = 0; i < hba->chan_count; i++) {
		cp = &hba->chan[i];
		cp->iopath = (void *)&hba->sli.sli4.wq[i];
	}

	/* Initialize all the port objects */
	hba->vpi_max = 0;
	for (i = 0; i < MAX_VPORTS; i++) {
		vport = &VPORT(i);
		vport->hba = hba;
		vport->vpi = i;

		vpip = &vport->VPIobj;
		vpip->index = i;
		vpip->VPI = i;
		vpip->port = vport;
		vpip->state = VPI_STATE_OFFLINE;
		vport->vpip = vpip;
	}

	/* Set the max node count; configured value wins, else 4096 */
	if (hba->max_nodes == 0) {
		if (cfg[CFG_NUM_NODES].current > 0) {
			hba->max_nodes = cfg[CFG_NUM_NODES].current;
		} else {
			hba->max_nodes = 4096;
		}
	}

	rc = emlxs_init_bootstrap_mb(hba);
	if (rc) {
		return (rc);
	}

	hba->sli.sli4.cfgFCOE.FCMap[0] = FCOE_FCF_MAP0;
	hba->sli.sli4.cfgFCOE.FCMap[1] = FCOE_FCF_MAP1;
	hba->sli.sli4.cfgFCOE.FCMap[2] = FCOE_FCF_MAP2;

	if ((hba->sli_intf & SLI_INTF_IF_TYPE_MASK) == SLI_INTF_IF_TYPE_0) {
		/* Cache the UE MASK registers value for UE error detection */
		hba->sli.sli4.ue_mask_lo = ddi_get32(hba->pci_acc_handle,
		    (uint32_t *)(hba->pci_addr + PCICFG_UE_MASK_LO_OFFSET));
		hba->sli.sli4.ue_mask_hi = ddi_get32(hba->pci_acc_handle,
		    (uint32_t *)(hba->pci_addr + PCICFG_UE_MASK_HI_OFFSET));
	}

	return (0);

} /* emlxs_sli4_hba_init() */
2590
2591
/*
 * Reset the SLI-4 adapter and re-baseline the driver's soft state.
 *
 * 'quiesce' selects a gentler path that skips emlxs_sli4_hba_kill()
 * and uses the quiesce variant of mailbox polling.  'restart' and
 * 'skip_post' are not referenced here (ARGSUSED).
 *
 * Returns 0 on success, 1 on failure; on failure the adapter state is
 * moved to FC_ERROR.  After a successful hardware reset the HBA flags,
 * channels, counters, and all port objects are reinitialized.
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_hba_reset(emlxs_hba_t *hba, uint32_t restart, uint32_t skip_post,
    uint32_t quiesce)
{
	emlxs_port_t *port = &PPORT;
	emlxs_port_t *vport;
	CHANNEL *cp;
	emlxs_config_t *cfg = &CFG;
	MAILBOXQ mboxq;
	uint32_t value;
	uint32_t i;
	uint32_t rc;
	uint16_t channelno;
	uint32_t status;
	uint32_t err1;
	uint32_t err2;
	uint8_t generate_event = 0;

	/* Honor the reset-enable config knob */
	if (!cfg[CFG_RESET_ENABLE].current) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
		    "Adapter reset disabled.");
		EMLXS_STATE_CHANGE(hba, FC_ERROR);

		return (1);
	}

	/* The reset mechanism differs by SLI interface type */
	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
	case SLI_INTF_IF_TYPE_0:
		/* if_type 0 resets via a RESET mailbox command */
		if (quiesce == 0) {
			emlxs_sli4_hba_kill(hba);

			/*
			 * Initalize Hardware that will be used to bring
			 * SLI4 online.
			 */
			rc = emlxs_init_bootstrap_mb(hba);
			if (rc) {
				return (rc);
			}
		}

		bzero((void *)&mboxq, sizeof (MAILBOXQ));
		emlxs_mb_resetport(hba, &mboxq);

		if (quiesce == 0) {
			if (emlxs_sli4_issue_mbox_cmd(hba, &mboxq,
			    MBX_POLL, 0) != MBX_SUCCESS) {
				/* Timeout occurred */
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_reset_failed_msg,
				    "Timeout: RESET");
				EMLXS_STATE_CHANGE(hba, FC_ERROR);
				/* Log a dump event - not supported */
				return (1);
			}
		} else {
			if (emlxs_sli4_issue_mbox_cmd4quiesce(hba, &mboxq,
			    MBX_POLL, 0) != MBX_SUCCESS) {
				EMLXS_STATE_CHANGE(hba, FC_ERROR);
				/* Log a dump event - not supported */
				return (1);
			}
		}
		emlxs_data_dump(port, "resetPort", (uint32_t *)&mboxq, 12, 0);
		break;

	case SLI_INTF_IF_TYPE_2:
	case SLI_INTF_IF_TYPE_6:
		/* if_type 2/6 resets via the SLI control register */
		if (quiesce == 0) {
			emlxs_sli4_hba_kill(hba);
		}

		rc = emlxs_check_hdw_ready(hba);
		if (rc > 1) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
			    "Adapter not ready for reset.");
			return (1);
		}

		if (rc == 1) {
			err1 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
			    hba->sli.sli4.ERR1_reg_addr);
			err2 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
			    hba->sli.sli4.ERR2_reg_addr);

			/* Don't generate an event if dump was forced */
			if ((err1 != 0x2) || (err2 != 0x2)) {
				generate_event = 1;
			}
		}

		/* Reset the port now */

		mutex_enter(&EMLXS_PORT_LOCK);
		value = SLI_CNTL_INIT_PORT;

		ddi_put32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.CNTL_reg_addr, value);
		mutex_exit(&EMLXS_PORT_LOCK);

		break;
	}

	/* Reset the hba structure */
	hba->flag &= FC_RESET_MASK;

	for (channelno = 0; channelno < hba->chan_count; channelno++) {
		cp = &hba->chan[channelno];
		cp->hba = hba;
		cp->channelno = channelno;
	}

	/* Clear all link/activity soft state */
	hba->channel_tx_count = 0;
	hba->io_count = 0;
	hba->iodone_count = 0;
	hba->topology = 0;
	hba->linkspeed = 0;
	hba->heartbeat_active = 0;
	hba->discovery_timer = 0;
	hba->linkup_timer = 0;
	hba->loopback_tics = 0;

	/* Specific to ATTO G5 boards */
	if (hba->model_info.flags & EMLXS_GPIO_LEDS) {
		/* Assume the boot driver enabled all LEDs */
		hba->gpio_current =
		    EMLXS_GPIO_LO | EMLXS_GPIO_HI | EMLXS_GPIO_ACT;
		hba->gpio_desired = 0;
		hba->gpio_bit = 0;
	}

	/* Reset the port objects */
	for (i = 0; i < MAX_VPORTS; i++) {
		vport = &VPORT(i);

		vport->flag &= EMLXS_PORT_RESET_MASK;
		vport->did = 0;
		vport->prev_did = 0;
		vport->lip_type = 0;
		bzero(&vport->fabric_sparam, sizeof (SERV_PARM));
		bzero(&vport->prev_fabric_sparam, sizeof (SERV_PARM));

		/* Reinitialize the base (pseudo) node for this port */
		bzero((caddr_t)&vport->node_base, sizeof (NODELIST));
		vport->node_base.nlp_Rpi = 0;
		vport->node_base.nlp_DID = 0xffffff;
		vport->node_base.nlp_list_next = NULL;
		vport->node_base.nlp_list_prev = NULL;
		vport->node_base.nlp_active = 1;
		vport->node_count = 0;

		if (vport->ub_count < EMLXS_UB_TOKEN_OFFSET) {
			vport->ub_count = EMLXS_UB_TOKEN_OFFSET;
		}
	}

	if (emlxs_check_hdw_ready(hba)) {
		return (1);
	}

	/* Report a firmware dump if one was left by the failing port */
	if (generate_event) {
		status = emlxs_sli4_read_status(hba);
		if (status & SLI_STATUS_DUMP_IMAGE_PRESENT) {
			emlxs_log_dump_event(port, NULL, 0);
		}
	}

	return (0);

} /* emlxs_sli4_hba_reset */
2762
2763
/* Cookie-list selectors for emlxs_pkt_to_sgl(); SGL_LAST flags the end */
#define SGL_CMD 0
#define SGL_RESP 1
#define SGL_DATA 2
#define SGL_LAST 0x80

/*
 * Translate one DMA cookie list of an fc_packet_t (command, response,
 * or data buffer, selected by sgl_type) into ULP_SGE64 entries at
 * 'sge'.
 *
 * Each SGE is staged in a local and copied out one iteration late so
 * that the final entry can carry the 'last' bit when SGL_LAST is set.
 * Returns a pointer just past the last SGE written, or NULL for an
 * unknown sgl_type.  If pcnt is non-NULL it receives the total byte
 * count mapped.
 *
 * NOTE(review): if the cookie loop body never runs (size <= 0 or an
 * empty cookie list), the trailing copy at the bottom emits a
 * stage_sge whose address/length fields were never assigned -- confirm
 * callers always provide a non-empty payload for the requested type.
 */
/*ARGSUSED*/
static ULP_SGE64 *
emlxs_pkt_to_sgl(emlxs_port_t *port, fc_packet_t *pkt, ULP_SGE64 *sge,
    uint32_t sgl_type, uint32_t *pcnt)
{
#ifdef DEBUG_SGE
	emlxs_hba_t *hba = HBA;
#endif /* DEBUG_SGE */
	ddi_dma_cookie_t *cp;
	uint_t i;
	uint_t last;
	int32_t size;
	int32_t sge_size;
	uint64_t sge_addr;
	int32_t len;
	uint32_t cnt;
	uint_t cookie_cnt;
	ULP_SGE64 stage_sge;

	/* Separate the last-flag from the segment type */
	last = sgl_type & SGL_LAST;
	sgl_type &= ~SGL_LAST;

#if (EMLXS_MODREV >= EMLXS_MODREV3)
	switch (sgl_type) {
	case SGL_CMD:
		cp = pkt->pkt_cmd_cookie;
		cookie_cnt = pkt->pkt_cmd_cookie_cnt;
		size = (int32_t)pkt->pkt_cmdlen;
		break;

	case SGL_RESP:
		cp = pkt->pkt_resp_cookie;
		cookie_cnt = pkt->pkt_resp_cookie_cnt;
		size = (int32_t)pkt->pkt_rsplen;
		break;


	case SGL_DATA:
		cp = pkt->pkt_data_cookie;
		cookie_cnt = pkt->pkt_data_cookie_cnt;
		size = (int32_t)pkt->pkt_datalen;
		break;

	default:
		return (NULL);
	}

#else
	/* Pre-MODREV3 packets carry a single cookie, not a list */
	switch (sgl_type) {
	case SGL_CMD:
		cp = &pkt->pkt_cmd_cookie;
		cookie_cnt = 1;
		size = (int32_t)pkt->pkt_cmdlen;
		break;

	case SGL_RESP:
		cp = &pkt->pkt_resp_cookie;
		cookie_cnt = 1;
		size = (int32_t)pkt->pkt_rsplen;
		break;


	case SGL_DATA:
		cp = &pkt->pkt_data_cookie;
		cookie_cnt = 1;
		size = (int32_t)pkt->pkt_datalen;
		break;

	default:
		return (NULL);
	}
#endif /* >= EMLXS_MODREV3 */

	stage_sge.offset = 0;
	stage_sge.type = 0;
	stage_sge.last = 0;
	cnt = 0;
	for (i = 0; i < cookie_cnt && size > 0; i++, cp++) {

		/* Split each cookie into SGEs of at most EMLXS_MAX_SGE_SIZE */
		sge_size = cp->dmac_size;
		sge_addr = cp->dmac_laddress;
		while (sge_size && size) {
			if (cnt) {
				/* Copy staged SGE before we build next one */
				BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
				    (uint8_t *)sge, sizeof (ULP_SGE64));
				sge++;
			}
			len = MIN(EMLXS_MAX_SGE_SIZE, sge_size);
			len = MIN(size, len);

			stage_sge.addrHigh =
			    PADDR_HI(sge_addr);
			stage_sge.addrLow =
			    PADDR_LO(sge_addr);
			stage_sge.length = len;
			if (sgl_type == SGL_DATA) {
				/* offset = running byte count for data SGEs */
				stage_sge.offset = cnt;
			}
#ifdef DEBUG_SGE
			emlxs_data_dump(port, "SGE", (uint32_t *)&stage_sge,
			    4, 0);
#endif /* DEBUG_SGE */
			sge_addr += len;
			sge_size -= len;

			cnt += len;
			size -= len;
		}
	}

	/* Flush the final staged SGE, tagging it if this list ends the SGL */
	if (last) {
		stage_sge.last = 1;
	}
	BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
	    sizeof (ULP_SGE64));

	sge++;

	if (pcnt) {
		*pcnt = cnt;
	}
	return (sge);

} /* emlxs_pkt_to_sgl */
2894
2895
2896 /*ARGSUSED*/
2897 uint32_t
emlxs_sli4_bde_setup(emlxs_port_t * port,emlxs_buf_t * sbp)2898 emlxs_sli4_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
2899 {
2900 emlxs_hba_t *hba = HBA;
2901 fc_packet_t *pkt;
2902 XRIobj_t *xrip;
2903 ULP_SGE64 *sge;
2904 emlxs_wqe_t *wqe;
2905 IOCBQ *iocbq;
2906 ddi_dma_cookie_t *cp_cmd;
2907 ddi_dma_cookie_t *cp_data;
2908 uint64_t sge_addr;
2909 uint32_t cmd_cnt;
2910 uint32_t resp_cnt;
2911
2912 iocbq = (IOCBQ *) &sbp->iocbq;
2913 wqe = &iocbq->wqe;
2914 pkt = PRIV2PKT(sbp);
2915 xrip = sbp->xrip;
2916 sge = xrip->SGList->virt;
2917
2918 #if (EMLXS_MODREV >= EMLXS_MODREV3)
2919 cp_cmd = pkt->pkt_cmd_cookie;
2920 cp_data = pkt->pkt_data_cookie;
2921 #else
2922 cp_cmd = &pkt->pkt_cmd_cookie;
2923 cp_data = &pkt->pkt_data_cookie;
2924 #endif /* >= EMLXS_MODREV3 */
2925
2926 iocbq = &sbp->iocbq;
2927 if (iocbq->flag & IOCB_FCP_CMD) {
2928
2929 if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
2930 return (1);
2931 }
2932
2933 /* CMD payload */
2934 sge = emlxs_pkt_to_sgl(port, pkt, sge, SGL_CMD, &cmd_cnt);
2935 if (! sge) {
2936 return (1);
2937 }
2938
2939 /* DATA payload */
2940 if (pkt->pkt_datalen != 0) {
2941 /* RSP payload */
2942 sge = emlxs_pkt_to_sgl(port, pkt, sge,
2943 SGL_RESP, &resp_cnt);
2944 if (! sge) {
2945 return (1);
2946 }
2947
2948 /* Data payload */
2949 sge = emlxs_pkt_to_sgl(port, pkt, sge,
2950 SGL_DATA | SGL_LAST, 0);
2951 if (! sge) {
2952 return (1);
2953 }
2954 sgl_done:
2955 if (hba->sli.sli4.flag & EMLXS_SLI4_PHON) {
2956 sge_addr = cp_data->dmac_laddress;
2957 wqe->FirstData.addrHigh = PADDR_HI(sge_addr);
2958 wqe->FirstData.addrLow = PADDR_LO(sge_addr);
2959 wqe->FirstData.tus.f.bdeSize =
2960 cp_data->dmac_size;
2961 }
2962 } else {
2963 /* RSP payload */
2964 sge = emlxs_pkt_to_sgl(port, pkt, sge,
2965 SGL_RESP | SGL_LAST, &resp_cnt);
2966 if (! sge) {
2967 return (1);
2968 }
2969 }
2970
2971 wqe->un.FcpCmd.Payload.addrHigh =
2972 PADDR_HI(cp_cmd->dmac_laddress);
2973 wqe->un.FcpCmd.Payload.addrLow =
2974 PADDR_LO(cp_cmd->dmac_laddress);
2975 wqe->un.FcpCmd.Payload.tus.f.bdeSize = cmd_cnt;
2976 wqe->un.FcpCmd.PayloadLength = cmd_cnt + resp_cnt;
2977
2978 } else {
2979
2980 if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
2981 /* CMD payload */
2982 sge = emlxs_pkt_to_sgl(port, pkt, sge,
2983 SGL_CMD | SGL_LAST, &cmd_cnt);
2984 if (! sge) {
2985 return (1);
2986 }
2987 } else {
2988 /* CMD payload */
2989 sge = emlxs_pkt_to_sgl(port, pkt, sge,
2990 SGL_CMD, &cmd_cnt);
2991 if (! sge) {
2992 return (1);
2993 }
2994
2995 /* RSP payload */
2996 sge = emlxs_pkt_to_sgl(port, pkt, sge,
2997 SGL_RESP | SGL_LAST, &resp_cnt);
2998 if (! sge) {
2999 return (1);
3000 }
3001 wqe->un.GenReq.PayloadLength = cmd_cnt;
3002 }
3003
3004 wqe->un.GenReq.Payload.addrHigh =
3005 PADDR_HI(cp_cmd->dmac_laddress);
3006 wqe->un.GenReq.Payload.addrLow =
3007 PADDR_LO(cp_cmd->dmac_laddress);
3008 wqe->un.GenReq.Payload.tus.f.bdeSize = cmd_cnt;
3009 }
3010 return (0);
3011 } /* emlxs_sli4_bde_setup */
3012
3013
3014
3015
#ifdef SFCT_SUPPORT
/*
 * Build the SGL and WQE payload fields for a target-mode (FCT)
 * TRECEIVE64/TSEND64 request described by sbp->fct_buf.
 *
 * TRECEIVE64: the first SGE points to a freshly allocated XFER_RDY
 * payload (freed later via iocbq->bp), followed by a SKIP SGE, then
 * the data SGEs.  TSEND64: two SKIP SGEs, then the data SGEs.
 *
 * Returns 0 on success (or when there is no fct_buf to map), 1 if the
 * buffer has more than one sglist entry or XRDY allocation fails.
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	emlxs_wqe_t *wqe;
	ULP_SGE64 stage_sge;
	ULP_SGE64 *sge;
	IOCB *iocb;
	IOCBQ *iocbq;
	MATCHMAP *mp;
	MATCHMAP *fct_mp;
	XRIobj_t *xrip;
	uint64_t sge_addr;
	uint32_t sge_size;
	uint32_t cnt;
	uint32_t len;
	uint32_t size;
	uint32_t *xrdy_vaddr;
	stmf_data_buf_t *dbuf;

	iocbq = &sbp->iocbq;
	iocb = &iocbq->iocb;
	wqe = &iocbq->wqe;
	xrip = sbp->xrip;

	/* Nothing to map */
	if (!sbp->fct_buf) {
		return (0);
	}

	size = sbp->fct_buf->db_data_size;

	/*
	 * The hardware will automaticlly round up
	 * to multiple of 4.
	 *
	 * if (size & 3) {
	 *	size = (size + 3) & 0xfffffffc;
	 * }
	 */
	fct_mp = (MATCHMAP *)sbp->fct_buf->db_port_private;

	if (sbp->fct_buf->db_sglist_length != 1) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_error_msg,
		    "fct_bde_setup: Only 1 sglist entry supported: %d",
		    sbp->fct_buf->db_sglist_length);
		return (1);
	}

	sge = xrip->SGList->virt;

	if (iocb->ULPCOMMAND == CMD_FCP_TRECEIVE64_CX) {

		mp = emlxs_mem_buf_alloc(hba, EMLXS_XFER_RDY_SIZE);
		if (!mp || !mp->virt || !mp->phys) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_error_msg,
			    "fct_bde_setup: Cannot allocate XRDY memory");
			return (1);
		}
		/* Save the MATCHMAP info to free this memory later */
		iocbq->bp = mp;

		/* Point to XRDY payload */
		xrdy_vaddr = (uint32_t *)(mp->virt);

		/* Fill in burstsize in payload */
		*xrdy_vaddr++ = 0;
		*xrdy_vaddr++ = LE_SWAP32(size);
		*xrdy_vaddr = 0;

		/* First 2 SGEs are XRDY and SKIP */
		stage_sge.addrHigh = PADDR_HI(mp->phys);
		stage_sge.addrLow = PADDR_LO(mp->phys);
		stage_sge.length = EMLXS_XFER_RDY_SIZE;
		stage_sge.offset = 0;
		stage_sge.type = 0;
		stage_sge.last = 0;

		/* Words 0-3 */
		wqe->un.FcpCmd.Payload.addrHigh = stage_sge.addrHigh;
		wqe->un.FcpCmd.Payload.addrLow = stage_sge.addrLow;
		wqe->un.FcpCmd.Payload.tus.f.bdeSize = EMLXS_XFER_RDY_SIZE;
		wqe->un.FcpCmd.PayloadLength = EMLXS_XFER_RDY_SIZE;

	} else {	/* CMD_FCP_TSEND64_CX */
		/* First 2 SGEs are SKIP */
		stage_sge.addrHigh = 0;
		stage_sge.addrLow = 0;
		stage_sge.length = 0;
		stage_sge.offset = 0;
		stage_sge.type = EMLXS_SGE_TYPE_SKIP;
		stage_sge.last = 0;

		/* Words 0-3 */
		wqe->un.FcpCmd.Payload.addrHigh = PADDR_HI(fct_mp->phys);
		wqe->un.FcpCmd.Payload.addrLow = PADDR_LO(fct_mp->phys);

		/* The BDE should match the contents of the first SGE payload */
		len = MIN(EMLXS_MAX_SGE_SIZE, size);
		wqe->un.FcpCmd.Payload.tus.f.bdeSize = len;

		/* The PayloadLength should be set to 0 for TSEND64. */
		wqe->un.FcpCmd.PayloadLength = 0;
	}

	dbuf = sbp->fct_buf;
	/*
	 * TotalTransferCount equals to Relative Offset field (Word 4)
	 * in both TSEND64 and TRECEIVE64 WQE.
	 */
	wqe->un.FcpCmd.TotalTransferCount = dbuf->db_relative_offset;

	/* Copy staged SGE into SGL */
	BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
	    (uint8_t *)sge, sizeof (ULP_SGE64));
	sge++;

	/* Second SGE is always a SKIP */
	stage_sge.addrHigh = 0;
	stage_sge.addrLow = 0;
	stage_sge.length = 0;
	stage_sge.offset = 0;
	stage_sge.type = EMLXS_SGE_TYPE_SKIP;
	stage_sge.last = 0;

	/* Copy staged SGE into SGL */
	BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
	    (uint8_t *)sge, sizeof (ULP_SGE64));
	sge++;

	sge_size = size;
	sge_addr = fct_mp->phys;
	cnt = 0;

	/* Build SGEs (staged one iteration late so 'last' can be set) */
	while (sge_size) {
		if (cnt) {
			/* Copy staged SGE before we build next one */
			BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
			    (uint8_t *)sge, sizeof (ULP_SGE64));
			sge++;
		}

		len = MIN(EMLXS_MAX_SGE_SIZE, sge_size);

		stage_sge.addrHigh = PADDR_HI(sge_addr);
		stage_sge.addrLow = PADDR_LO(sge_addr);
		stage_sge.length = len;
		stage_sge.offset = cnt;
		stage_sge.type = EMLXS_SGE_TYPE_DATA;

		sge_addr += len;
		sge_size -= len;
		cnt += len;
	}

	stage_sge.last = 1;

	/* Seed FirstData from the final staged (first data) SGE (PHON) */
	if (hba->sli.sli4.flag & EMLXS_SLI4_PHON) {
		wqe->FirstData.addrHigh = stage_sge.addrHigh;
		wqe->FirstData.addrLow = stage_sge.addrLow;
		wqe->FirstData.tus.f.bdeSize = stage_sge.length;
	}
	/* Copy staged SGE into SGL */
	BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
	    (uint8_t *)sge, sizeof (ULP_SGE64));

	return (0);

} /* emlxs_sli4_fct_bde_setup */
#endif /* SFCT_SUPPORT */
3187
3188
3189 static void
emlxs_sli4_issue_iocb_cmd(emlxs_hba_t * hba,CHANNEL * cp,IOCBQ * iocbq)3190 emlxs_sli4_issue_iocb_cmd(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
3191 {
3192 emlxs_port_t *port = &PPORT;
3193 emlxs_buf_t *sbp;
3194 uint32_t channelno;
3195 int32_t throttle;
3196 emlxs_wqe_t *wqe;
3197 emlxs_wqe_t *wqeslot;
3198 WQ_DESC_t *wq;
3199 uint32_t flag;
3200 uint16_t next_wqe;
3201 off_t offset;
3202 #ifdef NODE_THROTTLE_SUPPORT
3203 int32_t node_throttle;
3204 NODELIST *marked_node = NULL;
3205 #endif /* NODE_THROTTLE_SUPPORT */
3206
3207
3208 channelno = cp->channelno;
3209 wq = (WQ_DESC_t *)cp->iopath;
3210
3211 #ifdef DEBUG_FASTPATH
3212 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3213 "ISSUE WQE channel: %x %p", channelno, wq);
3214 #endif /* DEBUG_FASTPATH */
3215
3216 throttle = 0;
3217
3218 /* Check if FCP ring and adapter is not ready */
3219 /* We may use any ring for FCP_CMD */
3220 if (iocbq && (iocbq->flag & IOCB_FCP_CMD) && (hba->state != FC_READY)) {
3221 if (!(iocbq->flag & IOCB_SPECIAL) || !iocbq->port ||
3222 (((emlxs_port_t *)iocbq->port)->mode == MODE_INITIATOR)) {
3223 emlxs_tx_put(iocbq, 1);
3224 return;
3225 }
3226 }
3227
3228 /* Attempt to acquire CMD_RING lock */
3229 if (mutex_tryenter(&EMLXS_QUE_LOCK(channelno)) == 0) {
3230 /* Queue it for later */
3231 if (iocbq) {
3232 if ((hba->io_count -
3233 hba->channel_tx_count) > 10) {
3234 emlxs_tx_put(iocbq, 1);
3235 return;
3236 } else {
3237
3238 mutex_enter(&EMLXS_QUE_LOCK(channelno));
3239 }
3240 } else {
3241 return;
3242 }
3243 }
3244 /* EMLXS_QUE_LOCK acquired */
3245
3246 /* Throttle check only applies to non special iocb */
3247 if (iocbq && (!(iocbq->flag & IOCB_SPECIAL))) {
3248 /* Check if HBA is full */
3249 throttle = hba->io_throttle - hba->io_active;
3250 if (throttle <= 0) {
3251 /* Hitting adapter throttle limit */
3252 /* Queue it for later */
3253 if (iocbq) {
3254 emlxs_tx_put(iocbq, 1);
3255 }
3256
3257 goto busy;
3258 }
3259 }
3260
3261 /* Check to see if we have room for this WQE */
3262 next_wqe = wq->host_index + 1;
3263 if (next_wqe >= wq->max_index) {
3264 next_wqe = 0;
3265 }
3266
3267 if (next_wqe == wq->port_index) {
3268 /* Queue it for later */
3269 if (iocbq) {
3270 emlxs_tx_put(iocbq, 1);
3271 }
3272 goto busy;
3273 }
3274
3275 /*
3276 * We have a command ring slot available
3277 * Make sure we have an iocb to send
3278 */
3279 if (iocbq) {
3280 mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
3281
3282 /* Check if the ring already has iocb's waiting */
3283 if (cp->nodeq.q_first != NULL) {
3284 /* Put the current iocbq on the tx queue */
3285 emlxs_tx_put(iocbq, 0);
3286
3287 /*
3288 * Attempt to replace it with the next iocbq
3289 * in the tx queue
3290 */
3291 iocbq = emlxs_tx_get(cp, 0);
3292 }
3293
3294 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3295 } else {
3296 iocbq = emlxs_tx_get(cp, 1);
3297 }
3298
3299 sendit:
3300 /* Process each iocbq */
3301 while (iocbq) {
3302 sbp = iocbq->sbp;
3303
3304 #ifdef NODE_THROTTLE_SUPPORT
3305 if (sbp && sbp->node && sbp->node->io_throttle) {
3306 node_throttle = sbp->node->io_throttle -
3307 sbp->node->io_active;
3308 if (node_throttle <= 0) {
3309 /* Node is busy */
3310 /* Queue this iocb and get next iocb from */
3311 /* channel */
3312
3313 if (!marked_node) {
3314 marked_node = sbp->node;
3315 }
3316
3317 mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
3318 emlxs_tx_put(iocbq, 0);
3319
3320 if (cp->nodeq.q_first == marked_node) {
3321 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3322 goto busy;
3323 }
3324
3325 iocbq = emlxs_tx_get(cp, 0);
3326 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3327 continue;
3328 }
3329 }
3330 marked_node = 0;
3331 #endif /* NODE_THROTTLE_SUPPORT */
3332
3333 wqe = &iocbq->wqe;
3334 #ifdef DEBUG_FASTPATH
3335 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3336 "ISSUE QID %d WQE iotag:%x xri:%d", wq->qid,
3337 wqe->RequestTag, wqe->XRITag);
3338 #endif /* DEBUG_FASTPATH */
3339
3340 if (sbp) {
3341 /* If exchange removed after wqe was prep'ed, drop it */
3342 if (!(sbp->xrip)) {
3343 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3344 "Xmit WQE iotag:%x xri:%d aborted",
3345 wqe->RequestTag, wqe->XRITag);
3346
3347 /* Get next iocb from the tx queue */
3348 iocbq = emlxs_tx_get(cp, 1);
3349 continue;
3350 }
3351
3352 if (sbp->pkt_flags & PACKET_DELAY_REQUIRED) {
3353
3354 /* Perform delay */
3355 if ((channelno == hba->channel_els) &&
3356 !(iocbq->flag & IOCB_FCP_CMD)) {
3357 drv_usecwait(100000);
3358 } else {
3359 drv_usecwait(20000);
3360 }
3361 }
3362
3363 /* Check for ULP pkt request */
3364 mutex_enter(&sbp->mtx);
3365
3366 if (sbp->node == NULL) {
3367 /* Set node to base node by default */
3368 iocbq->node = (void *)&port->node_base;
3369 sbp->node = (void *)&port->node_base;
3370 }
3371
3372 sbp->pkt_flags |= PACKET_IN_CHIPQ;
3373 mutex_exit(&sbp->mtx);
3374
3375 atomic_inc_32(&hba->io_active);
3376 #ifdef NODE_THROTTLE_SUPPORT
3377 if (sbp->node) {
3378 atomic_inc_32(&sbp->node->io_active);
3379 }
3380 #endif /* NODE_THROTTLE_SUPPORT */
3381
3382 sbp->xrip->flag |= EMLXS_XRI_PENDING_IO;
3383 #ifdef SFCT_SUPPORT
3384 #ifdef FCT_IO_TRACE
3385 if (sbp->fct_cmd) {
3386 emlxs_fct_io_trace(port, sbp->fct_cmd,
3387 EMLXS_FCT_IOCB_ISSUED);
3388 emlxs_fct_io_trace(port, sbp->fct_cmd,
3389 icmd->ULPCOMMAND);
3390 }
3391 #endif /* FCT_IO_TRACE */
3392 #endif /* SFCT_SUPPORT */
3393 cp->hbaSendCmd_sbp++;
3394 iocbq->channel = cp;
3395 } else {
3396 cp->hbaSendCmd++;
3397 }
3398
3399 flag = iocbq->flag;
3400
3401 /*
3402 * At this point, we have a command ring slot available
3403 * and an iocb to send
3404 */
3405 wq->release_depth--;
3406 if (wq->release_depth == 0) {
3407 wq->release_depth = WQE_RELEASE_DEPTH;
3408 wqe->WQEC = 1;
3409 }
3410
3411 HBASTATS.IocbIssued[channelno]++;
3412 wq->num_proc++;
3413
3414 /* Send the iocb */
3415 wqeslot = (emlxs_wqe_t *)wq->addr.virt;
3416 wqeslot += wq->host_index;
3417
3418 wqe->CQId = wq->cqid;
3419 if (hba->sli.sli4.param.PHWQ) {
3420 WQE_PHWQ_WQID(wqe, wq->qid);
3421 }
3422 BE_SWAP32_BCOPY((uint8_t *)wqe, (uint8_t *)wqeslot,
3423 sizeof (emlxs_wqe_t));
3424 #ifdef DEBUG_WQE
3425 emlxs_data_dump(port, "WQE", (uint32_t *)wqe, 18, 0);
3426 #endif /* DEBUG_WQE */
3427 offset = (off_t)((uint64_t)((unsigned long)
3428 wq->addr.virt) -
3429 (uint64_t)((unsigned long)
3430 hba->sli.sli4.slim2.virt));
3431
3432 EMLXS_MPDATA_SYNC(wq->addr.dma_handle, offset,
3433 4096, DDI_DMA_SYNC_FORDEV);
3434
3435 /*
3436 * After this, the sbp / iocb / wqe should not be
3437 * accessed in the xmit path.
3438 */
3439
3440 /* Ring the WQ Doorbell */
3441 emlxs_sli4_write_wqdb(hba, wq->qid, 1, wq->host_index);
3442 wq->host_index = next_wqe;
3443
3444 if (!sbp) {
3445 emlxs_mem_put(hba, MEM_IOCB, (void *)iocbq);
3446 }
3447
3448 if (iocbq && (!(flag & IOCB_SPECIAL))) {
3449 /* Check if HBA is full */
3450 throttle = hba->io_throttle - hba->io_active;
3451 if (throttle <= 0) {
3452 goto busy;
3453 }
3454 }
3455
3456 /* Check to see if we have room for another WQE */
3457 next_wqe++;
3458 if (next_wqe >= wq->max_index) {
3459 next_wqe = 0;
3460 }
3461
3462 if (next_wqe == wq->port_index) {
3463 /* Queue it for later */
3464 goto busy;
3465 }
3466
3467 /* Get the next iocb from the tx queue if there is one */
3468 iocbq = emlxs_tx_get(cp, 1);
3469 }
3470
3471 mutex_exit(&EMLXS_QUE_LOCK(channelno));
3472
3473 return;
3474
3475 busy:
3476 wq->num_busy++;
3477 if (throttle <= 0) {
3478 HBASTATS.IocbThrottled++;
3479 } else {
3480 HBASTATS.IocbRingFull[channelno]++;
3481 }
3482
3483 mutex_exit(&EMLXS_QUE_LOCK(channelno));
3484
3485 return;
3486
3487 } /* emlxs_sli4_issue_iocb_cmd() */
3488
3489
/*
 * Post a prepared mailbox command to the SLI4 Mailbox Queue (MQ) and
 * ring the MQ doorbell.  Used by the MBX_NOWAIT and MBX_SLEEP paths of
 * emlxs_sli4_issue_mbox_cmd(); completion is reported asynchronously.
 *
 * mqe points at the MQ entry selected by the caller; mb is the
 * driver-side MAILBOX image to be copied into it.  tmo is unused in
 * this function (hence ARGSUSED).
 *
 * Returns MBX_SUCCESS unconditionally; the doorbell write cannot fail.
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_issue_mq(emlxs_port_t *port, MAILBOX4 *mqe, MAILBOX *mb,
    uint32_t tmo)
{
	emlxs_hba_t *hba = HBA;
	MAILBOXQ *mbq;
	MAILBOX4 *mb4;
	MATCHMAP *mp;
	uint32_t *iptr;
	off_t offset;

	mbq = (MAILBOXQ *)mb;
	mb4 = (MAILBOX4 *)mb;
	mp = (MATCHMAP *) mbq->nonembed;
	/* Remember the active MQE so the completion handler can find it */
	hba->mbox_mqe = (void *)mqe;

	if ((mb->mbxCommand != MBX_SLI_CONFIG) ||
	    (mb4->un.varSLIConfig.be.embedded)) {
		/*
		 * If this is an embedded mbox, everything should fit
		 * into the mailbox area.
		 */
		/* Swap to big-endian while copying into the MQ entry */
		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)mqe,
		    MAILBOX_CMD_SLI4_BSIZE);

		EMLXS_MPDATA_SYNC(hba->sli.sli4.mq.addr.dma_handle, 0,
		    4096, DDI_DMA_SYNC_FORDEV);

		/* Heartbeats are too frequent to be worth dumping */
		if (mb->mbxCommand != MBX_HEARTBEAT) {
			emlxs_data_dump(port, "MBOX CMD", (uint32_t *)mqe,
			    18, 0);
		}
	} else {
		/* SLI_CONFIG and non-embedded */

		/*
		 * If this is not embedded, the MQ area
		 * MUST contain a SGE pointer to a larger area for the
		 * non-embedded mailbox command.
		 * mp will point to the actual mailbox command which
		 * should be copied into the non-embedded area.
		 */
		mb4->un.varSLIConfig.be.sge_cnt = 1;
		mb4->un.varSLIConfig.be.payload_length = mp->size;
		/* Build the SGE (low addr, high addr, length) in place */
		iptr = (uint32_t *)&mb4->un.varSLIConfig.be.un_hdr.hdr_req;
		*iptr++ = (uint32_t)PADDR_LO(mp->phys);
		*iptr++ = (uint32_t)PADDR_HI(mp->phys);
		*iptr = mp->size;

		/* Payload must be big-endian before DMA to the adapter */
		BE_SWAP32_BUFFER(mp->virt, mp->size);

		EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
		    DDI_DMA_SYNC_FORDEV);

		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)mqe,
		    MAILBOX_CMD_SLI4_BSIZE);

		/* Byte offset of the MQ within the slim2 DMA region */
		offset = (off_t)((uint64_t)((unsigned long)
		    hba->sli.sli4.mq.addr.virt) -
		    (uint64_t)((unsigned long)
		    hba->sli.sli4.slim2.virt));

		EMLXS_MPDATA_SYNC(hba->sli.sli4.mq.addr.dma_handle, offset,
		    4096, DDI_DMA_SYNC_FORDEV);

		emlxs_data_dump(port, "MBOX EXT", (uint32_t *)mqe, 12, 0);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
		    "Extension Addr %p %p", mp->phys, (uint32_t *)(mp->virt));
		emlxs_data_dump(port, "EXT AREA", (uint32_t *)mp->virt, 24, 0);
	}

	/* Ring the MQ Doorbell */
	if (mb->mbxCommand != MBX_HEARTBEAT) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "MQ RING: Qid %04x", hba->sli.sli4.mq.qid);
	}

	emlxs_sli4_write_mqdb(hba, hba->sli.sli4.mq.qid, 1);

	return (MBX_SUCCESS);

} /* emlxs_sli4_issue_mq() */
3573
3574
3575 /*ARGSUSED*/
3576 static uint32_t
emlxs_sli4_issue_bootstrap(emlxs_hba_t * hba,MAILBOX * mb,uint32_t tmo)3577 emlxs_sli4_issue_bootstrap(emlxs_hba_t *hba, MAILBOX *mb, uint32_t tmo)
3578 {
3579 emlxs_port_t *port = &PPORT;
3580 MAILBOXQ *mbq;
3581 MAILBOX4 *mb4;
3582 MATCHMAP *mp = NULL;
3583 uint32_t *iptr;
3584 int nonembed = 0;
3585
3586 mbq = (MAILBOXQ *)mb;
3587 mb4 = (MAILBOX4 *)mb;
3588 mp = (MATCHMAP *) mbq->nonembed;
3589 hba->mbox_mqe = hba->sli.sli4.bootstrapmb.virt;
3590
3591 if ((mb->mbxCommand != MBX_SLI_CONFIG) ||
3592 (mb4->un.varSLIConfig.be.embedded)) {
3593 /*
3594 * If this is an embedded mbox, everything should fit
3595 * into the bootstrap mailbox area.
3596 */
3597 iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
3598 BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)iptr,
3599 MAILBOX_CMD_SLI4_BSIZE);
3600
3601 EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
3602 MAILBOX_CMD_SLI4_BSIZE, DDI_DMA_SYNC_FORDEV);
3603 emlxs_data_dump(port, "MBOX CMD", iptr, 18, 0);
3604 } else {
3605 /*
3606 * If this is not embedded, the bootstrap mailbox area
3607 * MUST contain a SGE pointer to a larger area for the
3608 * non-embedded mailbox command.
3609 * mp will point to the actual mailbox command which
3610 * should be copied into the non-embedded area.
3611 */
3612 nonembed = 1;
3613 mb4->un.varSLIConfig.be.sge_cnt = 1;
3614 mb4->un.varSLIConfig.be.payload_length = mp->size;
3615 iptr = (uint32_t *)&mb4->un.varSLIConfig.be.un_hdr.hdr_req;
3616 *iptr++ = (uint32_t)PADDR_LO(mp->phys);
3617 *iptr++ = (uint32_t)PADDR_HI(mp->phys);
3618 *iptr = mp->size;
3619
3620 BE_SWAP32_BUFFER(mp->virt, mp->size);
3621
3622 EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
3623 DDI_DMA_SYNC_FORDEV);
3624
3625 iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
3626 BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)iptr,
3627 MAILBOX_CMD_SLI4_BSIZE);
3628
3629 EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
3630 EMLXS_BOOTSTRAP_MB_SIZE + MBOX_EXTENSION_SIZE,
3631 DDI_DMA_SYNC_FORDEV);
3632
3633 emlxs_data_dump(port, "MBOX EXT", iptr, 12, 0);
3634 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3635 "Extension Addr %p %p", mp->phys,
3636 (uint32_t *)((uint8_t *)mp->virt));
3637 iptr = (uint32_t *)((uint8_t *)mp->virt);
3638 emlxs_data_dump(port, "EXT AREA", (uint32_t *)mp->virt, 24, 0);
3639 }
3640
3641
3642 /* NOTE: tmo is in 10ms ticks */
3643 if (!emlxs_issue_bootstrap_mb(hba, tmo)) {
3644 return (MBX_TIMEOUT);
3645 }
3646
3647 if ((mb->mbxCommand != MBX_SLI_CONFIG) ||
3648 (mb4->un.varSLIConfig.be.embedded)) {
3649 EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
3650 MAILBOX_CMD_SLI4_BSIZE, DDI_DMA_SYNC_FORKERNEL);
3651
3652 iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
3653 BE_SWAP32_BCOPY((uint8_t *)iptr, (uint8_t *)mb,
3654 MAILBOX_CMD_SLI4_BSIZE);
3655
3656 emlxs_data_dump(port, "MBOX CMP", iptr, 18, 0);
3657
3658 } else {
3659 EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
3660 EMLXS_BOOTSTRAP_MB_SIZE + MBOX_EXTENSION_SIZE,
3661 DDI_DMA_SYNC_FORKERNEL);
3662
3663 EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
3664 DDI_DMA_SYNC_FORKERNEL);
3665
3666 BE_SWAP32_BUFFER(mp->virt, mp->size);
3667
3668 iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
3669 BE_SWAP32_BCOPY((uint8_t *)iptr, (uint8_t *)mb,
3670 MAILBOX_CMD_SLI4_BSIZE);
3671
3672 emlxs_data_dump(port, "MBOX CMP", iptr, 12, 0);
3673 iptr = (uint32_t *)((uint8_t *)mp->virt);
3674 emlxs_data_dump(port, "EXT AREA", (uint32_t *)iptr, 24, 0);
3675 }
3676
3677 #ifdef FMA_SUPPORT
3678 if (nonembed && mp) {
3679 if (emlxs_fm_check_dma_handle(hba, mp->dma_handle)
3680 != DDI_FM_OK) {
3681 EMLXS_MSGF(EMLXS_CONTEXT,
3682 &emlxs_invalid_dma_handle_msg,
3683 "sli4_issue_bootstrap: mp_hdl=%p",
3684 mp->dma_handle);
3685 return (MBXERR_DMA_ERROR);
3686 }
3687 }
3688
3689 if (emlxs_fm_check_dma_handle(hba,
3690 hba->sli.sli4.bootstrapmb.dma_handle)
3691 != DDI_FM_OK) {
3692 EMLXS_MSGF(EMLXS_CONTEXT,
3693 &emlxs_invalid_dma_handle_msg,
3694 "sli4_issue_bootstrap: hdl=%p",
3695 hba->sli.sli4.bootstrapmb.dma_handle);
3696 return (MBXERR_DMA_ERROR);
3697 }
3698 #endif
3699
3700 return (MBX_SUCCESS);
3701
3702 } /* emlxs_sli4_issue_bootstrap() */
3703
3704
/*
 * Issue a mailbox command to an SLI4 adapter.
 *
 * Enforces per-command minimum timeouts, serializes against any mailbox
 * already in flight, then dispatches by wait mode:
 *   MBX_NOWAIT - post to the MQ and return; completion is asynchronous.
 *   MBX_SLEEP  - post to the MQ and cv_wait for MBQ_COMPLETED.
 *   MBX_POLL   - issue synchronously via the bootstrap mailbox.
 * A caller-requested non-NOWAIT mode is coerced to MBX_SLEEP when
 * interrupts are enabled, MBX_POLL otherwise.
 *
 * tmo is in seconds on entry; tmo_local is the same value in 10ms tics.
 *
 * Returns MBX_SUCCESS, MBX_BUSY (NOWAIT queued behind an active mailbox),
 * MBX_TIMEOUT, MBX_HARDWARE_ERROR, or the final mb->mbxStatus.
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_issue_mbox_cmd(emlxs_hba_t *hba, MAILBOXQ *mbq, int32_t flag,
    uint32_t tmo)
{
	emlxs_port_t *port;
	MAILBOX4 *mb4;
	MAILBOX *mb;
	mbox_rsp_hdr_t *hdr_rsp;
	MATCHMAP *mp;
	uint32_t *iptr;
	uint32_t rc;
	uint32_t i;
	uint32_t tmo_local;

	if (!mbq->port) {
		mbq->port = &PPORT;
	}

	port = (emlxs_port_t *)mbq->port;

	mb4 = (MAILBOX4 *)mbq;
	mb = (MAILBOX *)mbq;

	mb->mbxStatus = MBX_SUCCESS;
	rc = MBX_SUCCESS;

	/* Check for minimum timeouts */
	switch (mb->mbxCommand) {
	/* Mailbox commands that erase/write flash */
	case MBX_DOWN_LOAD:
	case MBX_UPDATE_CFG:
	case MBX_LOAD_AREA:
	case MBX_LOAD_EXP_ROM:
	case MBX_WRITE_NV:
	case MBX_FLASH_WR_ULA:
	case MBX_DEL_LD_ENTRY:
	case MBX_LOAD_SM:
	case MBX_DUMP_MEMORY:
	case MBX_WRITE_VPARMS:
	case MBX_ACCESS_VDATA:
		if (tmo < 300) {
			tmo = 300;
		}
		break;

	case MBX_SLI_CONFIG: {
		mbox_req_hdr_t *hdr_req;

		hdr_req = (mbox_req_hdr_t *)
		    &mb4->un.varSLIConfig.be.un_hdr.hdr_req;

		/* Flash/object/profile opcodes get the long timeout too */
		if (hdr_req->subsystem == IOCTL_SUBSYSTEM_COMMON) {
			switch (hdr_req->opcode) {
			case COMMON_OPCODE_WRITE_OBJ:
			case COMMON_OPCODE_READ_OBJ:
			case COMMON_OPCODE_READ_OBJ_LIST:
			case COMMON_OPCODE_DELETE_OBJ:
			case COMMON_OPCODE_SET_BOOT_CFG:
			case COMMON_OPCODE_GET_PROFILE_CFG:
			case COMMON_OPCODE_SET_PROFILE_CFG:
			case COMMON_OPCODE_GET_PROFILE_LIST:
			case COMMON_OPCODE_SET_ACTIVE_PROFILE:
			case COMMON_OPCODE_GET_PROFILE_CAPS:
			case COMMON_OPCODE_GET_MR_PROFILE_CAPS:
			case COMMON_OPCODE_SET_MR_PROFILE_CAPS:
			case COMMON_OPCODE_SET_FACTORY_PROFILE_CFG:
			case COMMON_OPCODE_SEND_ACTIVATION:
			case COMMON_OPCODE_RESET_LICENSES:
			case COMMON_OPCODE_SET_PHYSICAL_LINK_CFG_V1:
			case COMMON_OPCODE_GET_VPD_DATA:
				if (tmo < 300) {
					tmo = 300;
				}
				break;
			default:
				if (tmo < 30) {
					tmo = 30;
				}
			}
		} else if (hdr_req->subsystem == IOCTL_SUBSYSTEM_FCOE) {
			switch (hdr_req->opcode) {
			case FCOE_OPCODE_SET_FCLINK_SETTINGS:
				if (tmo < 300) {
					tmo = 300;
				}
				break;
			default:
				if (tmo < 30) {
					tmo = 30;
				}
			}
		} else {
			if (tmo < 30) {
				tmo = 30;
			}
		}

		/*
		 * Also: VENDOR_MANAGE_FFV (0x13, 0x02) (not currently used)
		 */

		break;
	}
	default:
		if (tmo < 30) {
			tmo = 30;
		}
		break;
	}

	/* Convert tmo seconds to 10 millisecond tics */
	tmo_local = tmo * 100;

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Adjust wait flag */
	if (flag != MBX_NOWAIT) {
		/* Without interrupts, a sleeping wait could never wake */
		if (hba->sli.sli4.flag & EMLXS_SLI4_INTR_ENABLED) {
			flag = MBX_SLEEP;
		} else {
			flag = MBX_POLL;
		}
	} else {
		/* Must have interrupts enabled to perform MBX_NOWAIT */
		if (!(hba->sli.sli4.flag & EMLXS_SLI4_INTR_ENABLED)) {

			mb->mbxStatus = MBX_HARDWARE_ERROR;
			mutex_exit(&EMLXS_PORT_LOCK);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Interrupts disabled. %s failed.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand));

			return (MBX_HARDWARE_ERROR);
		}
	}

	/* Check for hardware error ; special case SLI_CONFIG */
	/* COMMON_OPCODE_RESET is still allowed through on a dead adapter */
	if ((hba->flag & FC_HARDWARE_ERROR) &&
	    ! ((mb4->mbxCommand == MBX_SLI_CONFIG) &&
	    (mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode ==
	    COMMON_OPCODE_RESET))) {
		mb->mbxStatus = MBX_HARDWARE_ERROR;

		mutex_exit(&EMLXS_PORT_LOCK);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
		    "Hardware error reported. %s failed. status=%x mb=%p",
		    emlxs_mb_cmd_xlate(mb->mbxCommand), mb->mbxStatus, mb);

		return (MBX_HARDWARE_ERROR);
	}

	/* Another mailbox is already in flight */
	if (hba->mbox_queue_flag) {
		/* If we are not polling, then queue it for later */
		if (flag == MBX_NOWAIT) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Busy. %s: mb=%p NoWait.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);

			emlxs_mb_put(hba, mbq);

			HBASTATS.MboxBusy++;

			mutex_exit(&EMLXS_PORT_LOCK);

			return (MBX_BUSY);
		}

		/* Wait for the active mailbox; each pass is ~10ms */
		while (hba->mbox_queue_flag) {
			mutex_exit(&EMLXS_PORT_LOCK);

			if (tmo_local-- == 0) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_event_msg,
				    "Timeout. %s: mb=%p tmo=%d Waiting.",
				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
				    tmo);

				/* Non-lethalStatus mailbox timeout */
				/* Does not indicate a hardware error */
				mb->mbxStatus = MBX_TIMEOUT;
				return (MBX_TIMEOUT);
			}

			BUSYWAIT_MS(10);
			mutex_enter(&EMLXS_PORT_LOCK);

			/* Check for hardware error ; special case SLI_CONFIG */
			if ((hba->flag & FC_HARDWARE_ERROR) &&
			    ! ((mb4->mbxCommand == MBX_SLI_CONFIG) &&
			    (mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode ==
			    COMMON_OPCODE_RESET))) {
				mb->mbxStatus = MBX_HARDWARE_ERROR;

				mutex_exit(&EMLXS_PORT_LOCK);

				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_detail_msg,
				    "Hardware error reported. %s failed. "
				    "status=%x mb=%p",
				    emlxs_mb_cmd_xlate(mb->mbxCommand),
				    mb->mbxStatus, mb);

				return (MBX_HARDWARE_ERROR);
			}
		}
	}

	/* Initialize mailbox area */
	emlxs_mb_init(hba, mbq, flag, tmo);

	if (mb->mbxCommand == MBX_DOWN_LINK) {
		hba->sli.sli4.flag |= EMLXS_SLI4_DOWN_LINK;
	}

	mutex_exit(&EMLXS_PORT_LOCK);
	switch (flag) {

	case MBX_NOWAIT:
		if (mb->mbxCommand != MBX_HEARTBEAT) {
			if (mb->mbxCommand != MBX_DOWN_LOAD
			    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_detail_msg,
				    "Sending. %s: mb=%p NoWait. embedded %d",
				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
				    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
				    (mb4->un.varSLIConfig.be.embedded)));
			}
		}

		/* Claim the next MQ entry (wrapping at max_index) */
		iptr = hba->sli.sli4.mq.addr.virt;
		iptr += (hba->sli.sli4.mq.host_index * MAILBOX_CMD_SLI4_WSIZE);
		hba->sli.sli4.mq.host_index++;
		if (hba->sli.sli4.mq.host_index >= hba->sli.sli4.mq.max_index) {
			hba->sli.sli4.mq.host_index = 0;
		}

		if (mbq->bp) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "BDE virt %p phys %p size x%x",
			    ((MATCHMAP *)mbq->bp)->virt,
			    ((MATCHMAP *)mbq->bp)->phys,
			    ((MATCHMAP *)mbq->bp)->size);
			emlxs_data_dump(port, "DATA",
			    (uint32_t *)(((MATCHMAP *)mbq->bp)->virt), 30, 0);
		}
		rc = emlxs_sli4_issue_mq(port, (MAILBOX4 *)iptr, mb, tmo_local);
		break;

	case MBX_POLL:
		if (mb->mbxCommand != MBX_DOWN_LOAD
		    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Sending. %s: mb=%p Poll. embedded %d",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
			    (mb4->un.varSLIConfig.be.embedded)));
		}

		rc = emlxs_sli4_issue_bootstrap(hba, mb, tmo_local);

		/* Clean up the mailbox area */
		if (rc == MBX_TIMEOUT) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Timeout. %s: mb=%p tmo=%x Poll. embedded %d",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo,
			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
			    (mb4->un.varSLIConfig.be.embedded)));

			/* A bootstrap timeout is treated as fatal */
			hba->flag |= FC_MBOX_TIMEOUT;
			EMLXS_STATE_CHANGE(hba, FC_ERROR);
			emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);

		} else {
			if (mb->mbxCommand != MBX_DOWN_LOAD
			    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
				/*
				 * NOTE(review): 'rc' and 'mb->mbxStatus' below
				 * appear swapped relative to the
				 * "status=%x rc=%x" labels -- confirm
				 * intended argument order (log-only).
				 */
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_detail_msg,
				    "Completed. %s: mb=%p status=%x rc=%x"
				    " Poll. embedded %d",
				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
				    rc, mb->mbxStatus,
				    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
				    (mb4->un.varSLIConfig.be.embedded)));
			}

			/* Process the result */
			if (!(mbq->flag & MBQ_PASSTHRU)) {
				if (mbq->mbox_cmpl) {
					(void) (mbq->mbox_cmpl)(hba, mbq);
				}
			}

			emlxs_mb_fini(hba, NULL, mb->mbxStatus);
		}

		/* Check the non-embedded response header, if one was used */
		mp = (MATCHMAP *)mbq->nonembed;
		if (mp) {
			hdr_rsp = (mbox_rsp_hdr_t *)mp->virt;
			if (hdr_rsp->status) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_detail_msg,
				    "%s: MBX_NONEMBED_ERROR: 0x%x, 0x%x",
				    emlxs_mb_cmd_xlate(mb->mbxCommand),
				    hdr_rsp->status, hdr_rsp->extra_status);

				mb->mbxStatus = MBX_NONEMBED_ERROR;
			}
		}
		rc = mb->mbxStatus;

		/* Attempt to send pending mailboxes */
		mbq = (MAILBOXQ *)emlxs_mb_get(hba);
		if (mbq) {
			/* Attempt to send pending mailboxes */
			i = emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
			if ((i != MBX_BUSY) && (i != MBX_SUCCESS)) {
				emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
			}
		}
		break;

	case MBX_SLEEP:
		if (mb->mbxCommand != MBX_DOWN_LOAD
		    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Sending. %s: mb=%p Sleep. embedded %d",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
			    (mb4->un.varSLIConfig.be.embedded)));
		}

		/* Claim the next MQ entry (wrapping at max_index) */
		iptr = hba->sli.sli4.mq.addr.virt;
		iptr += (hba->sli.sli4.mq.host_index * MAILBOX_CMD_SLI4_WSIZE);
		hba->sli.sli4.mq.host_index++;
		if (hba->sli.sli4.mq.host_index >= hba->sli.sli4.mq.max_index) {
			hba->sli.sli4.mq.host_index = 0;
		}

		rc = emlxs_sli4_issue_mq(port, (MAILBOX4 *)iptr, mb, tmo_local);

		if (rc != MBX_SUCCESS) {
			break;
		}

		/* Wait for completion */
		/* The driver clock is timing the mailbox. */

		mutex_enter(&EMLXS_MBOX_LOCK);
		while (!(mbq->flag & MBQ_COMPLETED)) {
			cv_wait(&EMLXS_MBOX_CV, &EMLXS_MBOX_LOCK);
		}
		mutex_exit(&EMLXS_MBOX_LOCK);

		/* Check the non-embedded response header, if one was used */
		mp = (MATCHMAP *)mbq->nonembed;
		if (mp) {
			hdr_rsp = (mbox_rsp_hdr_t *)mp->virt;
			if (hdr_rsp->status) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_detail_msg,
				    "%s: MBX_NONEMBED_ERROR: 0x%x, 0x%x",
				    emlxs_mb_cmd_xlate(mb->mbxCommand),
				    hdr_rsp->status, hdr_rsp->extra_status);

				mb->mbxStatus = MBX_NONEMBED_ERROR;
			}
		}
		rc = mb->mbxStatus;

		if (rc == MBX_TIMEOUT) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Timeout. %s: mb=%p tmo=%x Sleep. embedded %d",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo,
			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
			    (mb4->un.varSLIConfig.be.embedded)));
		} else {
			if (mb->mbxCommand != MBX_DOWN_LOAD
			    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_detail_msg,
				    "Completed. %s: mb=%p status=%x Sleep. "
				    "embedded %d",
				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, rc,
				    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
				    (mb4->un.varSLIConfig.be.embedded)));
			}
		}
		break;
	}

	return (rc);

} /* emlxs_sli4_issue_mbox_cmd() */
4101
4102
4103
4104 /*ARGSUSED*/
4105 static uint32_t
emlxs_sli4_issue_mbox_cmd4quiesce(emlxs_hba_t * hba,MAILBOXQ * mbq,int32_t flag,uint32_t tmo)4106 emlxs_sli4_issue_mbox_cmd4quiesce(emlxs_hba_t *hba, MAILBOXQ *mbq, int32_t flag,
4107 uint32_t tmo)
4108 {
4109 emlxs_port_t *port = &PPORT;
4110 MAILBOX *mb;
4111 mbox_rsp_hdr_t *hdr_rsp;
4112 MATCHMAP *mp;
4113 uint32_t rc;
4114 uint32_t tmo_local;
4115
4116 mb = (MAILBOX *)mbq;
4117
4118 mb->mbxStatus = MBX_SUCCESS;
4119 rc = MBX_SUCCESS;
4120
4121 if (tmo < 30) {
4122 tmo = 30;
4123 }
4124
4125 /* Convert tmo seconds to 10 millisecond tics */
4126 tmo_local = tmo * 100;
4127
4128 flag = MBX_POLL;
4129
4130 /* Check for hardware error */
4131 if (hba->flag & FC_HARDWARE_ERROR) {
4132 mb->mbxStatus = MBX_HARDWARE_ERROR;
4133 return (MBX_HARDWARE_ERROR);
4134 }
4135
4136 /* Initialize mailbox area */
4137 emlxs_mb_init(hba, mbq, flag, tmo);
4138
4139 switch (flag) {
4140
4141 case MBX_POLL:
4142
4143 rc = emlxs_sli4_issue_bootstrap(hba, mb, tmo_local);
4144
4145 /* Clean up the mailbox area */
4146 if (rc == MBX_TIMEOUT) {
4147 hba->flag |= FC_MBOX_TIMEOUT;
4148 EMLXS_STATE_CHANGE(hba, FC_ERROR);
4149 emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);
4150
4151 } else {
4152 /* Process the result */
4153 if (!(mbq->flag & MBQ_PASSTHRU)) {
4154 if (mbq->mbox_cmpl) {
4155 (void) (mbq->mbox_cmpl)(hba, mbq);
4156 }
4157 }
4158
4159 emlxs_mb_fini(hba, NULL, mb->mbxStatus);
4160 }
4161
4162 mp = (MATCHMAP *)mbq->nonembed;
4163 if (mp) {
4164 hdr_rsp = (mbox_rsp_hdr_t *)mp->virt;
4165 if (hdr_rsp->status) {
4166 EMLXS_MSGF(EMLXS_CONTEXT,
4167 &emlxs_mbox_detail_msg,
4168 "%s: MBX_NONEMBED_ERROR: 0x%x, 0x%x",
4169 emlxs_mb_cmd_xlate(mb->mbxCommand),
4170 hdr_rsp->status, hdr_rsp->extra_status);
4171
4172 mb->mbxStatus = MBX_NONEMBED_ERROR;
4173 }
4174 }
4175 rc = mb->mbxStatus;
4176
4177 break;
4178 }
4179
4180 return (rc);
4181
4182 } /* emlxs_sli4_issue_mbox_cmd4quiesce() */
4183
4184
4185
#ifdef SFCT_SUPPORT
/*
 * Prepare a WQE for an SFCT (COMSTAR FC target mode) request.
 *
 * Three request types arrive here, distinguished by iocb->ULPCOMMAND:
 *   CMD_ABORT_XRI_CX  - abort a previously issued exchange
 *   CMD_FCP_TRSP64_CX - send the FCP response (status) frame
 *   anything else     - a data phase, issued as FCP_TRECEIVE64
 *                       (write) or FCP_TSEND64 (read)
 *
 * Returns IOERR_SUCCESS when the WQE is ready to be queued,
 * FC_TRAN_BUSY when no XRI could be allocated, or 0xff after the pkt
 * state has already been set to a failure (caller completes the pkt).
 */
/*ARGSUSED*/
extern uint32_t
emlxs_sli4_prep_fct_iocb(emlxs_port_t *port, emlxs_buf_t *cmd_sbp, int channel)
{
	emlxs_hba_t *hba = HBA;
	emlxs_config_t *cfg = &CFG;
	fct_cmd_t *fct_cmd;
	stmf_data_buf_t *dbuf;
	scsi_task_t *fct_task;
	fc_packet_t *pkt;
	CHANNEL *cp;
	XRIobj_t *xrip;
	emlxs_node_t *ndlp;
	IOCBQ *iocbq;
	IOCB *iocb;
	emlxs_wqe_t *wqe;
	ULP_SGE64 stage_sge;
	ULP_SGE64 *sge;
	RPIobj_t *rpip;
	int32_t sge_size;
	uint64_t sge_addr;
	uint32_t did;
	uint32_t timeout;

	ddi_dma_cookie_t *cp_cmd;

	pkt = PRIV2PKT(cmd_sbp);

	cp = (CHANNEL *)cmd_sbp->channel;

	iocbq = &cmd_sbp->iocbq;
	iocb = &iocbq->iocb;

	did = cmd_sbp->did;
	if (iocb->ULPCOMMAND == CMD_ABORT_XRI_CX) {
		/* Abort request for a previously issued exchange */

		ndlp = cmd_sbp->node;
		rpip = EMLXS_NODE_TO_RPI(port, ndlp);

		if (!rpip) {
			/* Use the fabric rpi */
			rpip = port->vpip->fabric_rpip;
		}

		/* Next allocate an Exchange for this command */
		xrip = emlxs_sli4_alloc_xri(port, cmd_sbp, rpip,
		    EMLXS_XRI_SOL_BLS_TYPE);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "Adapter Busy. Unable to allocate exchange. "
			    "did=0x%x", did);

			return (FC_TRAN_BUSY);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "FCT Abort Request: xri=%d iotag=%d sbp=%p rxid=%x",
		    xrip->XRI, xrip->iotag, cmd_sbp, pkt->pkt_cmd_fhdr.rx_id);

		cmd_sbp->xrip = xrip;

		cp->ulpSendCmd++;

		/* Initalize iocbq */
		iocbq->port = (void *)port;
		iocbq->node = (void *)ndlp;
		iocbq->channel = (void *)cp;

		/*
		 * Don't give the abort priority, we want the IOCB
		 * we are aborting to be processed first.
		 */
		iocbq->flag |= IOCB_SPECIAL;

		wqe = &iocbq->wqe;
		bzero((void *)wqe, sizeof (emlxs_wqe_t));

		wqe->un.Abort.Criteria = ABORT_XRI_TAG;
		wqe->RequestTag = xrip->iotag;
		wqe->AbortTag = pkt->pkt_cmd_fhdr.rx_id;
		wqe->Command = CMD_ABORT_XRI_CX;
		wqe->Class = CLASS3;
		wqe->CQId = 0xffff;
		wqe->CmdType = WQE_TYPE_ABORT;

		/* Issue the ABTS on the wire only if the link is up */
		if (hba->state >= FC_LINK_UP) {
			wqe->un.Abort.IA = 0;
		} else {
			wqe->un.Abort.IA = 1;
		}

		/* Set the pkt timer */
		cmd_sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
		    ((pkt->pkt_timeout > 0xff) ? 0 : 10);

		return (IOERR_SUCCESS);

	} else if (iocb->ULPCOMMAND == CMD_FCP_TRSP64_CX) {
		/* FCP response (status) frame */

		timeout = pkt->pkt_timeout;
		ndlp = cmd_sbp->node;
		if (!ndlp) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "Unable to find rpi. did=0x%x", did);

			emlxs_set_pkt_state(cmd_sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_INVALID_RPI, 0);
			return (0xff);
		}

		cp->ulpSendCmd++;

		/* Initalize iocbq */
		iocbq->port = (void *)port;
		iocbq->node = (void *)ndlp;
		iocbq->channel = (void *)cp;

		wqe = &iocbq->wqe;
		bzero((void *)wqe, sizeof (emlxs_wqe_t));

		/* Bind the XRI the target assigned for this exchange */
		xrip = emlxs_sli4_register_xri(port, cmd_sbp,
		    pkt->pkt_cmd_fhdr.rx_id, did);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "Unable to register xri %x. did=0x%x",
			    pkt->pkt_cmd_fhdr.rx_id, did);

			emlxs_set_pkt_state(cmd_sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_NO_XRI, 0);
			return (0xff);
		}

		cmd_sbp->iotag = xrip->iotag;
		cmd_sbp->channel = cp;

#if (EMLXS_MODREV >= EMLXS_MODREV3)
		cp_cmd = pkt->pkt_cmd_cookie;
#else
		cp_cmd = &pkt->pkt_cmd_cookie;
#endif	/* >= EMLXS_MODREV3 */

		sge_size = pkt->pkt_cmdlen;
		/* Make size a multiple of 4 */
		if (sge_size & 3) {
			sge_size = (sge_size + 3) & 0xfffffffc;
		}
		sge_addr = cp_cmd->dmac_laddress;
		sge = xrip->SGList->virt;

		/* Single SGE describing the response payload */
		stage_sge.addrHigh = PADDR_HI(sge_addr);
		stage_sge.addrLow = PADDR_LO(sge_addr);
		stage_sge.length = sge_size;
		stage_sge.offset = 0;
		stage_sge.type = 0;
		stage_sge.last = 1;

		/* Copy staged SGE into SGL */
		BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
		    (uint8_t *)sge, sizeof (ULP_SGE64));

		/* Words 0-3 */
		wqe->un.FcpCmd.Payload.addrHigh = stage_sge.addrHigh;
		wqe->un.FcpCmd.Payload.addrLow = stage_sge.addrLow;
		wqe->un.FcpCmd.Payload.tus.f.bdeSize = sge_size;
		wqe->un.FcpCmd.PayloadLength = sge_size;

		/* Word 6 */
		wqe->ContextTag = ndlp->nlp_Rpi;
		wqe->XRITag = xrip->XRI;

		/* Word 7 */
		wqe->Command = iocb->ULPCOMMAND;
		wqe->Class = cmd_sbp->class;
		wqe->ContextType = WQE_RPI_CONTEXT;
		wqe->Timer = ((timeout > 0xff) ? 0 : timeout);

		/* Word 8 */
		wqe->AbortTag = 0;

		/* Word 9 */
		wqe->RequestTag = xrip->iotag;
		wqe->OXId = (uint16_t)xrip->rx_id;

		/* Word 10 */
		if (xrip->flag & EMLXS_XRI_BUSY) {
			wqe->XC = 1;
		}

		if (!(hba->sli.sli4.param.PHWQ)) {
			wqe->QOSd = 1;
			wqe->DBDE = 1; /* Data type for BDE 0 */
		}

		/* Word 11 */
		wqe->CmdType = WQE_TYPE_TRSP;
		wqe->CQId = (uint16_t)0xffff; /* default CQ for response */

		/* Set the pkt timer */
		cmd_sbp->ticks = hba->timer_tics + timeout +
		    ((timeout > 0xff) ? 0 : 10);

		if (pkt->pkt_cmdlen) {
			EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
			    DDI_DMA_SYNC_FORDEV);
		}

		return (IOERR_SUCCESS);
	}

	/* Data phase (FCP_TRECEIVE64 / FCP_TSEND64) */
	fct_cmd = cmd_sbp->fct_cmd;
	did = fct_cmd->cmd_rportid;
	dbuf = cmd_sbp->fct_buf;
	fct_task = (scsi_task_t *)fct_cmd->cmd_specific;
	ndlp = *(emlxs_node_t **)fct_cmd->cmd_rp->rp_fca_private;
	if (!ndlp) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Unable to find rpi. did=0x%x", did);

		emlxs_set_pkt_state(cmd_sbp, IOSTAT_LOCAL_REJECT,
		    IOERR_INVALID_RPI, 0);
		return (0xff);
	}


	/* Initalize iocbq */
	iocbq->port = (void *) port;
	iocbq->node = (void *)ndlp;
	iocbq->channel = (void *) cp;

	wqe = &iocbq->wqe;
	bzero((void *)wqe, sizeof (emlxs_wqe_t));

	/* The XRI was allocated when the command was first received */
	xrip = cmd_sbp->xrip;
	if (!xrip) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Unable to find xri. did=0x%x", did);

		emlxs_set_pkt_state(cmd_sbp, IOSTAT_LOCAL_REJECT,
		    IOERR_NO_XRI, 0);
		return (0xff);
	}

	if (emlxs_sli4_register_xri(port, cmd_sbp,
	    xrip->XRI, ndlp->nlp_DID) == NULL) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Unable to register xri. did=0x%x", did);

		emlxs_set_pkt_state(cmd_sbp, IOSTAT_LOCAL_REJECT,
		    IOERR_NO_XRI, 0);
		return (0xff);
	}
	cmd_sbp->iotag = xrip->iotag;
	cmd_sbp->channel = cp;

	/* Data phase timeout: 2*RATOV, at least 60s, or effectively off */
	if (cfg[CFG_TIMEOUT_ENABLE].current) {
		timeout =
		    ((2 * hba->fc_ratov) < 60) ? 60 : (2 * hba->fc_ratov);
	} else {
		timeout = 0x80000000;
	}
	cmd_sbp->ticks =
	    hba->timer_tics + timeout + ((timeout > 0xff) ? 0 : 10);


	iocb->ULPCT = 0;
	if (fct_task->task_flags & TF_WRITE_DATA) {
		iocb->ULPCOMMAND = CMD_FCP_TRECEIVE64_CX;
		wqe->CmdType = WQE_TYPE_TRECEIVE;	/* Word 11 */

	} else { /* TF_READ_DATA */

		iocb->ULPCOMMAND = CMD_FCP_TSEND64_CX;
		wqe->CmdType = WQE_TYPE_TSEND;	/* Word 11 */

		if ((dbuf->db_data_size >=
		    fct_task->task_expected_xfer_length)) {
			/* enable auto-rsp AP feature */
			wqe->AR = 0x1;
			iocb->ULPCT = 0x1; /* for cmpl */
		}
	}

	/* Build the SGL for the data buffer */
	(void) emlxs_sli4_fct_bde_setup(port, cmd_sbp);

	/* Word 6 */
	wqe->ContextTag = ndlp->nlp_Rpi;
	wqe->XRITag = xrip->XRI;

	/* Word 7 */
	wqe->Command = iocb->ULPCOMMAND;
	wqe->Class = cmd_sbp->class;
	wqe->ContextType = WQE_RPI_CONTEXT;
	wqe->Timer = ((timeout > 0xff) ? 0 : timeout);
	wqe->PU = 1;

	/* Word 8 */
	wqe->AbortTag = 0;

	/* Word 9 */
	wqe->RequestTag = xrip->iotag;
	wqe->OXId = (uint16_t)fct_cmd->cmd_oxid;

	/* Word 10 */
	if (xrip->flag & EMLXS_XRI_BUSY) {
		wqe->XC = 1;
	}

	if (!(hba->sli.sli4.param.PHWQ)) {
		wqe->QOSd = 1;
		wqe->DBDE = 1; /* Data type for BDE 0 */
	}

	/* Word 11 */
	wqe->CQId = (uint16_t)0xffff; /* default CQ for response */

	/* Word 12 */
	wqe->CmdSpecific = dbuf->db_data_size;

	return (IOERR_SUCCESS);

} /* emlxs_sli4_prep_fct_iocb() */
#endif /* SFCT_SUPPORT */
4512
4513
/*ARGSUSED*/
/*
 * emlxs_sli4_prep_fcp_iocb()
 *
 * Prepare a solicited FCP command for issue on an SLI4 port.  Builds
 * both the SLI4 work queue entry (WQE) and a shadow IOCB for the
 * packet wrapped by sbp, after resolving the destination node's RPI
 * and allocating an XRI exchange for the I/O.
 *
 *   port    - virtual port issuing the command
 *   sbp     - driver buffer wrapping the ULP fc_packet_t
 *   channel - index of the hardware channel to issue on
 *
 * Returns FC_SUCCESS on success, FC_TRAN_BUSY when resources are
 * unavailable (no free XRI, or SGE setup failed), or 0xff when the
 * RPI lookup fails (in that case the packet completion state has
 * already been set via emlxs_set_pkt_state()).
 */
extern uint32_t
emlxs_sli4_prep_fcp_iocb(emlxs_port_t *port, emlxs_buf_t *sbp, int channel)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	CHANNEL *cp;
	RPIobj_t *rpip;
	XRIobj_t *xrip;
	emlxs_wqe_t *wqe;
	IOCBQ *iocbq;
	IOCB *iocb;
	NODELIST *node;
	uint16_t iotag;
	uint32_t did;

	pkt = PRIV2PKT(sbp);
	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
	cp = &hba->chan[channel];

	iocbq = &sbp->iocbq;
	iocbq->channel = (void *) cp;
	iocbq->port = (void *) port;

	wqe = &iocbq->wqe;
	iocb = &iocbq->iocb;
	/* Start from clean WQE/IOCB images; only set what this I/O needs */
	bzero((void *)wqe, sizeof (emlxs_wqe_t));
	bzero((void *)iocb, sizeof (IOCB));

	/* Find target node object */
	node = (NODELIST *)iocbq->node;
	rpip = EMLXS_NODE_TO_RPI(port, node);

	if (!rpip) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Unable to find rpi. did=0x%x", did);

		emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
		    IOERR_INVALID_RPI, 0);
		return (0xff);
	}

	sbp->channel = cp;
	/* Next allocate an Exchange for this command */
	xrip = emlxs_sli4_alloc_xri(port, sbp, rpip,
	    EMLXS_XRI_SOL_FCP_TYPE);

	if (!xrip) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to allocate exchange. did=0x%x", did);

		return (FC_TRAN_BUSY);
	}
	sbp->bmp = NULL;
	iotag = sbp->iotag;

#ifdef DEBUG_FASTPATH
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "FCP: Prep xri=%d iotag=%d oxid=%x rpi=%d",
	    xrip->XRI, xrip->iotag, xrip->rx_id, rpip->RPI);
#endif /* DEBUG_FASTPATH */

	/* Indicate this is a FCP cmd */
	iocbq->flag |= IOCB_FCP_CMD;

	/* Populate the XRI's scatter/gather list from the packet DMA */
	if (emlxs_sli4_bde_setup(port, sbp)) {
		emlxs_sli4_free_xri(port, sbp, xrip, 1);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to setup SGE. did=0x%x", did);

		return (FC_TRAN_BUSY);
	}

	/* DEBUG */
#ifdef DEBUG_FCP
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "FCP: SGLaddr virt %p phys %p size %d", xrip->SGList->virt,
	    xrip->SGList->phys, pkt->pkt_datalen);
	emlxs_data_dump(port, "FCP: SGL",
	    (uint32_t *)xrip->SGList->virt, 32, 0);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "FCP: CMD virt %p len %d:%d:%d",
	    pkt->pkt_cmd, pkt->pkt_cmdlen, pkt->pkt_rsplen, pkt->pkt_datalen);
	emlxs_data_dump(port, "FCP: CMD", (uint32_t *)pkt->pkt_cmd, 10, 0);
#endif /* DEBUG_FCP */

	/* Flush the SGL to memory before the hardware can fetch it */
	EMLXS_MPDATA_SYNC(xrip->SGList->dma_handle, 0,
	    xrip->SGList->size, DDI_DMA_SYNC_FORDEV);

	/* if device is FCP-2 device, set the following bit */
	/* that says to run the FC-TAPE protocol. */
	if (node->nlp_fcp_info & NLP_FCP_2_DEVICE) {
		wqe->ERP = 1;
	}

	/* Pick the WQE opcode from the data direction of the request */
	if (pkt->pkt_datalen == 0) {
		iocb->ULPCOMMAND = CMD_FCP_ICMND64_CR;
		wqe->Command = CMD_FCP_ICMND64_CR;
		wqe->CmdType = WQE_TYPE_FCP_DATA_IN;
	} else if (pkt->pkt_tran_type == FC_PKT_FCP_READ) {
		iocb->ULPCOMMAND = CMD_FCP_IREAD64_CR;
		wqe->Command = CMD_FCP_IREAD64_CR;
		wqe->CmdType = WQE_TYPE_FCP_DATA_IN;
		wqe->PU = PARM_XFER_CHECK;
	} else {
		iocb->ULPCOMMAND = CMD_FCP_IWRITE64_CR;
		wqe->Command = CMD_FCP_IWRITE64_CR;
		wqe->CmdType = WQE_TYPE_FCP_DATA_OUT;
	}
	wqe->un.FcpCmd.TotalTransferCount = pkt->pkt_datalen;

	if (!(hba->sli.sli4.param.PHWQ)) {
		wqe->DBDE = 1; /* Data type for BDE 0 */
	}
	wqe->ContextTag = rpip->RPI;
	wqe->ContextType = WQE_RPI_CONTEXT;
	wqe->XRITag = xrip->XRI;
	/* Timer field is 8 bits; a timeout too large to encode means "none" */
	wqe->Timer =
	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);

	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_CHAINED_SEQ) {
		wqe->CCPE = 1;
		wqe->CCP = pkt->pkt_cmd_fhdr.rsvd;
	}

	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
	case FC_TRAN_CLASS2:
		wqe->Class = CLASS2;
		break;
	case FC_TRAN_CLASS3:
	default:
		wqe->Class = CLASS3;
		break;
	}
	sbp->class = wqe->Class;
	wqe->RequestTag = iotag;
	wqe->CQId = (uint16_t)0xffff; /* default CQ for response */

	return (FC_SUCCESS);
} /* emlxs_sli4_prep_fcp_iocb() */
4654
4655
/*ARGSUSED*/
/*
 * emlxs_sli4_prep_ip_iocb()
 *
 * Stub: unconditionally reports FC_TRAN_BUSY for IP-over-FC requests.
 * Presumably IP traffic is not supported on the SLI4 path -- TODO
 * confirm against the SLI3 counterpart.
 */
static uint32_t
emlxs_sli4_prep_ip_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	return (FC_TRAN_BUSY);

} /* emlxs_sli4_prep_ip_iocb() */
4663
4664
/*ARGSUSED*/
/*
 * emlxs_sli4_prep_els_iocb()
 *
 * Prepare an ELS frame for issue on an SLI4 port.  Two cases:
 *
 *   FC_PKT_OUTBOUND  - an ELS *response* to a previously received
 *	unsolicited request: re-registers the existing XRI (rx_id)
 *	and builds an XMIT_ELS_RSP64_CX WQE.
 *   otherwise        - an ELS *request*: allocates a new XRI and
 *	builds an ELS_REQUEST64_CR WQE; the command-specific switch
 *	below selects the context type (VPI/RPI/FCFI) and ELSId.
 *
 * The command (and, for requests, response) payloads are staged into
 * a local ULP_SGE64 and byte-swap copied into the XRI's SGL.
 *
 * Returns FC_SUCCESS, FC_TRAN_BUSY (no XRI available), or 0xff when
 * an XRI/RPI lookup fails (packet state already set for completion).
 */
static uint32_t
emlxs_sli4_prep_els_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	IOCBQ *iocbq;
	IOCB *iocb;
	emlxs_wqe_t *wqe;
	FCFIobj_t *fcfp;
	RPIobj_t *reserved_rpip = NULL;
	RPIobj_t *rpip = NULL;
	XRIobj_t *xrip;
	CHANNEL *cp;
	uint32_t did;
	uint32_t cmd;
	ULP_SGE64 stage_sge;
	ULP_SGE64 *sge;
	ddi_dma_cookie_t *cp_cmd;
	ddi_dma_cookie_t *cp_resp;
	emlxs_node_t *node;

	pkt = PRIV2PKT(sbp);
	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);

	iocbq = &sbp->iocbq;
	wqe = &iocbq->wqe;
	iocb = &iocbq->iocb;
	bzero((void *)wqe, sizeof (emlxs_wqe_t));
	bzero((void *)iocb, sizeof (IOCB));
	/* All ELS traffic goes out on the dedicated ELS channel */
	cp = &hba->chan[hba->channel_els];

	/* Initalize iocbq */
	iocbq->port = (void *) port;
	iocbq->channel = (void *) cp;

	sbp->channel = cp;
	sbp->bmp = NULL;

	/* Older MODREVs embed the cookie in the packet by value */
#if (EMLXS_MODREV >= EMLXS_MODREV3)
	cp_cmd = pkt->pkt_cmd_cookie;
	cp_resp = pkt->pkt_resp_cookie;
#else
	cp_cmd = &pkt->pkt_cmd_cookie;
	cp_resp = &pkt->pkt_resp_cookie;
#endif	/* >= EMLXS_MODREV3 */

	/* CMD payload */
	sge = &stage_sge;
	sge->addrHigh = PADDR_HI(cp_cmd->dmac_laddress);
	sge->addrLow = PADDR_LO(cp_cmd->dmac_laddress);
	sge->length = pkt->pkt_cmdlen;
	sge->offset = 0;
	sge->type = 0;

	/* First word of the payload is the ELS command code */
	cmd = *((uint32_t *)pkt->pkt_cmd);
	cmd &= ELS_CMD_MASK;

	/* Initalize iocb */
	if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
		/* ELS Response */

		sbp->xrip = 0;
		xrip = emlxs_sli4_register_xri(port, sbp,
		    pkt->pkt_cmd_fhdr.rx_id, did);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
			    "Unable to find XRI. rxid=%x",
			    pkt->pkt_cmd_fhdr.rx_id);

			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_NO_XRI, 0);
			return (0xff);
		}

		rpip = xrip->rpip;

		if (!rpip) {
			/* This means that we had a node registered */
			/* when the unsol request came in but the node */
			/* has since been unregistered. */
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
			    "Unable to find RPI. rxid=%x",
			    pkt->pkt_cmd_fhdr.rx_id);

			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_INVALID_RPI, 0);
			return (0xff);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "ELS: Prep xri=%d iotag=%d oxid=%x rpi=%d",
		    xrip->XRI, xrip->iotag, xrip->rx_id, rpip->RPI);

		iocb->ULPCOMMAND = CMD_XMIT_ELS_RSP64_CX;
		wqe->Command = CMD_XMIT_ELS_RSP64_CX;
		wqe->CmdType = WQE_TYPE_GEN;
		if (!(hba->sli.sli4.param.PHWQ)) {
			wqe->DBDE = 1; /* Data type for BDE 0 */
		}

		/* ElsRsp/ElsCmd union members overlay the same words */
		wqe->un.ElsRsp.Payload.addrHigh = sge->addrHigh;
		wqe->un.ElsRsp.Payload.addrLow = sge->addrLow;
		wqe->un.ElsRsp.Payload.tus.f.bdeSize = pkt->pkt_cmdlen;
		wqe->un.ElsCmd.PayloadLength = pkt->pkt_cmdlen;

		wqe->un.ElsRsp.RemoteId = did;
		wqe->PU = 0x3;
		wqe->OXId = xrip->rx_id;

		sge->last = 1;
		/* Now sge is fully staged */

		sge = xrip->SGList->virt;
		BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
		    sizeof (ULP_SGE64));

		if (rpip->RPI == FABRIC_RPI) {
			wqe->ContextTag = port->vpip->VPI;
			wqe->ContextType = WQE_VPI_CONTEXT;
		} else {
			wqe->ContextTag = rpip->RPI;
			wqe->ContextType = WQE_RPI_CONTEXT;
		}

		/* FLOGI ACC (pt-to-pt) carries the well-known F_Port ID */
		if ((cmd == ELS_CMD_ACC) && (sbp->ucmd == ELS_CMD_FLOGI)) {
			wqe->un.ElsCmd.SP = 1;
			wqe->un.ElsCmd.LocalId = 0xFFFFFE;
		}

	} else {
		/* ELS Request */

		fcfp = port->vpip->vfip->fcfp;
		node = (emlxs_node_t *)iocbq->node;
		rpip = EMLXS_NODE_TO_RPI(port, node);

		if (!rpip) {
			/* Use the fabric rpi */
			rpip = port->vpip->fabric_rpip;
		}

		/* Next allocate an Exchange for this command */
		xrip = emlxs_sli4_alloc_xri(port, sbp, rpip,
		    EMLXS_XRI_SOL_ELS_TYPE);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "Adapter Busy. Unable to allocate exchange. "
			    "did=0x%x", did);

			return (FC_TRAN_BUSY);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "ELS: Prep xri=%d iotag=%d rpi=%d",
		    xrip->XRI, xrip->iotag, rpip->RPI);

		iocb->ULPCOMMAND = CMD_ELS_REQUEST64_CR;
		wqe->Command = CMD_ELS_REQUEST64_CR;
		wqe->CmdType = WQE_TYPE_ELS;
		if (!(hba->sli.sli4.param.PHWQ)) {
			wqe->DBDE = 1; /* Data type for BDE 0 */
		}

		wqe->un.ElsCmd.Payload.addrHigh = sge->addrHigh;
		wqe->un.ElsCmd.Payload.addrLow = sge->addrLow;
		wqe->un.ElsCmd.Payload.tus.f.bdeSize = pkt->pkt_cmdlen;

		wqe->un.ElsCmd.RemoteId = did;
		wqe->Timer = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);

		/* setup for rsp */
		iocb->un.elsreq64.remoteID = (did == BCAST_DID) ? 0 : did;
		iocb->ULPPU = 1;	/* Wd4 is relative offset */

		/* First SGE (command) is not the last; response SGE follows */
		sge->last = 0;

		sge = xrip->SGList->virt;
		BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
		    sizeof (ULP_SGE64));

		wqe->un.ElsCmd.PayloadLength =
		    pkt->pkt_cmdlen; /* Byte offset of rsp data */

		/* RSP payload */
		sge = &stage_sge;
		sge->addrHigh = PADDR_HI(cp_resp->dmac_laddress);
		sge->addrLow = PADDR_LO(cp_resp->dmac_laddress);
		sge->length = pkt->pkt_rsplen;
		sge->offset = 0;
		sge->last = 1;
		/* Now sge is fully staged */

		sge = xrip->SGList->virt;
		sge++;
		BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
		    sizeof (ULP_SGE64));
#ifdef DEBUG_ELS
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "ELS: SGLaddr virt %p phys %p",
		    xrip->SGList->virt, xrip->SGList->phys);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "ELS: PAYLOAD virt %p phys %p",
		    pkt->pkt_cmd, cp_cmd->dmac_laddress);
		emlxs_data_dump(port, "ELS: SGL",
		    (uint32_t *)xrip->SGList->virt, 12, 0);
#endif /* DEBUG_ELS */

		/* Per-command context selection and ELSId */
		switch (cmd) {
		case ELS_CMD_FLOGI:
			wqe->un.ElsCmd.SP = 1;

			if ((hba->sli_intf & SLI_INTF_IF_TYPE_MASK) ==
			    SLI_INTF_IF_TYPE_0) {
				wqe->ContextTag = fcfp->FCFI;
				wqe->ContextType = WQE_FCFI_CONTEXT;
			} else {
				wqe->ContextTag = port->vpip->VPI;
				wqe->ContextType = WQE_VPI_CONTEXT;
			}

			if (hba->flag & FC_FIP_SUPPORTED) {
				wqe->CmdType |= WQE_TYPE_MASK_FIP;
			}

			if (hba->topology == TOPOLOGY_LOOP) {
				wqe->un.ElsCmd.LocalId = port->did;
			}

			wqe->ELSId = WQE_ELSID_FLOGI;
			break;
		case ELS_CMD_FDISC:
			wqe->un.ElsCmd.SP = 1;
			wqe->ContextTag = port->vpip->VPI;
			wqe->ContextType = WQE_VPI_CONTEXT;

			if (hba->flag & FC_FIP_SUPPORTED) {
				wqe->CmdType |= WQE_TYPE_MASK_FIP;
			}

			wqe->ELSId = WQE_ELSID_FDISC;
			break;
		case ELS_CMD_LOGO:
			if ((did == FABRIC_DID) &&
			    (hba->flag & FC_FIP_SUPPORTED)) {
				wqe->CmdType |= WQE_TYPE_MASK_FIP;
			}

			wqe->ContextTag = port->vpip->VPI;
			wqe->ContextType = WQE_VPI_CONTEXT;
			wqe->ELSId = WQE_ELSID_LOGO;
			break;
		case ELS_CMD_PLOGI:
			if (rpip->RPI == FABRIC_RPI) {
				if (hba->flag & FC_PT_TO_PT) {
					wqe->un.ElsCmd.SP = 1;
					wqe->un.ElsCmd.LocalId = port->did;
				}

				wqe->ContextTag = port->vpip->VPI;
				wqe->ContextType = WQE_VPI_CONTEXT;
			} else {
				wqe->ContextTag = rpip->RPI;
				wqe->ContextType = WQE_RPI_CONTEXT;
			}

			wqe->ELSId = WQE_ELSID_PLOGI;
			break;
		default:
			if (rpip->RPI == FABRIC_RPI) {
				wqe->ContextTag = port->vpip->VPI;
				wqe->ContextType = WQE_VPI_CONTEXT;
			} else {
				wqe->ContextTag = rpip->RPI;
				wqe->ContextType = WQE_RPI_CONTEXT;
			}

			wqe->ELSId = WQE_ELSID_CMD;
			break;
		}

#ifdef SFCT_SUPPORT
		/* This allows fct to abort the request */
		if (sbp->fct_cmd) {
			sbp->fct_cmd->cmd_oxid = xrip->XRI;
			sbp->fct_cmd->cmd_rxid = 0xFFFF;
		}
#endif /* SFCT_SUPPORT */
	}

	/* VPI-context commands need an RPI reserved for the exchange */
	if (wqe->ContextType == WQE_VPI_CONTEXT) {
		reserved_rpip = emlxs_rpi_reserve_notify(port, did, xrip);

		if (!reserved_rpip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
			    "Unable to alloc reserved RPI. rxid=%x. Rejecting.",
			    pkt->pkt_cmd_fhdr.rx_id);

			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_INVALID_RPI, 0);
			return (0xff);
		}

		/* Store the reserved rpi */
		if (wqe->Command == CMD_ELS_REQUEST64_CR) {
			wqe->OXId = reserved_rpip->RPI;
		} else {
			wqe->CmdSpecific = reserved_rpip->RPI;
		}
	}

	/* Flush the staged SGL to memory before hardware fetches it */
	EMLXS_MPDATA_SYNC(xrip->SGList->dma_handle, 0,
	    xrip->SGList->size, DDI_DMA_SYNC_FORDEV);

	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_CHAINED_SEQ) {
		wqe->CCPE = 1;
		wqe->CCP = pkt->pkt_cmd_fhdr.rsvd;
	}

	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
	case FC_TRAN_CLASS2:
		wqe->Class = CLASS2;
		break;
	case FC_TRAN_CLASS3:
	default:
		wqe->Class = CLASS3;
		break;
	}
	sbp->class = wqe->Class;
	wqe->XRITag = xrip->XRI;
	wqe->RequestTag = xrip->iotag;
	wqe->CQId = (uint16_t)0xffff; /* default CQ for response */
	return (FC_SUCCESS);

} /* emlxs_sli4_prep_els_iocb() */
5002
5003
/*ARGSUSED*/
/*
 * emlxs_sli4_prep_ct_iocb()
 *
 * Prepare a CT (common transport / generic services) frame for issue
 * on an SLI4 port.  Two cases:
 *
 *   FC_PKT_OUTBOUND  - a CT *response* to a received unsolicited
 *	request: re-registers the existing XRI and builds an
 *	XMIT_SEQUENCE64_CR WQE.
 *   otherwise        - a CT *request*: allocates a new XRI and
 *	builds a GEN_REQUEST64_CR WQE.
 *
 * Returns FC_SUCCESS, FC_TRAN_BUSY (no XRI, or SGE setup failed), or
 * 0xff when an XRI/RPI lookup fails (packet state already set).
 */
static uint32_t
emlxs_sli4_prep_ct_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	IOCBQ *iocbq;
	IOCB *iocb;
	emlxs_wqe_t *wqe;
	NODELIST *node = NULL;
	CHANNEL *cp;
	RPIobj_t *rpip;
	XRIobj_t *xrip;
	uint32_t did;

	pkt = PRIV2PKT(sbp);
	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);

	iocbq = &sbp->iocbq;
	wqe = &iocbq->wqe;
	iocb = &iocbq->iocb;
	bzero((void *)wqe, sizeof (emlxs_wqe_t));
	bzero((void *)iocb, sizeof (IOCB));

	/* All CT traffic goes out on the dedicated CT channel */
	cp = &hba->chan[hba->channel_ct];

	iocbq->port = (void *) port;
	iocbq->channel = (void *) cp;

	sbp->bmp = NULL;
	sbp->channel = cp;

	/* Initalize wqe */
	if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
		/* CT Response */

		sbp->xrip = 0;
		xrip = emlxs_sli4_register_xri(port, sbp,
		    pkt->pkt_cmd_fhdr.rx_id, did);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
			    "Unable to find XRI. rxid=%x",
			    pkt->pkt_cmd_fhdr.rx_id);

			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_NO_XRI, 0);
			return (0xff);
		}

		rpip = xrip->rpip;

		if (!rpip) {
			/* This means that we had a node registered */
			/* when the unsol request came in but the node */
			/* has since been unregistered. */
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
			    "Unable to find RPI. rxid=%x",
			    pkt->pkt_cmd_fhdr.rx_id);

			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_INVALID_RPI, 0);
			return (0xff);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CT: Prep xri=%d iotag=%d oxid=%x rpi=%d",
		    xrip->XRI, xrip->iotag, xrip->rx_id, rpip->RPI);

		if (emlxs_sli4_bde_setup(port, sbp)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "Adapter Busy. Unable to setup SGE. did=0x%x", did);

			return (FC_TRAN_BUSY);
		}

		if (!(hba->model_info.chip & EMLXS_BE_CHIPS)) {
			wqe->un.XmitSeq.Rsvd0 = 0; /* Word3 now reserved */
		}

		if (!(hba->sli.sli4.param.PHWQ)) {
			wqe->DBDE = 1; /* Data type for BDE 0 */
		}

		iocb->ULPCOMMAND = CMD_XMIT_SEQUENCE64_CR;
		wqe->CmdType = WQE_TYPE_GEN;
		wqe->Command = CMD_XMIT_SEQUENCE64_CR;
		wqe->LenLoc = 2;

		/* Loopback responses keep the exchange open (xo) */
		if (((SLI_CT_REQUEST *) pkt->pkt_cmd)->CommandResponse.bits.
		    CmdRsp == (LE_SWAP16(SLI_CT_LOOPBACK))) {
			wqe->un.XmitSeq.xo = 1;
		} else {
			wqe->un.XmitSeq.xo = 0;
		}

		if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_LAST_SEQ) {
			wqe->un.XmitSeq.ls = 1;
		}

		if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
			wqe->un.XmitSeq.si = 1;
		}

		wqe->un.XmitSeq.DFctl  = pkt->pkt_cmd_fhdr.df_ctl;
		wqe->un.XmitSeq.Rctl  = pkt->pkt_cmd_fhdr.r_ctl;
		wqe->un.XmitSeq.Type  = pkt->pkt_cmd_fhdr.type;
		wqe->OXId = xrip->rx_id;
		wqe->XC = 0; /* xri_tag is a new exchange */
		wqe->CmdSpecific = wqe->un.GenReq.Payload.tus.f.bdeSize;

	} else {
		/* CT Request */

		node = (emlxs_node_t *)iocbq->node;
		rpip = EMLXS_NODE_TO_RPI(port, node);

		if (!rpip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
			    "Unable to find rpi. did=0x%x rpi=%d",
			    did, node->nlp_Rpi);

			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_INVALID_RPI, 0);
			return (0xff);
		}

		/* Next allocate an Exchange for this command */
		xrip = emlxs_sli4_alloc_xri(port, sbp, rpip,
		    EMLXS_XRI_SOL_CT_TYPE);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "Adapter Busy. Unable to allocate exchange. "
			    "did=0x%x", did);

			return (FC_TRAN_BUSY);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CT: Prep xri=%d iotag=%d oxid=%x rpi=%d",
		    xrip->XRI, xrip->iotag, xrip->rx_id, rpip->RPI);

		if (emlxs_sli4_bde_setup(port, sbp)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "Adapter Busy. Unable to setup SGE. did=0x%x", did);

			emlxs_sli4_free_xri(port, sbp, xrip, 1);
			return (FC_TRAN_BUSY);
		}

		if (!(hba->sli.sli4.param.PHWQ)) {
			wqe->DBDE = 1; /* Data type for BDE 0 */
		}

		iocb->ULPCOMMAND = CMD_GEN_REQUEST64_CR;
		wqe->CmdType = WQE_TYPE_GEN;
		wqe->Command = CMD_GEN_REQUEST64_CR;
		wqe->un.GenReq.la = 1;
		wqe->un.GenReq.DFctl  = pkt->pkt_cmd_fhdr.df_ctl;
		wqe->un.GenReq.Rctl  = pkt->pkt_cmd_fhdr.r_ctl;
		wqe->un.GenReq.Type  = pkt->pkt_cmd_fhdr.type;

#ifdef DEBUG_CT
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CT: SGLaddr virt %p phys %p", xrip->SGList->virt,
		    xrip->SGList->phys);
		emlxs_data_dump(port, "CT: SGL", (uint32_t *)xrip->SGList->virt,
		    12, 0);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CT: CMD virt %p len %d:%d",
		    pkt->pkt_cmd, pkt->pkt_cmdlen, pkt->pkt_rsplen);
		emlxs_data_dump(port, "CT: DATA", (uint32_t *)pkt->pkt_cmd,
		    20, 0);
#endif /* DEBUG_CT */

#ifdef SFCT_SUPPORT
		/* This allows fct to abort the request */
		if (sbp->fct_cmd) {
			sbp->fct_cmd->cmd_oxid = xrip->XRI;
			sbp->fct_cmd->cmd_rxid = 0xFFFF;
		}
#endif /* SFCT_SUPPORT */
	}

	/* Setup for rsp */
	iocb->un.genreq64.w5.hcsw.Rctl  = pkt->pkt_cmd_fhdr.r_ctl;
	iocb->un.genreq64.w5.hcsw.Type  = pkt->pkt_cmd_fhdr.type;
	iocb->un.genreq64.w5.hcsw.Dfctl  = pkt->pkt_cmd_fhdr.df_ctl;
	iocb->ULPPU = 1;	/* Wd4 is relative offset */

	/* Flush the SGL to memory before hardware fetches it */
	EMLXS_MPDATA_SYNC(xrip->SGList->dma_handle, 0,
	    xrip->SGList->size, DDI_DMA_SYNC_FORDEV);

	wqe->ContextTag = rpip->RPI;
	wqe->ContextType = WQE_RPI_CONTEXT;
	wqe->XRITag = xrip->XRI;
	/* Timer field is 8 bits wide; too-large timeout means "none" */
	wqe->Timer = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);

	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_CHAINED_SEQ) {
		wqe->CCPE = 1;
		wqe->CCP = pkt->pkt_cmd_fhdr.rsvd;
	}

	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
	case FC_TRAN_CLASS2:
		wqe->Class = CLASS2;
		break;
	case FC_TRAN_CLASS3:
	default:
		wqe->Class = CLASS3;
		break;
	}
	sbp->class = wqe->Class;
	wqe->RequestTag = xrip->iotag;
	wqe->CQId = (uint16_t)0xffff; /* default CQ for response */
	return (FC_SUCCESS);

} /* emlxs_sli4_prep_ct_iocb() */
5223
5224
5225 /*ARGSUSED*/
5226 static int
emlxs_sli4_read_eq(emlxs_hba_t * hba,EQ_DESC_t * eq)5227 emlxs_sli4_read_eq(emlxs_hba_t *hba, EQ_DESC_t *eq)
5228 {
5229 uint32_t *ptr;
5230 EQE_u eqe;
5231 int rc = 0;
5232 off_t offset;
5233
5234 mutex_enter(&EMLXS_PORT_LOCK);
5235
5236 ptr = eq->addr.virt;
5237 ptr += eq->host_index;
5238
5239 offset = (off_t)((uint64_t)((unsigned long)
5240 eq->addr.virt) -
5241 (uint64_t)((unsigned long)
5242 hba->sli.sli4.slim2.virt));
5243
5244 EMLXS_MPDATA_SYNC(eq->addr.dma_handle, offset,
5245 4096, DDI_DMA_SYNC_FORKERNEL);
5246
5247 eqe.word = *ptr;
5248 eqe.word = BE_SWAP32(eqe.word);
5249
5250 if ((eqe.word & EQE_VALID) == eq->qe_valid) {
5251 rc = 1;
5252 }
5253
5254 mutex_exit(&EMLXS_PORT_LOCK);
5255
5256 return (rc);
5257
5258 } /* emlxs_sli4_read_eq */
5259
5260
5261 static void
emlxs_sli4_poll_intr(emlxs_hba_t * hba)5262 emlxs_sli4_poll_intr(emlxs_hba_t *hba)
5263 {
5264 int rc = 0;
5265 int i;
5266 char arg[] = {0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7};
5267
5268 /* Check attention bits once and process if required */
5269
5270 for (i = 0; i < hba->intr_count; i++) {
5271 rc = emlxs_sli4_read_eq(hba, &hba->sli.sli4.eq[i]);
5272 if (rc == 1) {
5273 break;
5274 }
5275 }
5276
5277 if (rc != 1) {
5278 return;
5279 }
5280
5281 (void) emlxs_sli4_msi_intr((char *)hba,
5282 (char *)(unsigned long)arg[i]);
5283
5284 return;
5285
5286 } /* emlxs_sli4_poll_intr() */
5287
5288
/*ARGSUSED*/
/*
 * emlxs_sli4_process_async_event()
 *
 * Dispatch an asynchronous completion queue entry (ACQE) from the
 * adapter.  Handles FCoE link state, FIP/FCF table, DCBX, group-5
 * (QoS), native FC link attention, SLI port (temperature and
 * misconfiguration), VF and MR event codes; unknown codes are only
 * logged.
 */
static void
emlxs_sli4_process_async_event(emlxs_hba_t *hba, CQE_ASYNC_t *cqe)
{
	emlxs_port_t *port = &PPORT;
	uint8_t status;

	/* Save the event tag */
	/* NOTE(review): the link union is read for every event code; */
	/* presumably the tag word overlays the same CQE position -- */
	/* confirm against the ACQE layout. */
	if (hba->link_event_tag == cqe->un.link.event_tag) {
		HBASTATS.LinkMultiEvent++;
	} else if (hba->link_event_tag + 1 < cqe->un.link.event_tag) {
		/* A gap in tags means at least one event was missed */
		HBASTATS.LinkMultiEvent++;
	}
	hba->link_event_tag = cqe->un.link.event_tag;

	switch (cqe->event_code) {
	case ASYNC_EVENT_CODE_FCOE_LINK_STATE:
		HBASTATS.LinkEvent++;

		switch (cqe->un.link.link_status) {
		case ASYNC_EVENT_PHYS_LINK_UP:
			/* Logged only; logical link-up drives the state */
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Link Async Event: PHYS_LINK_UP. val=%d "
			    "type=%x event=%x",
			    cqe->valid, cqe->event_type, HBASTATS.LinkEvent);
			break;

		case ASYNC_EVENT_LOGICAL_LINK_UP:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Link Async Event: LOGICAL_LINK_UP. val=%d "
			    "type=%x event=%x",
			    cqe->valid, cqe->event_type, HBASTATS.LinkEvent);

			emlxs_sli4_handle_fcoe_link_event(hba, cqe);
			break;

		case ASYNC_EVENT_PHYS_LINK_DOWN:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Link Async Event: PHYS_LINK_DOWN. val=%d "
			    "type=%x event=%x",
			    cqe->valid, cqe->event_type, HBASTATS.LinkEvent);

			emlxs_sli4_handle_fcoe_link_event(hba, cqe);
			break;

		case ASYNC_EVENT_LOGICAL_LINK_DOWN:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Link Async Event: LOGICAL_LINK_DOWN. val=%d "
			    "type=%x event=%x",
			    cqe->valid, cqe->event_type, HBASTATS.LinkEvent);

			emlxs_sli4_handle_fcoe_link_event(hba, cqe);
			break;
		default:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Link Async Event: Unknown link status=%d event=%x",
			    cqe->un.link.link_status, HBASTATS.LinkEvent);
			break;
		}
		break;
	case ASYNC_EVENT_CODE_FCOE_FIP:
		/* FIP events feed the FCF state machine via notify calls */
		switch (cqe->un.fcoe.evt_type) {
		case ASYNC_EVENT_NEW_FCF_DISC:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FIP Async Event: FCF_FOUND %d:%d",
			    cqe->un.fcoe.ref_index, cqe->un.fcoe.fcf_count);

			(void) emlxs_fcf_found_notify(port,
			    cqe->un.fcoe.ref_index);
			break;
		case ASYNC_EVENT_FCF_TABLE_FULL:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FIP Async Event: FCFTAB_FULL %d:%d",
			    cqe->un.fcoe.ref_index, cqe->un.fcoe.fcf_count);

			(void) emlxs_fcf_full_notify(port);
			break;
		case ASYNC_EVENT_FCF_DEAD:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FIP Async Event: FCF_LOST %d:%d",
			    cqe->un.fcoe.ref_index, cqe->un.fcoe.fcf_count);

			(void) emlxs_fcf_lost_notify(port,
			    cqe->un.fcoe.ref_index);
			break;
		case ASYNC_EVENT_VIRT_LINK_CLEAR:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FIP Async Event: CVL %d",
			    cqe->un.fcoe.ref_index);

			/* ref_index is a VPI here, not an FCF index */
			(void) emlxs_fcf_cvl_notify(port,
			    emlxs_sli4_vpi_to_index(hba,
			    cqe->un.fcoe.ref_index));
			break;

		case ASYNC_EVENT_FCF_MODIFIED:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FIP Async Event: FCF_CHANGED %d",
			    cqe->un.fcoe.ref_index);

			(void) emlxs_fcf_changed_notify(port,
			    cqe->un.fcoe.ref_index);
			break;
		default:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FIP Async Event: Unknown event type=%d",
			    cqe->un.fcoe.evt_type);
			break;
		}
		break;
	case ASYNC_EVENT_CODE_DCBX:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "DCBX Async Event: type=%d. Not supported.",
		    cqe->event_type);
		break;
	case ASYNC_EVENT_CODE_GRP_5:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "Group 5 Async Event: type=%d.", cqe->event_type);
		if (cqe->event_type == ASYNC_EVENT_QOS_SPEED) {
			hba->qos_linkspeed = cqe->un.qos.qos_link_speed;
		}
		break;
	case ASYNC_EVENT_CODE_FC_EVENT:
		switch (cqe->event_type) {
		case ASYNC_EVENT_FC_LINK_ATT:
			HBASTATS.LinkEvent++;

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FC Async Event: Link Attention. event=%x",
			    HBASTATS.LinkEvent);

			emlxs_sli4_handle_fc_link_att(hba, cqe);
			break;
		case ASYNC_EVENT_FC_SHARED_LINK_ATT:
			HBASTATS.LinkEvent++;

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FC Async Event: Shared Link Attention. event=%x",
			    HBASTATS.LinkEvent);

			emlxs_sli4_handle_fc_link_att(hba, cqe);
			break;
		default:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FC Async Event: Unknown event. type=%d event=%x",
			    cqe->event_type, HBASTATS.LinkEvent);
		}
		break;
	case ASYNC_EVENT_CODE_PORT:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "SLI Port Async Event: type=%d", cqe->event_type);

		switch (cqe->event_type) {
		case ASYNC_EVENT_PORT_OTEMP:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
			    "SLI Port Async Event: Temperature limit exceeded");
			cmn_err(CE_WARN,
			    "^%s%d: Temperature limit exceeded. Fibre channel "
			    "controller temperature %u degrees C",
			    DRIVER_NAME, hba->ddiinst,
			    BE_SWAP32(*(uint32_t *)cqe->un.port.link_status));
			break;

		case ASYNC_EVENT_PORT_NTEMP:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
			    "SLI Port Async Event: Temperature returned to "
			    "normal");
			cmn_err(CE_WARN,
			    "^%s%d: Temperature returned to normal",
			    DRIVER_NAME, hba->ddiinst);
			break;

		case ASYNC_EVENT_MISCONFIG_PORT:
			/* Swap the status word in place, then pick out */
			/* the byte for this adapter's link number. */
			*((uint32_t *)cqe->un.port.link_status) =
			    BE_SWAP32(*((uint32_t *)cqe->un.port.link_status));
			status =
			    cqe->un.port.link_status[hba->sli.sli4.link_number];

			switch (status) {
			case 0 :
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
				    "SLI Port Async Event: link%d misconfig "
				    "functional", hba->sli.sli4.link_number);
				break;

			case 1 :
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
				    "SLI Port Async Event: Physical media not "
				    "detected");
				cmn_err(CE_WARN,
				    "^%s%d: Optics faulted/incorrectly "
				    "installed/not installed - Reseat optics, "
				    "if issue not resolved, replace.",
				    DRIVER_NAME, hba->ddiinst);
				break;

			case 2 :
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
				    "SLI Port Async Event: Wrong physical "
				    "media detected");
				cmn_err(CE_WARN,
				    "^%s%d: Optics of two types installed - "
				    "Remove one optic or install matching"
				    "pair of optics.",
				    DRIVER_NAME, hba->ddiinst);
				break;

			case 3 :
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
				    "SLI Port Async Event: Unsupported "
				    "physical media detected");
				cmn_err(CE_WARN,
				    "^%s%d: Incompatible optics - Replace "
				    "with compatible optics for card to "
				    "function.",
				    DRIVER_NAME, hba->ddiinst);
				break;

			default :
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
				    "SLI Port Async Event: Physical media "
				    "error, status=%x", status);
				cmn_err(CE_WARN,
				    "^%s%d: Misconfigured port: status=0x%x - "
				    "Check optics on card.",
				    DRIVER_NAME, hba->ddiinst, status);
				break;
			}
			break;
		}

		break;
	case ASYNC_EVENT_CODE_VF:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "VF Async Event: type=%d",
		    cqe->event_type);
		break;
	case ASYNC_EVENT_CODE_MR:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "MR Async Event: type=%d",
		    cqe->event_type);
		break;
	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "Unknown Async Event: code=%d type=%d.",
		    cqe->event_code, cqe->event_type);
		break;
	}

} /* emlxs_sli4_process_async_event() */
5539
5540
5541 /*ARGSUSED*/
5542 static void
emlxs_sli4_process_mbox_event(emlxs_hba_t * hba,CQE_MBOX_t * cqe)5543 emlxs_sli4_process_mbox_event(emlxs_hba_t *hba, CQE_MBOX_t *cqe)
5544 {
5545 emlxs_port_t *port = &PPORT;
5546 MAILBOX4 *mb;
5547 MATCHMAP *mbox_bp;
5548 MATCHMAP *mbox_nonembed;
5549 MAILBOXQ *mbq = NULL;
5550 uint32_t size;
5551 uint32_t *iptr;
5552 int rc;
5553 off_t offset;
5554
5555 if (cqe->consumed && !cqe->completed) {
5556 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5557 "CQ ENTRY: Mbox event. Entry consumed but not completed");
5558 return;
5559 }
5560
5561 mutex_enter(&EMLXS_PORT_LOCK);
5562 switch (hba->mbox_queue_flag) {
5563 case 0:
5564 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_mbox_intr_msg,
5565 "CQ ENTRY: Mbox event. No mailbox active.");
5566
5567 mutex_exit(&EMLXS_PORT_LOCK);
5568 return;
5569
5570 case MBX_POLL:
5571
5572 /* Mark mailbox complete, this should wake up any polling */
5573 /* threads. This can happen if interrupts are enabled while */
5574 /* a polled mailbox command is outstanding. If we don't set */
5575 /* MBQ_COMPLETED here, the polling thread may wait until */
5576 /* timeout error occurs */
5577
5578 mutex_enter(&EMLXS_MBOX_LOCK);
5579 mbq = (MAILBOXQ *)hba->mbox_mbq;
5580 if (mbq) {
5581 port = (emlxs_port_t *)mbq->port;
5582 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5583 "CQ ENTRY: Mbox event. Completing Polled command.");
5584 mbq->flag |= MBQ_COMPLETED;
5585 }
5586 mutex_exit(&EMLXS_MBOX_LOCK);
5587
5588 mutex_exit(&EMLXS_PORT_LOCK);
5589 return;
5590
5591 case MBX_SLEEP:
5592 case MBX_NOWAIT:
5593 /* Check mbox_timer, it acts as a service flag too */
5594 /* The first to service the mbox queue will clear the timer */
5595 if (hba->mbox_timer) {
5596 hba->mbox_timer = 0;
5597
5598 mutex_enter(&EMLXS_MBOX_LOCK);
5599 mbq = (MAILBOXQ *)hba->mbox_mbq;
5600 mutex_exit(&EMLXS_MBOX_LOCK);
5601 }
5602
5603 if (!mbq) {
5604 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5605 "Mailbox event. No service required.");
5606 mutex_exit(&EMLXS_PORT_LOCK);
5607 return;
5608 }
5609
5610 mb = (MAILBOX4 *)mbq;
5611 mutex_exit(&EMLXS_PORT_LOCK);
5612 break;
5613
5614 default:
5615 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_completion_error_msg,
5616 "CQ ENTRY: Mbox event. Invalid Mailbox flag (%x).",
5617 hba->mbox_queue_flag);
5618
5619 mutex_exit(&EMLXS_PORT_LOCK);
5620 return;
5621 }
5622
5623 /* Set port context */
5624 port = (emlxs_port_t *)mbq->port;
5625
5626 offset = (off_t)((uint64_t)((unsigned long)
5627 hba->sli.sli4.mq.addr.virt) -
5628 (uint64_t)((unsigned long)
5629 hba->sli.sli4.slim2.virt));
5630
5631 /* Now that we are the owner, DMA Sync entire MQ if needed */
5632 EMLXS_MPDATA_SYNC(hba->sli.sli4.mq.addr.dma_handle, offset,
5633 4096, DDI_DMA_SYNC_FORDEV);
5634
5635 BE_SWAP32_BCOPY((uint8_t *)hba->mbox_mqe, (uint8_t *)mb,
5636 MAILBOX_CMD_SLI4_BSIZE);
5637
5638 if (mb->mbxCommand != MBX_HEARTBEAT) {
5639 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5640 "CQ ENTRY: Mbox event. Mbox complete. status=%x cmd=%x",
5641 mb->mbxStatus, mb->mbxCommand);
5642
5643 emlxs_data_dump(port, "MBOX CMP", (uint32_t *)hba->mbox_mqe,
5644 12, 0);
5645 }
5646
5647 if (mb->mbxCommand == MBX_SLI_CONFIG) {
5648 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5649 "Mbox sge_cnt: %d length: %d embed: %d",
5650 mb->un.varSLIConfig.be.sge_cnt,
5651 mb->un.varSLIConfig.be.payload_length,
5652 mb->un.varSLIConfig.be.embedded);
5653 }
5654
5655 /* Now sync the memory buffer if one was used */
5656 if (mbq->bp) {
5657 mbox_bp = (MATCHMAP *)mbq->bp;
5658 EMLXS_MPDATA_SYNC(mbox_bp->dma_handle, 0, mbox_bp->size,
5659 DDI_DMA_SYNC_FORKERNEL);
5660 #ifdef FMA_SUPPORT
5661 if (emlxs_fm_check_dma_handle(hba, mbox_bp->dma_handle)
5662 != DDI_FM_OK) {
5663 EMLXS_MSGF(EMLXS_CONTEXT,
5664 &emlxs_invalid_dma_handle_msg,
5665 "sli4_process_mbox_event: hdl=%p",
5666 mbox_bp->dma_handle);
5667
5668 mb->mbxStatus = MBXERR_DMA_ERROR;
5669 }
5670 #endif
5671 }
5672
5673 /* Now sync the memory buffer if one was used */
5674 if (mbq->nonembed) {
5675 mbox_nonembed = (MATCHMAP *)mbq->nonembed;
5676 size = mbox_nonembed->size;
5677 EMLXS_MPDATA_SYNC(mbox_nonembed->dma_handle, 0, size,
5678 DDI_DMA_SYNC_FORKERNEL);
5679 iptr = (uint32_t *)((uint8_t *)mbox_nonembed->virt);
5680 BE_SWAP32_BCOPY((uint8_t *)iptr, (uint8_t *)iptr, size);
5681
5682 #ifdef FMA_SUPPORT
5683 if (emlxs_fm_check_dma_handle(hba,
5684 mbox_nonembed->dma_handle) != DDI_FM_OK) {
5685 EMLXS_MSGF(EMLXS_CONTEXT,
5686 &emlxs_invalid_dma_handle_msg,
5687 "sli4_process_mbox_event: hdl=%p",
5688 mbox_nonembed->dma_handle);
5689
5690 mb->mbxStatus = MBXERR_DMA_ERROR;
5691 }
5692 #endif
5693 emlxs_data_dump(port, "EXT AREA", (uint32_t *)iptr, 24, 0);
5694 }
5695
5696 /* Mailbox has been completely received at this point */
5697
5698 if (mb->mbxCommand == MBX_HEARTBEAT) {
5699 hba->heartbeat_active = 0;
5700 goto done;
5701 }
5702
5703 if (hba->mbox_queue_flag == MBX_SLEEP) {
5704 if (mb->mbxCommand != MBX_DOWN_LOAD
5705 /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
5706 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5707 "Received. %s: status=%x Sleep.",
5708 emlxs_mb_cmd_xlate(mb->mbxCommand),
5709 mb->mbxStatus);
5710 }
5711 } else {
5712 if (mb->mbxCommand != MBX_DOWN_LOAD
5713 /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
5714 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5715 "Completed. %s: status=%x",
5716 emlxs_mb_cmd_xlate(mb->mbxCommand),
5717 mb->mbxStatus);
5718 }
5719 }
5720
5721 /* Filter out passthru mailbox */
5722 if (mbq->flag & MBQ_PASSTHRU) {
5723 goto done;
5724 }
5725
5726 if (mb->mbxStatus) {
5727 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5728 "%s: status=0x%x", emlxs_mb_cmd_xlate(mb->mbxCommand),
5729 (uint32_t)mb->mbxStatus);
5730 }
5731
5732 if (mbq->mbox_cmpl) {
5733 rc = (mbq->mbox_cmpl)(hba, mbq);
5734
5735 /* If mbox was retried, return immediately */
5736 if (rc) {
5737 return;
5738 }
5739 }
5740
5741 done:
5742
5743 /* Clean up the mailbox area */
5744 emlxs_mb_fini(hba, (MAILBOX *)mb, mb->mbxStatus);
5745
5746 /* Attempt to send pending mailboxes */
5747 mbq = (MAILBOXQ *)emlxs_mb_get(hba);
5748 if (mbq) {
5749 /* Attempt to send pending mailboxes */
5750 rc = emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
5751 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
5752 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
5753 }
5754 }
5755 return;
5756
5757 } /* emlxs_sli4_process_mbox_event() */
5758
5759
/*
 * emlxs_CQE_to_IOCB
 *
 * Translate a SLI-4 Work Queue completion entry (CQE) into the legacy
 * SLI-3 style IOCB image stored in the command's sbp->iocbq.  The rest
 * of the driver completes I/Os through that IOCB image, so the status,
 * parameter and tags from the CQE (plus command-specific fields derived
 * from the original WQE) are written into the matching IOCB fields here.
 *
 *   hba - adapter instance (only used for logging under DEBUG_FASTPATH)
 *   cqe - completion queue entry reported by the hardware
 *   sbp - driver buffer of the completed command; holds both the
 *         originating WQE and the IOCB that is updated in place
 */
/*ARGSUSED*/
static void
emlxs_CQE_to_IOCB(emlxs_hba_t *hba, CQE_CmplWQ_t *cqe, emlxs_buf_t *sbp)
{
#ifdef DEBUG_FASTPATH
	emlxs_port_t *port = &PPORT;
#endif /* DEBUG_FASTPATH */
	IOCBQ *iocbq;
	IOCB *iocb;
	uint32_t *iptr;
	fc_packet_t *pkt;
	emlxs_wqe_t *wqe;

	iocbq = &sbp->iocbq;
	wqe = &iocbq->wqe;
	iocb = &iocbq->iocb;

#ifdef DEBUG_FASTPATH
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "CQE to IOCB: cmd:%x iotag:%x xri:%d", wqe->Command,
	    wqe->RequestTag, wqe->XRITag);
#endif /* DEBUG_FASTPATH */

	/* Common fields: completion status/parameter come from the CQE, */
	/* the tag and exchange id come from the original WQE */
	iocb->ULPSTATUS = cqe->Status;
	iocb->un.ulpWord[4] = cqe->Parameter;
	iocb->ULPIOTAG = cqe->RequestTag;
	iocb->ULPCONTEXT = wqe->XRITag;

	/* Per-command fixups: map the WQE command code to its IOCB */
	/* equivalent and fill in the command specific fields */
	switch (wqe->Command) {

	case CMD_FCP_ICMND64_CR:
		iocb->ULPCOMMAND = CMD_FCP_ICMND64_CX;
		break;

	case CMD_FCP_IREAD64_CR:
		iocb->ULPCOMMAND = CMD_FCP_IREAD64_CX;
		iocb->ULPPU = PARM_XFER_CHECK;
		if (iocb->ULPSTATUS ==  IOSTAT_FCP_RSP_ERROR) {
			/* Transfer shortfall: requested minus the count */
			/* reported in the CQE's command-specific word */
			iocb->un.fcpi64.fcpi_parm =
			    wqe->un.FcpCmd.TotalTransferCount -
			    cqe->CmdSpecific;
		}
		break;

	case CMD_FCP_IWRITE64_CR:
		iocb->ULPCOMMAND = CMD_FCP_IWRITE64_CX;
		if (iocb->ULPSTATUS ==  IOSTAT_FCP_RSP_ERROR) {
			/* Same shortfall calculation as the read case, */
			/* but clamped at zero to avoid wrap-around */
			if (wqe->un.FcpCmd.TotalTransferCount >
			    cqe->CmdSpecific) {
				iocb->un.fcpi64.fcpi_parm =
				    wqe->un.FcpCmd.TotalTransferCount -
				    cqe->CmdSpecific;
			} else {
				iocb->un.fcpi64.fcpi_parm = 0;
			}
		}
		break;

	case CMD_ELS_REQUEST64_CR:
		iocb->ULPCOMMAND = CMD_ELS_REQUEST64_CX;
		iocb->un.elsreq64.bdl.bdeSize = cqe->CmdSpecific;
		if (iocb->ULPSTATUS == 0) {
			iocb->unsli3.ext_iocb.rsplen = cqe->CmdSpecific;
		}
		if (iocb->ULPSTATUS == IOSTAT_LS_RJT) {
			/* For LS_RJT, the driver populates the rsp buffer */
			pkt = PRIV2PKT(sbp);
			iptr = (uint32_t *)pkt->pkt_resp;
			*iptr++ = ELS_CMD_LS_RJT;
			*iptr = cqe->Parameter;
		}
		break;

	case CMD_GEN_REQUEST64_CR:
		iocb->ULPCOMMAND = CMD_GEN_REQUEST64_CX;
		iocb->unsli3.ext_iocb.rsplen = cqe->CmdSpecific;
		break;

	case CMD_XMIT_SEQUENCE64_CR:
		iocb->ULPCOMMAND = CMD_XMIT_SEQUENCE64_CX;
		break;

	case CMD_ABORT_XRI_CX:
		/* Abort completions report the aborted exchange's tag */
		iocb->ULPCONTEXT = wqe->AbortTag;
		break;

	case CMD_FCP_TRECEIVE64_CX:
		/* free memory for XRDY */
		if (iocbq->bp) {
			emlxs_mem_buf_free(hba, iocbq->bp);
			iocbq->bp = 0;
		}

		/*FALLTHROUGH*/

	case CMD_FCP_TSEND64_CX:
	case CMD_FCP_TRSP64_CX:
	default:
		/* No translation needed; pass the command code through */
		iocb->ULPCOMMAND = wqe->Command;

	}
} /* emlxs_CQE_to_IOCB() */
5862
5863
/*
 * emlxs_sli4_hba_flush_chipq
 *
 * Fail every outstanding I/O tracked in hba->fc_table.  Each active sbp
 * is replaced by STALE_PACKET and completed with a fabricated local
 * reject completion (IOSTAT_LOCAL_REJECT / IOERR_SEQUENCE_TIMEOUT).
 * Non-polled, non-driver-allocated packets are queued on their channel's
 * response list and the channel interrupt threads are triggered at the
 * end; polled/driver packets are completed inline.
 *
 * NOTE(review): EMLXS_FCTAB_LOCK is dropped and reacquired for every
 * entry processed, so the table can be modified concurrently between
 * iterations; the loop index simply continues from where it left off.
 */
/*ARGSUSED*/
static void
emlxs_sli4_hba_flush_chipq(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	CHANNEL *cp;
	emlxs_buf_t *sbp;
	IOCBQ *iocbq;
	uint16_t i;
	uint32_t trigger = 0;
	CQE_CmplWQ_t cqe;

	mutex_enter(&EMLXS_FCTAB_LOCK);
	for (i = 0; i < hba->max_iotag; i++) {
		sbp = hba->fc_table[i];
		if (sbp == NULL || sbp == STALE_PACKET) {
			continue;
		}
		/* Claim this entry under the lock, then release the */
		/* lock for the (potentially slow) completion work */
		hba->fc_table[i] = STALE_PACKET;
		hba->io_count--;
		sbp->iotag = 0;
		mutex_exit(&EMLXS_FCTAB_LOCK);

		cp = sbp->channel;
		/* Fabricate a local-reject completion for this iotag */
		bzero(&cqe, sizeof (CQE_CmplWQ_t));
		cqe.RequestTag = i;
		cqe.Status = IOSTAT_LOCAL_REJECT;
		cqe.Parameter = IOERR_SEQUENCE_TIMEOUT;

		cp->hbaCmplCmd_sbp++;

#ifdef SFCT_SUPPORT
#ifdef FCT_IO_TRACE
		if (sbp->fct_cmd) {
			emlxs_fct_io_trace(port, sbp->fct_cmd,
			    EMLXS_FCT_IOCB_COMPLETE);
		}
#endif /* FCT_IO_TRACE */
#endif /* SFCT_SUPPORT */

		if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
			atomic_dec_32(&hba->io_active);
#ifdef NODE_THROTTLE_SUPPORT
			if (sbp->node) {
				atomic_dec_32(&sbp->node->io_active);
			}
#endif /* NODE_THROTTLE_SUPPORT */
		}

		/* Copy entry to sbp's iocbq */
		iocbq = &sbp->iocbq;
		emlxs_CQE_to_IOCB(hba, &cqe, sbp);

		iocbq->next = NULL;

		/* Exchange is no longer busy on-chip, free it */
		emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);

		if (!(sbp->pkt_flags &
		    (PACKET_POLLED | PACKET_ALLOCATED))) {
			/* Add the IOCB to the channel list */
			mutex_enter(&cp->rsp_lock);
			if (cp->rsp_head == NULL) {
				cp->rsp_head = iocbq;
				cp->rsp_tail = iocbq;
			} else {
				cp->rsp_tail->next = iocbq;
				cp->rsp_tail = iocbq;
			}
			mutex_exit(&cp->rsp_lock);
			trigger = 1;
		} else {
			emlxs_proc_channel_event(hba, cp, iocbq);
		}
		mutex_enter(&EMLXS_FCTAB_LOCK);
	}
	mutex_exit(&EMLXS_FCTAB_LOCK);

	/* Wake the interrupt thread of every channel that now has */
	/* queued responses */
	if (trigger) {
		for (i = 0; i < hba->chan_count; i++) {
			cp = &hba->chan[i];
			if (cp->rsp_head != NULL) {
				emlxs_thread_trigger2(&cp->intr_thread,
				    emlxs_proc_channel, cp);
			}
		}
	}

} /* emlxs_sli4_hba_flush_chipq() */
5953
5954
5955 /*ARGSUSED*/
5956 static void
emlxs_sli4_process_oor_wqe_cmpl(emlxs_hba_t * hba,CQ_DESC_t * cq,CQE_CmplWQ_t * cqe)5957 emlxs_sli4_process_oor_wqe_cmpl(emlxs_hba_t *hba,
5958 CQ_DESC_t *cq, CQE_CmplWQ_t *cqe)
5959 {
5960 emlxs_port_t *port = &PPORT;
5961 CHANNEL *cp;
5962 uint16_t request_tag;
5963
5964 request_tag = cqe->RequestTag;
5965
5966 /* 1 to 1 mapping between CQ and channel */
5967 cp = cq->channelp;
5968
5969 cp->hbaCmplCmd++;
5970
5971 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5972 "CQ ENTRY: OOR Cmpl: iotag=%d", request_tag);
5973
5974 emlxs_data_dump(port, "CQE", (uint32_t *)cqe, 4, 0);
5975
5976 } /* emlxs_sli4_process_oor_wqe_cmpl() */
5977
5978
5979 /*ARGSUSED*/
5980 static void
emlxs_sli4_process_wqe_cmpl(emlxs_hba_t * hba,CQ_DESC_t * cq,CQE_CmplWQ_t * cqe)5981 emlxs_sli4_process_wqe_cmpl(emlxs_hba_t *hba, CQ_DESC_t *cq, CQE_CmplWQ_t *cqe)
5982 {
5983 emlxs_port_t *port = &PPORT;
5984 CHANNEL *cp;
5985 emlxs_buf_t *sbp;
5986 IOCBQ *iocbq;
5987 uint16_t request_tag;
5988 #ifdef SFCT_SUPPORT
5989 #ifdef FCT_IO_TRACE
5990 fct_cmd_t *fct_cmd;
5991 emlxs_buf_t *cmd_sbp;
5992 #endif /* FCT_IO_TRACE */
5993 #endif /* SFCT_SUPPORT */
5994
5995 request_tag = cqe->RequestTag;
5996
5997 /* 1 to 1 mapping between CQ and channel */
5998 cp = cq->channelp;
5999
6000 mutex_enter(&EMLXS_FCTAB_LOCK);
6001 sbp = hba->fc_table[request_tag];
6002
6003 if (!sbp) {
6004 cp->hbaCmplCmd++;
6005 mutex_exit(&EMLXS_FCTAB_LOCK);
6006 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6007 "CQ ENTRY: NULL sbp. iotag=%d. Dropping...",
6008 request_tag);
6009 return;
6010 }
6011
6012 if (sbp == STALE_PACKET) {
6013 cp->hbaCmplCmd_sbp++;
6014 mutex_exit(&EMLXS_FCTAB_LOCK);
6015 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6016 "CQ ENTRY: Stale sbp. iotag=%d. Dropping...", request_tag);
6017 return;
6018 }
6019
6020 if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
6021 atomic_add_32(&hba->io_active, -1);
6022 #ifdef NODE_THROTTLE_SUPPORT
6023 if (sbp->node) {
6024 atomic_add_32(&sbp->node->io_active, -1);
6025 }
6026 #endif /* NODE_THROTTLE_SUPPORT */
6027 }
6028
6029 if (!(sbp->xrip)) {
6030 cp->hbaCmplCmd++;
6031 mutex_exit(&EMLXS_FCTAB_LOCK);
6032 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6033 "CQ ENTRY: NULL sbp xrip %p. iotag=%d. Dropping...",
6034 sbp, request_tag);
6035 return;
6036 }
6037
6038 #ifdef DEBUG_FASTPATH
6039 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6040 "CQ ENTRY: process wqe compl");
6041 #endif /* DEBUG_FASTPATH */
6042 cp->hbaCmplCmd_sbp++;
6043
6044 /* Copy entry to sbp's iocbq */
6045 iocbq = &sbp->iocbq;
6046 emlxs_CQE_to_IOCB(hba, cqe, sbp);
6047
6048 iocbq->next = NULL;
6049
6050 if (cqe->XB) {
6051 /* Mark exchange as ABORT in progress */
6052 sbp->xrip->flag &= ~EMLXS_XRI_PENDING_IO;
6053 sbp->xrip->flag |= EMLXS_XRI_BUSY;
6054
6055 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6056 "CQ ENTRY: XRI BUSY: iotag=%d xri=%d", request_tag,
6057 sbp->xrip->XRI);
6058
6059 emlxs_sli4_free_xri(port, sbp, 0, 0);
6060 } else {
6061 /* Exchange is no longer busy on-chip, free it */
6062 emlxs_sli4_free_xri(port, sbp, sbp->xrip, 0);
6063 }
6064
6065 mutex_exit(&EMLXS_FCTAB_LOCK);
6066
6067 #ifdef SFCT_SUPPORT
6068 #ifdef FCT_IO_TRACE
6069 fct_cmd = sbp->fct_cmd;
6070 if (fct_cmd) {
6071 cmd_sbp = (emlxs_buf_t *)fct_cmd->cmd_fca_private;
6072 mutex_enter(&cmd_sbp->fct_mtx);
6073 EMLXS_FCT_STATE_CHG(fct_cmd, cmd_sbp, EMLXS_FCT_IOCB_COMPLETE);
6074 mutex_exit(&cmd_sbp->fct_mtx);
6075 }
6076 #endif /* FCT_IO_TRACE */
6077 #endif /* SFCT_SUPPORT */
6078
6079 /*
6080 * If this is NOT a polled command completion
6081 * or a driver allocated pkt, then defer pkt
6082 * completion.
6083 */
6084 if (!(sbp->pkt_flags &
6085 (PACKET_POLLED | PACKET_ALLOCATED))) {
6086 /* Add the IOCB to the channel list */
6087 mutex_enter(&cp->rsp_lock);
6088 if (cp->rsp_head == NULL) {
6089 cp->rsp_head = iocbq;
6090 cp->rsp_tail = iocbq;
6091 } else {
6092 cp->rsp_tail->next = iocbq;
6093 cp->rsp_tail = iocbq;
6094 }
6095 mutex_exit(&cp->rsp_lock);
6096
6097 /* Delay triggering thread till end of ISR */
6098 cp->chan_flag |= EMLXS_NEEDS_TRIGGER;
6099 } else {
6100 emlxs_proc_channel_event(hba, cp, iocbq);
6101 }
6102
6103 } /* emlxs_sli4_process_wqe_cmpl() */
6104
6105
6106 /*ARGSUSED*/
6107 static void
emlxs_sli4_process_release_wqe(emlxs_hba_t * hba,CQ_DESC_t * cq,CQE_RelWQ_t * cqe)6108 emlxs_sli4_process_release_wqe(emlxs_hba_t *hba, CQ_DESC_t *cq,
6109 CQE_RelWQ_t *cqe)
6110 {
6111 emlxs_port_t *port = &PPORT;
6112 WQ_DESC_t *wq;
6113 CHANNEL *cp;
6114 uint32_t i;
6115 uint16_t wqi;
6116
6117 wqi = emlxs_sli4_wqid_to_index(hba, (uint16_t)cqe->WQid);
6118
6119 /* Verify WQ index */
6120 if (wqi == 0xffff) {
6121 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6122 "CQ ENTRY: Invalid WQid:%d. Dropping...",
6123 cqe->WQid);
6124 return;
6125 }
6126
6127 wq = &hba->sli.sli4.wq[wqi];
6128
6129 #ifdef DEBUG_FASTPATH
6130 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6131 "CQ ENTRY: process release wqe: old %d new %d", wq->port_index,
6132 cqe->WQindex);
6133 #endif /* DEBUG_FASTPATH */
6134
6135 wq->port_index = cqe->WQindex;
6136
6137 /* Cmd ring may be available. Try sending more iocbs */
6138 for (i = 0; i < hba->chan_count; i++) {
6139 cp = &hba->chan[i];
6140 if (wq == (WQ_DESC_t *)cp->iopath) {
6141 emlxs_sli4_issue_iocb_cmd(hba, cp, 0);
6142 }
6143 }
6144
6145 } /* emlxs_sli4_process_release_wqe() */
6146
6147
6148 /*ARGSUSED*/
6149 emlxs_iocbq_t *
emlxs_sli4_rxq_get(emlxs_hba_t * hba,fc_frame_hdr_t * fchdr)6150 emlxs_sli4_rxq_get(emlxs_hba_t *hba, fc_frame_hdr_t *fchdr)
6151 {
6152 emlxs_queue_t *q;
6153 emlxs_iocbq_t *iocbq;
6154 emlxs_iocbq_t *prev;
6155 fc_frame_hdr_t *fchdr2;
6156 RXQ_DESC_t *rxq;
6157
6158 switch (fchdr->type) {
6159 case 1: /* ELS */
6160 rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_ELS];
6161 break;
6162 case 0x20: /* CT */
6163 rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_CT];
6164 break;
6165 default:
6166 return (NULL);
6167 }
6168
6169 mutex_enter(&rxq->lock);
6170
6171 q = &rxq->active;
6172 iocbq = (emlxs_iocbq_t *)q->q_first;
6173 prev = NULL;
6174
6175 while (iocbq) {
6176
6177 fchdr2 = (fc_frame_hdr_t *)iocbq->iocb.un.ulpWord;
6178
6179 if ((fchdr2->s_id == fchdr->s_id) &&
6180 (fchdr2->ox_id == fchdr->ox_id) &&
6181 (fchdr2->seq_id == fchdr->seq_id)) {
6182 /* Remove iocbq */
6183 if (prev) {
6184 prev->next = iocbq->next;
6185 }
6186 if (q->q_first == (uint8_t *)iocbq) {
6187 q->q_first = (uint8_t *)iocbq->next;
6188 }
6189 if (q->q_last == (uint8_t *)iocbq) {
6190 q->q_last = (uint8_t *)prev;
6191 }
6192 q->q_cnt--;
6193
6194 break;
6195 }
6196
6197 prev = iocbq;
6198 iocbq = iocbq->next;
6199 }
6200
6201 mutex_exit(&rxq->lock);
6202
6203 return (iocbq);
6204
6205 } /* emlxs_sli4_rxq_get() */
6206
6207
6208 /*ARGSUSED*/
6209 void
emlxs_sli4_rxq_put(emlxs_hba_t * hba,emlxs_iocbq_t * iocbq)6210 emlxs_sli4_rxq_put(emlxs_hba_t *hba, emlxs_iocbq_t *iocbq)
6211 {
6212 emlxs_queue_t *q;
6213 fc_frame_hdr_t *fchdr;
6214 RXQ_DESC_t *rxq;
6215
6216 fchdr = (fc_frame_hdr_t *)iocbq->iocb.RXFCHDR;
6217
6218 switch (fchdr->type) {
6219 case 1: /* ELS */
6220 rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_ELS];
6221 break;
6222 case 0x20: /* CT */
6223 rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_CT];
6224 break;
6225 default:
6226 return;
6227 }
6228
6229 mutex_enter(&rxq->lock);
6230
6231 q = &rxq->active;
6232
6233 if (q->q_last) {
6234 ((emlxs_iocbq_t *)q->q_last)->next = iocbq;
6235 q->q_cnt++;
6236 } else {
6237 q->q_first = (uint8_t *)iocbq;
6238 q->q_cnt = 1;
6239 }
6240
6241 q->q_last = (uint8_t *)iocbq;
6242 iocbq->next = NULL;
6243
6244 mutex_exit(&rxq->lock);
6245
6246 return;
6247
6248 } /* emlxs_sli4_rxq_put() */
6249
6250
/*
 * emlxs_sli4_rq_post
 *
 * Return a single receive buffer to the given Receive Queue by ringing
 * the RQ doorbell with a count of one.
 */
static void
emlxs_sli4_rq_post(emlxs_port_t *port, uint16_t rqid)
{
	emlxs_hba_t *hba = HBA;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "RQ POST: rqid=%d count=1", rqid);

	/* Ring the RQ doorbell once to repost the RQ buffer */

	emlxs_sli4_write_rqdb(hba, rqid, 1);

} /* emlxs_sli4_rq_post() */
6264
6265
6266 /*ARGSUSED*/
6267 static void
emlxs_sli4_process_unsol_rcv(emlxs_hba_t * hba,CQ_DESC_t * cq,CQE_UnsolRcv_t * cqe)6268 emlxs_sli4_process_unsol_rcv(emlxs_hba_t *hba, CQ_DESC_t *cq,
6269 CQE_UnsolRcv_t *cqe)
6270 {
6271 emlxs_port_t *port = &PPORT;
6272 emlxs_port_t *vport;
6273 RQ_DESC_t *hdr_rq;
6274 RQ_DESC_t *data_rq;
6275 MBUF_INFO *hdr_mp;
6276 MBUF_INFO *data_mp;
6277 MATCHMAP *seq_mp;
6278 uint32_t *data;
6279 fc_frame_hdr_t fchdr;
6280 uint16_t hdr_rqi;
6281 uint32_t host_index;
6282 emlxs_iocbq_t *iocbq = NULL;
6283 emlxs_iocb_t *iocb;
6284 emlxs_node_t *node = NULL;
6285 uint32_t i;
6286 uint32_t seq_len;
6287 uint32_t seq_cnt;
6288 uint32_t buf_type;
6289 char label[32];
6290 emlxs_wqe_t *wqe;
6291 CHANNEL *cp;
6292 XRIobj_t *xrip;
6293 RPIobj_t *rpip = NULL;
6294 uint32_t cmd;
6295 uint32_t posted = 0;
6296 uint32_t abort = 1;
6297 off_t offset;
6298 uint32_t status;
6299 uint32_t data_size;
6300 uint16_t rqid;
6301 uint32_t hdr_size;
6302 fc_packet_t *pkt;
6303 emlxs_buf_t *sbp;
6304
6305 if (cqe->Code == CQE_TYPE_UNSOL_RCV_V1) {
6306 CQE_UnsolRcvV1_t *cqeV1 = (CQE_UnsolRcvV1_t *)cqe;
6307
6308 status = cqeV1->Status;
6309 data_size = cqeV1->data_size;
6310 rqid = cqeV1->RQid;
6311 hdr_size = cqeV1->hdr_size;
6312 } else {
6313 status = cqe->Status;
6314 data_size = cqe->data_size;
6315 rqid = cqe->RQid;
6316 hdr_size = cqe->hdr_size;
6317 }
6318
6319 /* Validate the CQE */
6320
6321 /* Check status */
6322 switch (status) {
6323 case RQ_STATUS_SUCCESS: /* 0x10 */
6324 break;
6325
6326 case RQ_STATUS_BUFLEN_EXCEEDED: /* 0x11 */
6327 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6328 "CQ ENTRY: Unsol Rcv: Payload truncated.");
6329 break;
6330
6331 case RQ_STATUS_NEED_BUFFER: /* 0x12 */
6332 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6333 "CQ ENTRY: Unsol Rcv: Payload buffer needed.");
6334 return;
6335
6336 case RQ_STATUS_FRAME_DISCARDED: /* 0x13 */
6337 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6338 "CQ ENTRY: Unsol Rcv: Payload buffer discarded.");
6339 return;
6340
6341 default:
6342 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6343 "CQ ENTRY: Unsol Rcv: Unknown status=%x.",
6344 status);
6345 break;
6346 }
6347
6348 /* Make sure there is a frame header */
6349 if (hdr_size < sizeof (fc_frame_hdr_t)) {
6350 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6351 "CQ ENTRY: Unsol Rcv: FC header too small. Dropping...");
6352 return;
6353 }
6354
6355 hdr_rqi = emlxs_sli4_rqid_to_index(hba, rqid);
6356
6357 /* Verify RQ index */
6358 if (hdr_rqi == 0xffff) {
6359 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6360 "CQ ENTRY: Unsol Rcv: Invalid RQID:%d. Dropping...",
6361 rqid);
6362 return;
6363 }
6364
6365 hdr_rq = &hba->sli.sli4.rq[hdr_rqi];
6366 data_rq = &hba->sli.sli4.rq[hdr_rqi + 1];
6367
6368 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6369 "CQ ENTRY: Unsol Rcv:%x rqid=%d,%d index=%d status=%x "
6370 "hdr_size=%d data_size=%d",
6371 cqe->Code, rqid, hdr_rqi, hdr_rq->host_index, status, hdr_size,
6372 data_size);
6373
6374 hdr_rq->num_proc++;
6375
6376 /* Update host index */
6377 mutex_enter(&hba->sli.sli4.rq[hdr_rqi].lock);
6378 host_index = hdr_rq->host_index;
6379 hdr_rq->host_index++;
6380
6381 if (hdr_rq->host_index >= hdr_rq->max_index) {
6382 hdr_rq->host_index = 0;
6383 }
6384 data_rq->host_index = hdr_rq->host_index;
6385 mutex_exit(&hba->sli.sli4.rq[hdr_rqi].lock);
6386
6387 /* Get the next header rqb */
6388 hdr_mp = &hdr_rq->rqb[host_index];
6389
6390 offset = (off_t)((uint64_t)((unsigned long)hdr_mp->virt) -
6391 (uint64_t)((unsigned long)hba->sli.sli4.slim2.virt));
6392
6393 EMLXS_MPDATA_SYNC(hdr_mp->dma_handle, offset,
6394 sizeof (fc_frame_hdr_t), DDI_DMA_SYNC_FORKERNEL);
6395
6396 LE_SWAP32_BCOPY(hdr_mp->virt, (uint8_t *)&fchdr,
6397 sizeof (fc_frame_hdr_t));
6398
6399 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6400 "RQ HDR[%d]: rctl:%x type:%x "
6401 "sid:%x did:%x oxid:%x rxid:%x",
6402 host_index, fchdr.r_ctl, fchdr.type,
6403 fchdr.s_id, fchdr.d_id, fchdr.ox_id, fchdr.rx_id);
6404
6405 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6406 "RQ HDR[%d]: fctl:%x seq_id:%x seq_cnt:%x df_ctl:%x ro:%x",
6407 host_index, fchdr.f_ctl, fchdr.seq_id, fchdr.seq_cnt,
6408 fchdr.df_ctl, fchdr.ro);
6409
6410 /* Verify fc header type */
6411 switch (fchdr.type) {
6412 case 0: /* BLS */
6413 if (fchdr.r_ctl != 0x81) {
6414 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6415 "RQ ENTRY: Unexpected FC rctl (0x%x) "
6416 "received. Dropping...",
6417 fchdr.r_ctl);
6418
6419 goto done;
6420 }
6421
6422 /* Make sure there is no payload */
6423 if (data_size != 0) {
6424 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6425 "RQ ENTRY: ABTS payload provided. Dropping...");
6426
6427 goto done;
6428 }
6429
6430 buf_type = 0xFFFFFFFF;
6431 (void) strlcpy(label, "ABTS", sizeof (label));
6432 cp = &hba->chan[hba->channel_els];
6433 break;
6434
6435 case 0x01: /* ELS */
6436 /* Make sure there is a payload */
6437 if (data_size == 0) {
6438 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6439 "RQ ENTRY: Unsol Rcv: No ELS payload provided. "
6440 "Dropping...");
6441
6442 goto done;
6443 }
6444
6445 buf_type = MEM_ELSBUF;
6446 (void) strlcpy(label, "Unsol ELS", sizeof (label));
6447 cp = &hba->chan[hba->channel_els];
6448 break;
6449
6450 case 0x20: /* CT */
6451 /* Make sure there is a payload */
6452 if (data_size == 0) {
6453 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6454 "RQ ENTRY: Unsol Rcv: No CT payload provided. "
6455 "Dropping...");
6456
6457 goto done;
6458 }
6459
6460 buf_type = MEM_CTBUF;
6461 (void) strlcpy(label, "Unsol CT", sizeof (label));
6462 cp = &hba->chan[hba->channel_ct];
6463 break;
6464
6465 case 0x08: /* FCT */
6466 /* Make sure there is a payload */
6467 if (data_size == 0) {
6468 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6469 "RQ ENTRY: Unsol Rcv: No FCP payload provided. "
6470 "Dropping...");
6471
6472 goto done;
6473 }
6474
6475 buf_type = MEM_FCTBUF;
6476 (void) strlcpy(label, "Unsol FCT", sizeof (label));
6477 cp = &hba->chan[hba->CHANNEL_FCT];
6478 break;
6479
6480 default:
6481 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6482 "RQ ENTRY: Unexpected FC type (0x%x) received. Dropping...",
6483 fchdr.type);
6484
6485 goto done;
6486 }
6487 /* Fc Header is valid */
6488
6489 /* Check if this is an active sequence */
6490 iocbq = emlxs_sli4_rxq_get(hba, &fchdr);
6491
6492 if (!iocbq) {
6493 if (fchdr.type != 0) {
6494 if (!(fchdr.f_ctl & F_CTL_FIRST_SEQ)) {
6495 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6496 "RQ ENTRY: %s: First of sequence not"
6497 " set. Dropping...",
6498 label);
6499
6500 goto done;
6501 }
6502 }
6503
6504 if ((fchdr.type != 0) && (fchdr.seq_cnt != 0)) {
6505 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6506 "RQ ENTRY: %s: Sequence count not zero (%d). "
6507 "Dropping...",
6508 label, fchdr.seq_cnt);
6509
6510 goto done;
6511 }
6512
6513 /* Find vport */
6514 for (i = 0; i < MAX_VPORTS; i++) {
6515 vport = &VPORT(i);
6516
6517 if (vport->did == fchdr.d_id) {
6518 port = vport;
6519 break;
6520 }
6521 }
6522
6523 if (i == MAX_VPORTS) {
6524 /* Allow unsol FLOGI & PLOGI for P2P */
6525 if ((fchdr.type != 1 /* ELS*/) ||
6526 ((fchdr.d_id != FABRIC_DID) &&
6527 !(hba->flag & FC_PT_TO_PT))) {
6528 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6529 "RQ ENTRY: %s: Invalid did=%x. Dropping...",
6530 label, fchdr.d_id);
6531
6532 goto done;
6533 }
6534 }
6535
6536 /* Allocate an IOCBQ */
6537 iocbq = (emlxs_iocbq_t *)emlxs_mem_get(hba, MEM_IOCB);
6538
6539 if (!iocbq) {
6540 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6541 "RQ ENTRY: %s: Out of IOCB "
6542 "resources. Dropping...",
6543 label);
6544
6545 goto done;
6546 }
6547
6548 seq_mp = NULL;
6549 if (fchdr.type != 0) {
6550 /* Allocate a buffer */
6551 seq_mp = (MATCHMAP *)emlxs_mem_get(hba, buf_type);
6552
6553 if (!seq_mp) {
6554 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6555 "RQ ENTRY: %s: Out of buffer "
6556 "resources. Dropping...",
6557 label);
6558
6559 goto done;
6560 }
6561
6562 iocbq->bp = (uint8_t *)seq_mp;
6563 }
6564
6565 node = (void *)emlxs_node_find_did(port, fchdr.s_id, 1);
6566 if (node == NULL) {
6567 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6568 "RQ ENTRY: %s: Node not found. sid=%x",
6569 label, fchdr.s_id);
6570 }
6571
6572 /* Initialize the iocbq */
6573 iocbq->port = port;
6574 iocbq->channel = cp;
6575 iocbq->node = node;
6576
6577 iocb = &iocbq->iocb;
6578 iocb->RXSEQCNT = 0;
6579 iocb->RXSEQLEN = 0;
6580
6581 seq_len = 0;
6582 seq_cnt = 0;
6583
6584 } else {
6585
6586 iocb = &iocbq->iocb;
6587 port = iocbq->port;
6588 node = (emlxs_node_t *)iocbq->node;
6589
6590 seq_mp = (MATCHMAP *)iocbq->bp;
6591 seq_len = iocb->RXSEQLEN;
6592 seq_cnt = iocb->RXSEQCNT;
6593
6594 /* Check sequence order */
6595 if (fchdr.seq_cnt != seq_cnt) {
6596 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6597 "RQ ENTRY: %s: Out of order frame received "
6598 "(%d != %d). Dropping...",
6599 label, fchdr.seq_cnt, seq_cnt);
6600
6601 goto done;
6602 }
6603 }
6604
6605 /* We now have an iocbq */
6606
6607 if (!port->vpip->vfip) {
6608 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6609 "RQ ENTRY: %s: No fabric connection. "
6610 "Dropping...",
6611 label);
6612
6613 goto done;
6614 }
6615
6616 /* Save the frame data to our seq buffer */
6617 if (data_size && seq_mp) {
6618 /* Get the next data rqb */
6619 data_mp = &data_rq->rqb[host_index];
6620
6621 offset = (off_t)((uint64_t)((unsigned long)
6622 data_mp->virt) -
6623 (uint64_t)((unsigned long)
6624 hba->sli.sli4.slim2.virt));
6625
6626 EMLXS_MPDATA_SYNC(data_mp->dma_handle, offset,
6627 data_size, DDI_DMA_SYNC_FORKERNEL);
6628
6629 data = (uint32_t *)data_mp->virt;
6630
6631 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6632 "RQ DAT[%d]: %08x %08x %08x %08x %08x %08x ...",
6633 host_index, data[0], data[1], data[2], data[3],
6634 data[4], data[5]);
6635
6636 /* Check sequence length */
6637 if ((seq_len + data_size) > seq_mp->size) {
6638 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6639 "RQ ENTRY: %s: Sequence buffer overflow. "
6640 "(%d > %d). Dropping...",
6641 label, (seq_len + data_size), seq_mp->size);
6642
6643 goto done;
6644 }
6645
6646 /* Copy data to local receive buffer */
6647 bcopy((uint8_t *)data, ((uint8_t *)seq_mp->virt +
6648 seq_len), data_size);
6649
6650 seq_len += data_size;
6651 }
6652
6653 /* If this is not the last frame of sequence, queue it. */
6654 if (!(fchdr.f_ctl & F_CTL_END_SEQ)) {
6655 /* Save sequence header */
6656 if (seq_cnt == 0) {
6657 bcopy((uint8_t *)&fchdr, (uint8_t *)iocb->RXFCHDR,
6658 sizeof (fc_frame_hdr_t));
6659 }
6660
6661 /* Update sequence info in iocb */
6662 iocb->RXSEQCNT = seq_cnt + 1;
6663 iocb->RXSEQLEN = seq_len;
6664
6665 /* Queue iocbq for next frame */
6666 emlxs_sli4_rxq_put(hba, iocbq);
6667
6668 /* Don't free resources */
6669 iocbq = NULL;
6670
6671 /* No need to abort */
6672 abort = 0;
6673
6674 goto done;
6675 }
6676
6677 emlxs_sli4_rq_post(port, hdr_rq->qid);
6678 posted = 1;
6679
6680 /* End of sequence found. Process request now. */
6681
6682 if (seq_cnt > 0) {
6683 /* Retrieve first frame of sequence */
6684 bcopy((uint8_t *)iocb->RXFCHDR, (uint8_t *)&fchdr,
6685 sizeof (fc_frame_hdr_t));
6686
6687 bzero((uint8_t *)iocb, sizeof (emlxs_iocb_t));
6688 }
6689
6690 /* Build rcv iocb and process it */
6691 switch (fchdr.type) {
6692 case 0: /* BLS */
6693
6694 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6695 "RQ ENTRY: %s: oxid:%x rxid %x sid:%x. Sending BLS ACC...",
6696 label, fchdr.ox_id, fchdr.rx_id, fchdr.s_id);
6697
6698 /* Try to send abort response */
6699 if (!(pkt = emlxs_pkt_alloc(port, 0, 0, 0, KM_NOSLEEP))) {
6700 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6701 "RQ ENTRY: %s: Unable to alloc pkt. Dropping...",
6702 label);
6703 goto done;
6704 }
6705
6706 /* Setup sbp / iocb for driver initiated cmd */
6707 sbp = PKT2PRIV(pkt);
6708
6709 /* Free the temporary iocbq */
6710 emlxs_mem_put(hba, MEM_IOCB, (void *)iocbq);
6711
6712 iocbq = (emlxs_iocbq_t *)&sbp->iocbq;
6713 iocbq->port = port;
6714 iocbq->channel = cp;
6715 iocbq->node = node;
6716
6717 sbp->pkt_flags &= ~PACKET_ULP_OWNED;
6718
6719 if (node) {
6720 sbp->node = node;
6721 sbp->did = node->nlp_DID;
6722 }
6723
6724 iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
6725
6726 /* BLS ACC Response */
6727 wqe = &iocbq->wqe;
6728 bzero((void *)wqe, sizeof (emlxs_wqe_t));
6729
6730 iocbq->iocb.ULPCOMMAND = CMD_XMIT_BLS_RSP64_CX;
6731 wqe->Command = CMD_XMIT_BLS_RSP64_CX;
6732 wqe->CmdType = WQE_TYPE_GEN;
6733
6734 wqe->un.BlsRsp.Payload0 = 0x80;
6735 wqe->un.BlsRsp.Payload1 = fchdr.seq_id;
6736
6737 wqe->un.BlsRsp.OXId = fchdr.ox_id;
6738 wqe->un.BlsRsp.RXId = fchdr.rx_id;
6739
6740 wqe->un.BlsRsp.SeqCntLow = 0;
6741 wqe->un.BlsRsp.SeqCntHigh = 0xFFFF;
6742
6743 wqe->un.BlsRsp.XO = ((fchdr.f_ctl & F_CTL_XCHG_CONTEXT)? 1:0);
6744 wqe->un.BlsRsp.AR = 0;
6745
6746 rpip = EMLXS_NODE_TO_RPI(port, node);
6747
6748 if (rpip) {
6749 wqe->ContextType = WQE_RPI_CONTEXT;
6750 wqe->ContextTag = rpip->RPI;
6751 } else {
6752 wqe->ContextType = WQE_VPI_CONTEXT;
6753 wqe->ContextTag = port->vpip->VPI;
6754
6755 rpip = emlxs_rpi_reserve_notify(port, fchdr.s_id, 0);
6756
6757 if (!rpip) {
6758 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6759 "RQ ENTRY: %s: Unable to alloc "
6760 "reserved RPI. Dropping...",
6761 label);
6762
6763 goto done;
6764 }
6765
6766 /* Store the reserved rpi */
6767 wqe->CmdSpecific = rpip->RPI;
6768
6769 wqe->un.BlsRsp.RemoteId = fchdr.s_id;
6770 wqe->un.BlsRsp.LocalId = fchdr.d_id;
6771 }
6772
6773 if (fchdr.f_ctl & F_CTL_CHAINED_SEQ) {
6774 wqe->CCPE = 1;
6775 wqe->CCP = fchdr.rsvd;
6776 }
6777
6778 /* Allocate an exchange for this command */
6779 xrip = emlxs_sli4_alloc_xri(port, sbp, rpip,
6780 EMLXS_XRI_SOL_BLS_TYPE);
6781
6782 if (!xrip) {
6783 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6784 "RQ ENTRY: %s: Unable to alloc XRI. Dropping...",
6785 label);
6786 goto done;
6787 }
6788
6789 wqe->XRITag = xrip->XRI;
6790 wqe->Class = CLASS3;
6791 wqe->RequestTag = xrip->iotag;
6792 wqe->CQId = (uint16_t)0xffff; /* default CQ for response */
6793
6794 sbp->ticks = hba->timer_tics + 30;
6795
6796 emlxs_sli4_issue_iocb_cmd(hba, iocbq->channel, iocbq);
6797
6798 /* The temporary iocbq has been freed already */
6799 iocbq = NULL;
6800
6801 break;
6802
6803 case 1: /* ELS */
6804 cmd = *((uint32_t *)seq_mp->virt);
6805 cmd &= ELS_CMD_MASK;
6806
6807 if (!(port->vpip->flag & EMLXS_VPI_PORT_ENABLED)) {
6808 uint32_t dropit = 1;
6809
6810 /* Allow for P2P handshaking */
6811 switch (cmd) {
6812 case ELS_CMD_FLOGI:
6813 dropit = 0;
6814 break;
6815
6816 case ELS_CMD_PLOGI:
6817 case ELS_CMD_PRLI:
6818 if (hba->flag & FC_PT_TO_PT) {
6819 dropit = 0;
6820 }
6821 break;
6822 }
6823
6824 if (dropit) {
6825 EMLXS_MSGF(EMLXS_CONTEXT,
6826 &emlxs_sli_detail_msg,
6827 "RQ ENTRY: %s: Port not yet enabled. "
6828 "Dropping...",
6829 label);
6830 goto done;
6831 }
6832 }
6833
6834 rpip = NULL;
6835
6836 if (cmd != ELS_CMD_LOGO) {
6837 rpip = EMLXS_NODE_TO_RPI(port, node);
6838 }
6839
6840 if (!rpip) {
6841 /* Use the fabric rpi */
6842 rpip = port->vpip->fabric_rpip;
6843 }
6844
6845 xrip = emlxs_sli4_reserve_xri(port, rpip,
6846 EMLXS_XRI_UNSOL_ELS_TYPE, fchdr.ox_id);
6847
6848 if (!xrip) {
6849 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6850 "RQ ENTRY: %s: Out of exchange "
6851 "resources. Dropping...",
6852 label);
6853
6854 goto done;
6855 }
6856
6857 /* Build CMD_RCV_ELS64_CX */
6858 iocb->un.rcvels64.elsReq.tus.f.bdeFlags = 0;
6859 iocb->un.rcvels64.elsReq.tus.f.bdeSize = seq_len;
6860 iocb->un.rcvels64.elsReq.addrLow = PADDR_LO(seq_mp->phys);
6861 iocb->un.rcvels64.elsReq.addrHigh = PADDR_HI(seq_mp->phys);
6862 iocb->ULPBDECOUNT = 1;
6863
6864 iocb->un.rcvels64.remoteID = fchdr.s_id;
6865 iocb->un.rcvels64.parmRo = fchdr.d_id;
6866
6867 iocb->ULPPU = 0x3;
6868 iocb->ULPCONTEXT = xrip->XRI;
6869 iocb->ULPIOTAG = ((node)? node->nlp_Rpi:0);
6870 iocb->ULPCLASS = CLASS3;
6871 iocb->ULPCOMMAND = CMD_RCV_ELS64_CX;
6872
6873 iocb->unsli3.ext_rcv.seq_len = seq_len;
6874 iocb->unsli3.ext_rcv.vpi = port->vpip->VPI;
6875 iocb->unsli3.ext_rcv.oxid = fchdr.ox_id;
6876
6877 if (fchdr.f_ctl & F_CTL_CHAINED_SEQ) {
6878 iocb->unsli3.ext_rcv.ccpe = 1;
6879 iocb->unsli3.ext_rcv.ccp = fchdr.rsvd;
6880 }
6881
6882 if (port->mode == MODE_INITIATOR) {
6883 (void) emlxs_els_handle_unsol_req(port, iocbq->channel,
6884 iocbq, seq_mp, seq_len);
6885 }
6886 #ifdef SFCT_SUPPORT
6887 else if (port->mode == MODE_TARGET) {
6888 (void) emlxs_fct_handle_unsol_els(port, iocbq->channel,
6889 iocbq, seq_mp, seq_len);
6890 }
6891 #endif /* SFCT_SUPPORT */
6892 break;
6893
6894 #ifdef SFCT_SUPPORT
6895 case 8: /* FCT */
6896 if (!(port->VPIobj.flag & EMLXS_VPI_PORT_ENABLED)) {
6897 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6898 "RQ ENTRY: %s: Port not yet enabled. "
6899 "Dropping...",
6900 label);
6901
6902 goto done;
6903 }
6904
6905 rpip = EMLXS_NODE_TO_RPI(port, node);
6906
6907 if (!rpip) {
6908 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6909 "RQ ENTRY: %s: Port not logged in. "
6910 "Dropping...",
6911 label);
6912
6913 goto done;
6914 }
6915
6916 xrip = emlxs_sli4_reserve_xri(port, rpip,
6917 EMLXS_XRI_UNSOL_FCP_TYPE, fchdr.ox_id);
6918
6919 if (!xrip) {
6920 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6921 "RQ ENTRY: %s: Out of exchange "
6922 "resources. Dropping...",
6923 label);
6924
6925 goto done;
6926 }
6927
6928 /* Build CMD_RCV_SEQUENCE64_CX */
6929 iocb->un.rcvseq64.rcvBde.tus.f.bdeFlags = 0;
6930 iocb->un.rcvseq64.rcvBde.tus.f.bdeSize = seq_len;
6931 iocb->un.rcvseq64.rcvBde.addrLow = PADDR_LO(seq_mp->phys);
6932 iocb->un.rcvseq64.rcvBde.addrHigh = PADDR_HI(seq_mp->phys);
6933 iocb->ULPBDECOUNT = 1;
6934
6935 iocb->ULPPU = 0x3;
6936 iocb->ULPCONTEXT = xrip->XRI;
6937 iocb->ULPIOTAG = ((node)? node->nlp_Rpi:0);
6938 iocb->ULPCLASS = CLASS3;
6939 iocb->ULPCOMMAND = CMD_RCV_ELS64_CX;
6940
6941 iocb->unsli3.ext_rcv.seq_len = seq_len;
6942 iocb->unsli3.ext_rcv.vpi = port->VPIobj.VPI;
6943 iocb->unsli3.ext_rcv.oxid = fchdr.ox_id;
6944
6945 if (fchdr.f_ctl & F_CTL_CHAINED_SEQ) {
6946 iocb->unsli3.ext_rcv.ccpe = 1;
6947 iocb->unsli3.ext_rcv.ccp = fchdr.rsvd;
6948 }
6949
6950 /* pass xrip to FCT in the iocbq */
6951 iocbq->sbp = xrip;
6952
6953 (void) emlxs_fct_handle_unsol_req(port, cp, iocbq,
6954 seq_mp, seq_len);
6955 break;
6956 #endif /* SFCT_SUPPORT */
6957
6958 case 0x20: /* CT */
6959 if (!(port->vpip->flag & EMLXS_VPI_PORT_ENABLED) &&
6960 !(hba->flag & FC_LOOPBACK_MODE)) {
6961 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6962 "RQ ENTRY: %s: Port not yet enabled. "
6963 "Dropping...",
6964 label);
6965
6966 goto done;
6967 }
6968
6969 if (!node) {
6970 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6971 "RQ ENTRY: %s: Node not found (did=%x). "
6972 "Dropping...",
6973 label, fchdr.d_id);
6974
6975 goto done;
6976 }
6977
6978 rpip = EMLXS_NODE_TO_RPI(port, node);
6979
6980 if (!rpip) {
6981 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6982 "RQ ENTRY: %s: RPI not found (did=%x rpi=%d). "
6983 "Dropping...",
6984 label, fchdr.d_id, node->nlp_Rpi);
6985
6986 goto done;
6987 }
6988
6989 xrip = emlxs_sli4_reserve_xri(port, rpip,
6990 EMLXS_XRI_UNSOL_CT_TYPE, fchdr.ox_id);
6991
6992 if (!xrip) {
6993 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6994 "RQ ENTRY: %s: Out of exchange "
6995 "resources. Dropping...",
6996 label);
6997
6998 goto done;
6999 }
7000
7001 /* Build CMD_RCV_SEQ64_CX */
7002 iocb->un.rcvseq64.rcvBde.tus.f.bdeFlags = 0;
7003 iocb->un.rcvseq64.rcvBde.tus.f.bdeSize = seq_len;
7004 iocb->un.rcvseq64.rcvBde.addrLow = PADDR_LO(seq_mp->phys);
7005 iocb->un.rcvseq64.rcvBde.addrHigh = PADDR_HI(seq_mp->phys);
7006 iocb->ULPBDECOUNT = 1;
7007
7008 iocb->un.rcvseq64.xrsqRo = 0;
7009 iocb->un.rcvseq64.w5.hcsw.Rctl = fchdr.r_ctl;
7010 iocb->un.rcvseq64.w5.hcsw.Type = fchdr.type;
7011 iocb->un.rcvseq64.w5.hcsw.Dfctl = fchdr.df_ctl;
7012 iocb->un.rcvseq64.w5.hcsw.Fctl = fchdr.f_ctl;
7013
7014 iocb->ULPPU = 0x3;
7015 iocb->ULPCONTEXT = xrip->XRI;
7016 iocb->ULPIOTAG = rpip->RPI;
7017 iocb->ULPCLASS = CLASS3;
7018 iocb->ULPCOMMAND = CMD_RCV_SEQ64_CX;
7019
7020 iocb->unsli3.ext_rcv.seq_len = seq_len;
7021 iocb->unsli3.ext_rcv.vpi = port->vpip->VPI;
7022
7023 if (fchdr.f_ctl & F_CTL_CHAINED_SEQ) {
7024 iocb->unsli3.ext_rcv.ccpe = 1;
7025 iocb->unsli3.ext_rcv.ccp = fchdr.rsvd;
7026 }
7027
7028 (void) emlxs_ct_handle_unsol_req(port, iocbq->channel,
7029 iocbq, seq_mp, seq_len);
7030
7031 break;
7032 }
7033
7034 /* Sequence handled, no need to abort */
7035 abort = 0;
7036
7037 done:
7038
7039 if (!posted) {
7040 emlxs_sli4_rq_post(port, hdr_rq->qid);
7041 }
7042
7043 if (abort) {
7044 /* Send ABTS for this exchange */
7045 /* !!! Currently, we have no implementation for this !!! */
7046 abort = 0;
7047 }
7048
7049 /* Return memory resources to pools */
7050 if (iocbq) {
7051 if (iocbq->bp) {
7052 emlxs_mem_put(hba, buf_type, (void *)iocbq->bp);
7053 iocbq->bp = 0;
7054 }
7055
7056 emlxs_mem_put(hba, MEM_IOCB, (void *)iocbq);
7057 }
7058
7059 #ifdef FMA_SUPPORT
7060 if (emlxs_fm_check_dma_handle(hba,
7061 hba->sli.sli4.slim2.dma_handle)
7062 != DDI_FM_OK) {
7063 EMLXS_MSGF(EMLXS_CONTEXT,
7064 &emlxs_invalid_dma_handle_msg,
7065 "sli4_process_unsol_rcv: hdl=%p",
7066 hba->sli.sli4.slim2.dma_handle);
7067
7068 emlxs_thread_spawn(hba, emlxs_restart_thread,
7069 0, 0);
7070 }
7071 #endif
7072 return;
7073
7074 } /* emlxs_sli4_process_unsol_rcv() */
7075
7076
7077 /*ARGSUSED*/
7078 static void
emlxs_sli4_process_xri_aborted(emlxs_hba_t * hba,CQ_DESC_t * cq,CQE_XRI_Abort_t * cqe)7079 emlxs_sli4_process_xri_aborted(emlxs_hba_t *hba, CQ_DESC_t *cq,
7080 CQE_XRI_Abort_t *cqe)
7081 {
7082 emlxs_port_t *port = &PPORT;
7083 XRIobj_t *xrip;
7084
7085 mutex_enter(&EMLXS_FCTAB_LOCK);
7086
7087 xrip = emlxs_sli4_find_xri(port, cqe->XRI);
7088 if (xrip == NULL) {
7089 /* EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg, */
7090 /* "CQ ENTRY: process xri aborted ignored"); */
7091
7092 mutex_exit(&EMLXS_FCTAB_LOCK);
7093 return;
7094 }
7095
7096 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
7097 "CQ ENTRY: XRI Aborted: xri=%d IA=%d EO=%d BR=%d",
7098 cqe->XRI, cqe->IA, cqe->EO, cqe->BR);
7099
7100 if (!(xrip->flag & EMLXS_XRI_BUSY)) {
7101 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
7102 "CQ ENTRY: XRI Aborted: xri=%d flag=%x. Bad state.",
7103 xrip->XRI, xrip->flag);
7104
7105 mutex_exit(&EMLXS_FCTAB_LOCK);
7106 return;
7107 }
7108
7109 /* Exchange is no longer busy on-chip, free it */
7110 emlxs_sli4_free_xri(port, 0, xrip, 0);
7111
7112 mutex_exit(&EMLXS_FCTAB_LOCK);
7113
7114 return;
7115
7116 } /* emlxs_sli4_process_xri_aborted () */
7117
7118
/*
 * Drain one completion queue. For each valid CQE the entry is copied to
 * host byte order, consumed, and dispatched to the appropriate handler
 * based on the CQ type and completion code.
 */
/*ARGSUSED*/
static void
emlxs_sli4_process_cq(emlxs_hba_t *hba, CQ_DESC_t *cq)
{
	emlxs_port_t *port = &PPORT;
	CQE_u *cqe;
	CQE_u cq_entry;
	int num_entries = 0;
	off_t offset;

	/* EMLXS_PORT_LOCK must be held when entering this routine */

	cqe = (CQE_u *)cq->addr.virt;
	cqe += cq->host_index;

	/* Offset of this CQ's ring within the shared slim2 DMA area */
	offset = (off_t)((uint64_t)((unsigned long)
	    cq->addr.virt) -
	    (uint64_t)((unsigned long)
	    hba->sli.sli4.slim2.virt));

	EMLXS_MPDATA_SYNC(cq->addr.dma_handle, offset,
	    4096, DDI_DMA_SYNC_FORKERNEL);

	for (;;) {
		/* Bit 31 of word[3] is the CQE valid/phase bit; stop */
		/* when it no longer matches the expected phase */
		cq_entry.word[3] = BE_SWAP32(cqe->word[3]);
		if (((cq_entry.word[3]>>31) & 0x01) != cq->qe_valid) {
#ifdef DEBUG_CQE
			if (num_entries == 0) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_sli_detail_msg, "CQE: Invalid CQE:"
				    " eqid=%x cqid=%x cqe=%p %08x %08x %08x"
				    " %08x. host_index=%x valid=%d Break...",
				    cq->eqid, cq->qid, cqe,
				    cqe->word[0], cqe->word[1],
				    cqe->word[2], cqe->word[3],
				    cq->host_index, cq->qe_valid);
			}
#endif /* DEBUG_CQE */
			break;
		}

		/* Copy the remaining words to host byte order */
		cq_entry.word[2] = BE_SWAP32(cqe->word[2]);
		cq_entry.word[1] = BE_SWAP32(cqe->word[1]);
		cq_entry.word[0] = BE_SWAP32(cqe->word[0]);

#ifdef DEBUG_CQE
		emlxs_data_dump(port, "CQE", (uint32_t *)cqe, 6, 0);
#endif /* DEBUG_CQE */
		num_entries++;
		/* Consume the entry: with auto-valid (CqAV) only the */
		/* valid bit is preserved, otherwise the word is cleared */
		if (hba->sli.sli4.param.CqAV)
			/* do not attach the valid bit */
			cqe->word[3] &= BE_SWAP32(CQE_VALID);
		else
			cqe->word[3] = 0;

		cq->host_index++;
		if (cq->host_index >= cq->max_index) {
			/* Ring wrapped; with CqAV the expected phase flips */
			cq->host_index = 0;
			cqe = (CQE_u *)cq->addr.virt;
			if (hba->sli.sli4.param.CqAV)
				cq->qe_valid ^= 1;
		} else {
			cqe++;
		}
		/* Drop the port lock while the entry is dispatched */
		mutex_exit(&EMLXS_PORT_LOCK);

		/* Now handle specific cq type */
		if (cq->type == EMLXS_CQ_TYPE_GROUP1) {
			/* Mailbox/async event CQ */
			if (cq_entry.cqAsyncEntry.async_evt) {
				emlxs_sli4_process_async_event(hba,
				    (CQE_ASYNC_t *)&cq_entry);
			} else {
				emlxs_sli4_process_mbox_event(hba,
				    (CQE_MBOX_t *)&cq_entry);
			}
		} else { /* EMLXS_CQ_TYPE_GROUP2 */
			switch (cq_entry.cqCmplEntry.Code) {
			case CQE_TYPE_WQ_COMPLETION:
				/* Out-of-range tags get special handling */
				if (cq_entry.cqCmplEntry.RequestTag <
				    hba->max_iotag) {
					emlxs_sli4_process_wqe_cmpl(hba, cq,
					    (CQE_CmplWQ_t *)&cq_entry);
				} else {
					emlxs_sli4_process_oor_wqe_cmpl(hba, cq,
					    (CQE_CmplWQ_t *)&cq_entry);
				}
				break;
			case CQE_TYPE_RELEASE_WQE:
				emlxs_sli4_process_release_wqe(hba, cq,
				    (CQE_RelWQ_t *)&cq_entry);
				break;
			case CQE_TYPE_UNSOL_RCV:
			case CQE_TYPE_UNSOL_RCV_V1:
				emlxs_sli4_process_unsol_rcv(hba, cq,
				    (CQE_UnsolRcv_t *)&cq_entry);
				break;
			case CQE_TYPE_XRI_ABORTED:
				emlxs_sli4_process_xri_aborted(hba, cq,
				    (CQE_XRI_Abort_t *)&cq_entry);
				break;
			default:
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
				    "Invalid CQ entry eqid=%x qid=%x code=%d: "
				    "%08x %08x %08x %08x, host_index=%x "
				    "valid=%d",
				    cq->eqid, cq->qid,
				    cq_entry.cqCmplEntry.Code, cq_entry.word[0],
				    cq_entry.word[1], cq_entry.word[2],
				    cq_entry.word[3], cq->host_index,
				    cq->qe_valid);
				break;
			}
		}

		/* Reacquire the port lock before examining the next entry */
		mutex_enter(&EMLXS_PORT_LOCK);
	}

	/* Number of times this routine gets called for this CQ */
	cq->isr_count++;

	/* num_entries is the number of CQEs we process in this specific CQ */
	cq->num_proc += num_entries;
	if (cq->max_proc < num_entries)
		cq->max_proc = num_entries;

	/* Tell the hardware how many entries were consumed and re-arm */
	emlxs_sli4_write_cqdb(hba, cq->qid, num_entries, B_TRUE);

	/* EMLXS_PORT_LOCK must be held when exiting this routine */

} /* emlxs_sli4_process_cq() */
7249
7250
/*
 * Drain one event queue. Each valid EQE names a CQ, which is then
 * processed in turn. Finally any channels that were flagged during CQ
 * processing get their service threads triggered and the EQ doorbell
 * is written to re-arm the queue.
 */
/*ARGSUSED*/
static void
emlxs_sli4_process_eq(emlxs_hba_t *hba, EQ_DESC_t *eq)
{
	emlxs_port_t *port = &PPORT;
	uint32_t *ptr;
	CHANNEL *cp;
	EQE_u eqe;
	uint32_t i;
	uint16_t cqi;
	int num_entries = 0;
	off_t offset;

	/* EMLXS_PORT_LOCK must be held when entering this routine */

	/* Keeps emlxs_sli4_hba_kill() from proceeding while we are busy */
	hba->intr_busy_cnt ++;

	ptr = eq->addr.virt;
	ptr += eq->host_index;

	/* Offset of this EQ's ring within the shared slim2 DMA area */
	offset = (off_t)((uint64_t)((unsigned long)
	    eq->addr.virt) -
	    (uint64_t)((unsigned long)
	    hba->sli.sli4.slim2.virt));

	EMLXS_MPDATA_SYNC(eq->addr.dma_handle, offset,
	    4096, DDI_DMA_SYNC_FORKERNEL);

	for (;;) {
		eqe.word = *ptr;
		eqe.word = BE_SWAP32(eqe.word);

		/* Stop when the valid bit no longer matches the phase */
		if ((eqe.word & EQE_VALID) != eq->qe_valid) {
#ifdef DEBUG_FASTPATH
			if (num_entries == 0) {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
				    "EQE: Invalid EQE: %x. host_index=%x "
				    "valid=%d Break...",
				    eqe.word, eq->qe_valid);
			}
#endif /* DEBUG_FASTPATH */
			break;
		}

#ifdef DEBUG_FASTPATH
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "EQE00: %08x", eqe.word);
#endif /* DEBUG_FASTPATH */

		/* Consume the entry: with auto-valid (EqAV) only the */
		/* valid bit is preserved, otherwise the word is cleared */
		if (hba->sli.sli4.param.EqAV)
			*ptr &= BE_SWAP32(EQE_VALID);
		else
			*ptr = 0;
		num_entries++;
		eq->host_index++;
		if (eq->host_index >= eq->max_index) {
			/* Ring wrapped; with EqAV the expected phase flips */
			eq->host_index = 0;
			ptr = eq->addr.virt;
			if (hba->sli.sli4.param.EqAV)
				eq->qe_valid ^= 1;
		} else {
			ptr++;
		}

		cqi = emlxs_sli4_cqid_to_index(hba, eqe.entry.CQId);

		/* Verify CQ index */
		if (cqi == 0xffff) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
			    "EQE: Invalid CQid: %d. valid=%d Dropping...",
			    eqe.entry.CQId, eq->qe_valid);
			continue;
		}

#ifdef DEBUG_FASTPATH
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "EQE: qid=%x host_index=%x valid=%d iptr=%p CQIndex:%x "
		    "cqid:%x",
		    eq->qid, eq->host_index, eq->qe_valid, ptr, cqi,
		    eqe.entry.CQId);
#endif /* DEBUG_FASTPATH */

		/* Drains the named CQ (drops/reacquires EMLXS_PORT_LOCK) */
		emlxs_sli4_process_cq(hba, &hba->sli.sli4.cq[cqi]);
	}

	/* Number of times the ISR for this EQ gets called */
	eq->isr_count++;

	/* num_entries is the number of EQEs we process in this specific ISR */
	eq->num_proc += num_entries;
	if (eq->max_proc < num_entries) {
		eq->max_proc = num_entries;
	}

	/* Kick the service threads for any channels flagged above */
	if (num_entries != 0) {
		for (i = 0; i < hba->chan_count; i++) {
			cp = &hba->chan[i];
			if (cp->chan_flag & EMLXS_NEEDS_TRIGGER) {
				cp->chan_flag &= ~EMLXS_NEEDS_TRIGGER;
				emlxs_thread_trigger2(&cp->intr_thread,
				    emlxs_proc_channel, cp);
			}
		}
	}

	/* Tell the hardware how many entries were consumed and re-arm */
	emlxs_sli4_write_eqdb(hba, eq->qid, num_entries, B_TRUE);

	/* EMLXS_PORT_LOCK must be held when exiting this routine */

	hba->intr_busy_cnt --;

} /* emlxs_sli4_process_eq() */
7363
7364
#ifdef MSI_SUPPORT
/*
 * MSI/MSI-X interrupt handler. The MSI message id selects the EQ to
 * service; legacy (fixed) interrupts are redirected to the INTx handler.
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_msi_intr(char *arg1, char *arg2)
{
	emlxs_hba_t *hba = (emlxs_hba_t *)arg1;
#ifdef DEBUG_FASTPATH
	emlxs_port_t *port = &PPORT;
#endif /* DEBUG_FASTPATH */
	uint16_t vector;

#ifdef DEBUG_FASTPATH
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "msiINTR arg1:%p arg2:%p", arg1, arg2);
#endif /* DEBUG_FASTPATH */

	/* Check for legacy interrupt handling */
	if (hba->intr_type == DDI_INTR_TYPE_FIXED) {
		return (emlxs_sli4_intx_intr(arg1));
	}

	/* Get MSI message id; out-of-range ids fall back to vector 0 */
	vector = (uint16_t)((unsigned long)arg2);
	if (vector >= hba->intr_count) {
		vector = 0;
	}

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Ignore interrupts while the adapter is killed or offline */
	if ((hba->state == FC_KILLED) || (hba->flag & FC_OFFLINE_MODE)) {
		mutex_exit(&EMLXS_PORT_LOCK);
		return (DDI_INTR_UNCLAIMED);
	}

	/* The eq[] index == the MSI vector number */
	emlxs_sli4_process_eq(hba, &hba->sli.sli4.eq[vector]);

	mutex_exit(&EMLXS_PORT_LOCK);

	return (DDI_INTR_CLAIMED);

} /* emlxs_sli4_msi_intr() */
#endif /* MSI_SUPPORT */
7410
7411
7412 /*ARGSUSED*/
7413 static int
emlxs_sli4_intx_intr(char * arg)7414 emlxs_sli4_intx_intr(char *arg)
7415 {
7416 emlxs_hba_t *hba = (emlxs_hba_t *)arg;
7417 #ifdef DEBUG_FASTPATH
7418 emlxs_port_t *port = &PPORT;
7419 #endif /* DEBUG_FASTPATH */
7420
7421 #ifdef DEBUG_FASTPATH
7422 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
7423 "intxINTR arg:%p", arg);
7424 #endif /* DEBUG_FASTPATH */
7425
7426 mutex_enter(&EMLXS_PORT_LOCK);
7427
7428 if ((hba->state == FC_KILLED) || (hba->flag & FC_OFFLINE_MODE)) {
7429 mutex_exit(&EMLXS_PORT_LOCK);
7430 return (DDI_INTR_UNCLAIMED);
7431 }
7432
7433 emlxs_sli4_process_eq(hba, &hba->sli.sli4.eq[0]);
7434
7435 mutex_exit(&EMLXS_PORT_LOCK);
7436 return (DDI_INTR_CLAIMED);
7437 } /* emlxs_sli4_intx_intr() */
7438
7439
7440 static void
emlxs_sli4_hba_kill(emlxs_hba_t * hba)7441 emlxs_sli4_hba_kill(emlxs_hba_t *hba)
7442 {
7443 emlxs_port_t *port = &PPORT;
7444 uint32_t j;
7445
7446 mutex_enter(&EMLXS_PORT_LOCK);
7447 if (hba->flag & FC_INTERLOCKED) {
7448 EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);
7449
7450 mutex_exit(&EMLXS_PORT_LOCK);
7451
7452 return;
7453 }
7454
7455 j = 0;
7456 while (j++ < 10000) {
7457 if ((hba->mbox_queue_flag == 0) &&
7458 (hba->intr_busy_cnt == 0)) {
7459 break;
7460 }
7461
7462 mutex_exit(&EMLXS_PORT_LOCK);
7463 BUSYWAIT_US(100);
7464 mutex_enter(&EMLXS_PORT_LOCK);
7465 }
7466
7467 if ((hba->mbox_queue_flag != 0) || (hba->intr_busy_cnt > 0)) {
7468 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
7469 "Board kill failed. Adapter busy, %d, %d.",
7470 hba->mbox_queue_flag, hba->intr_busy_cnt);
7471 mutex_exit(&EMLXS_PORT_LOCK);
7472 return;
7473 }
7474
7475 hba->flag |= FC_INTERLOCKED;
7476
7477 EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);
7478
7479 mutex_exit(&EMLXS_PORT_LOCK);
7480
7481 } /* emlxs_sli4_hba_kill() */
7482
7483
7484 extern void
emlxs_sli4_hba_reset_all(emlxs_hba_t * hba,uint32_t flag)7485 emlxs_sli4_hba_reset_all(emlxs_hba_t *hba, uint32_t flag)
7486 {
7487 emlxs_port_t *port = &PPORT;
7488 uint32_t value;
7489
7490 mutex_enter(&EMLXS_PORT_LOCK);
7491
7492 if ((hba->sli_intf & SLI_INTF_IF_TYPE_MASK) != SLI_INTF_IF_TYPE_2 &&
7493 (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) != SLI_INTF_IF_TYPE_6) {
7494 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
7495 "Reset All failed. Invalid Operation.");
7496 mutex_exit(&EMLXS_PORT_LOCK);
7497 return;
7498 }
7499
7500 /* Issue a Firmware Reset All Request */
7501 if (flag) {
7502 value = SLI_PHYDEV_FRST | SLI_PHYDEV_FRL_ALL | SLI_PHYDEV_DD;
7503 } else {
7504 value = SLI_PHYDEV_FRST | SLI_PHYDEV_FRL_ALL;
7505 }
7506
7507 ddi_put32(hba->sli.sli4.bar0_acc_handle,
7508 hba->sli.sli4.PHYSDEV_reg_addr, value);
7509
7510 mutex_exit(&EMLXS_PORT_LOCK);
7511
7512 } /* emlxs_sli4_hba_reset_all() */
7513
7514
7515 static void
emlxs_sli4_enable_intr(emlxs_hba_t * hba)7516 emlxs_sli4_enable_intr(emlxs_hba_t *hba)
7517 {
7518 emlxs_config_t *cfg = &CFG;
7519 int i;
7520 int num_cq;
7521
7522 hba->sli.sli4.flag |= EMLXS_SLI4_INTR_ENABLED;
7523
7524 num_cq = (hba->intr_count * cfg[CFG_NUM_WQ].current) +
7525 EMLXS_CQ_OFFSET_WQ;
7526
7527 /* ARM EQ / CQs */
7528 for (i = 0; i < num_cq; i++) {
7529 emlxs_sli4_write_cqdb(hba, hba->sli.sli4.cq[i].qid, 0, B_TRUE);
7530 }
7531
7532 for (i = 0; i < hba->intr_count; i++) {
7533 emlxs_sli4_write_eqdb(hba, hba->sli.sli4.eq[i].qid, 0, B_TRUE);
7534 }
7535 } /* emlxs_sli4_enable_intr() */
7536
7537
7538 static void
emlxs_sli4_disable_intr(emlxs_hba_t * hba,uint32_t att)7539 emlxs_sli4_disable_intr(emlxs_hba_t *hba, uint32_t att)
7540 {
7541 if (att) {
7542 return;
7543 }
7544
7545 hba->sli.sli4.flag &= ~EMLXS_SLI4_INTR_ENABLED;
7546
7547 /* Short of reset, we cannot disable interrupts */
7548 } /* emlxs_sli4_disable_intr() */
7549
/*
 * Tear down all SLI-4 queue, XRI and SGL resources. Everything except
 * the per-XRI SGLs lives inside the single slim2 DMA area, so most of
 * the work is resetting descriptors; the slim2 buffer itself is freed
 * last. Safe to call when nothing was allocated (no-op).
 */
static void
emlxs_sli4_resource_free(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	MBUF_INFO *buf_info;
	uint32_t i;

	buf_info = &hba->sli.sli4.slim2;
	if (buf_info->virt == 0) {
		/* Already free */
		return;
	}

	/* Must be done before EMLXS_PORT_LOCK is taken */
	emlxs_fcf_fini(hba);

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Header templates were carved out of slim2; just forget them */
	buf_info = &hba->sli.sli4.HeaderTmplate;
	if (buf_info->virt) {
		bzero(buf_info, sizeof (MBUF_INFO));
	}

	if (hba->sli.sli4.XRIp) {
		XRIobj_t *xrip;

		/* Sanity check: warn about any XRIs still on the */
		/* in-use list at teardown time */
		if ((hba->sli.sli4.XRIinuse_f !=
		    (XRIobj_t *)&hba->sli.sli4.XRIinuse_f) ||
		    (hba->sli.sli4.XRIinuse_b !=
		    (XRIobj_t *)&hba->sli.sli4.XRIinuse_f)) {
			xrip = (XRIobj_t *)hba->sli.sli4.XRIinuse_f;
			while (xrip != (XRIobj_t *)&hba->sli.sli4.XRIinuse_f) {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
				    "XRIs in use during free!: %p %p != %p "
				    "XRI:%d iotag:%d\n",
				    hba->sli.sli4.XRIinuse_f,
				    hba->sli.sli4.XRIinuse_b, xrip, xrip->XRI,
				    xrip->iotag);
				xrip = xrip->_f;
			}
		}

		/* Return each XRI's SGL to its memory pool */
		/* (XRI 0 never had one allocated) */
		xrip = hba->sli.sli4.XRIp;
		for (i = 0; i < hba->sli.sli4.XRICount; i++) {
			xrip->XRI = emlxs_sli4_index_to_xri(hba, i);

			if (xrip->XRI != 0)
				emlxs_mem_put(hba, xrip->SGSeg, xrip->SGList);

			xrip++;
		}

		kmem_free(hba->sli.sli4.XRIp,
		    (sizeof (XRIobj_t) * hba->sli.sli4.XRICount));
		hba->sli.sli4.XRIp = NULL;

		/* Reset the free list to empty (self-referencing head) */
		hba->sli.sli4.XRIfree_f =
		    (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
		hba->sli.sli4.XRIfree_b =
		    (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
		hba->sli.sli4.xrif_count = 0;
	}

	/* Reset all queue descriptors; qid 0xffff marks "unassigned" */
	for (i = 0; i < hba->intr_count; i++) {
		mutex_destroy(&hba->sli.sli4.eq[i].lastwq_lock);
		bzero(&hba->sli.sli4.eq[i], sizeof (EQ_DESC_t));
		hba->sli.sli4.eq[i].qid = 0xffff;
	}
	for (i = 0; i < EMLXS_MAX_CQS; i++) {
		bzero(&hba->sli.sli4.cq[i], sizeof (CQ_DESC_t));
		hba->sli.sli4.cq[i].qid = 0xffff;
	}
	for (i = 0; i < EMLXS_MAX_WQS; i++) {
		bzero(&hba->sli.sli4.wq[i], sizeof (WQ_DESC_t));
		hba->sli.sli4.wq[i].qid = 0xffff;
	}
	for (i = 0; i < EMLXS_MAX_RXQS; i++) {
		mutex_destroy(&hba->sli.sli4.rxq[i].lock);
		bzero(&hba->sli.sli4.rxq[i], sizeof (RXQ_DESC_t));
	}
	for (i = 0; i < EMLXS_MAX_RQS; i++) {
		mutex_destroy(&hba->sli.sli4.rq[i].lock);
		bzero(&hba->sli.sli4.rq[i], sizeof (RQ_DESC_t));
		hba->sli.sli4.rq[i].qid = 0xffff;
	}

	/* Free the MQ */
	bzero(&hba->sli.sli4.mq, sizeof (MQ_DESC_t));


	/* Finally release the slim2 DMA area backing all of the above */
	buf_info = &hba->sli.sli4.slim2;
	if (buf_info->virt) {
		buf_info->flags = FC_MBUF_DMA;
		emlxs_mem_free(hba, buf_info);
		bzero(buf_info, sizeof (MBUF_INFO));
	}

	mutex_exit(&EMLXS_PORT_LOCK);

	/* GPIO lock */
	if (hba->model_info.flags & EMLXS_GPIO_LEDS)
		mutex_destroy(&hba->gpio_lock);


} /* emlxs_sli4_resource_free() */
7654
/*
 * Allocate all SLI-4 queue, XRI and SGL resources. A single large DMA
 * area (slim2) is allocated and then carved into the EQ, CQ, WQ, MQ and
 * RQ rings, the RQ buffers and (optionally) the RPI header templates,
 * each section kept 4K aligned. XRI objects are kmem allocated and each
 * gets an SGL from the appropriate memory pool.
 *
 * Returns 0 on success, ENOMEM on failure (all partial allocations are
 * released via emlxs_sli4_resource_free()).
 */
static int
emlxs_sli4_resource_alloc(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;
	MBUF_INFO *buf_info;
	int num_eq;
	int num_wq;
	uint16_t i;
	uint32_t j;
	uint32_t k;
	uint16_t cq_depth;
	uint32_t cq_size;
	uint32_t word;
	XRIobj_t *xrip;
	RQE_t *rqe;
	MBUF_INFO *rqb;
	uint64_t phys;		/* running physical address cursor */
	uint64_t tmp_phys;
	char *virt;		/* running virtual address cursor */
	char *tmp_virt;
	void *data_handle;
	void *dma_handle;
	int32_t size;
	off_t offset;
	uint32_t count = 0;	/* total slim2 bytes required */
	uint32_t hddr_size = 0;
	uint32_t align;
	uint32_t iotag;
	uint32_t mseg;

	buf_info = &hba->sli.sli4.slim2;
	if (buf_info->virt) {
		/* Already allocated */
		return (0);
	}

	emlxs_fcf_init(hba);

	/* CQE depth depends on the CQ version reported by the adapter */
	switch (hba->sli.sli4.param.CQV) {
	case 0:
		cq_depth = CQ_DEPTH;
		break;
	case 2:
	default:
		cq_depth = CQ_DEPTH_V2;
		break;
	}
	cq_size = (cq_depth * CQE_SIZE);

	/* EQs - 1 per Interrupt vector */
	num_eq = hba->intr_count;

	/* CQs - number of WQs + 1 for RQs + 1 for mbox/async events */
	num_wq = cfg[CFG_NUM_WQ].current * num_eq;

	/* Calculate total dmable memory we need */
	/* WARNING: make sure each section is aligned on 4K boundary */

	/* EQ */
	count += num_eq * 4096;

	/* CQ */
	count += (num_wq + EMLXS_CQ_OFFSET_WQ) * cq_size;

	/* WQ */
	count += num_wq * (4096 * EMLXS_NUM_WQ_PAGES);

	/* MQ */
	count +=  EMLXS_MAX_MQS * 4096;

	/* RQ */
	count +=  EMLXS_MAX_RQS * 4096;

	/* RQB/E */
	count += RQB_COUNT * (RQB_DATA_SIZE + RQB_HEADER_SIZE);
	count += (4096 - (count%4096)); /* Ensure 4K alignment */

	/* RPI Header Templates */
	if (hba->sli.sli4.param.HDRR) {
		/* Bytes per extent */
		j = hba->sli.sli4.RPIExtSize * sizeof (RPIHdrTmplate_t);

		/* Pages required per extent (page == 4096 bytes) */
		k = (j/4096) + ((j%4096)? 1:0);

		/* Total size */
		hddr_size = (k * hba->sli.sli4.RPIExtCount * 4096);

		count += hddr_size;
	}

	/* Allocate slim2 for SLI4 */
	buf_info = &hba->sli.sli4.slim2;
	buf_info->size = count;
	buf_info->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG;
	buf_info->align = ddi_ptob(hba->dip, 1L);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	    "Allocating memory for slim2: %d", count);

	(void) emlxs_mem_alloc(hba, buf_info);

	if (buf_info->virt == NULL) {
		EMLXS_MSGF(EMLXS_CONTEXT,
		    &emlxs_init_failed_msg,
		    "Unable to allocate internal memory for SLI4: %d",
		    count);
		goto failed;
	}
	bzero(buf_info->virt, buf_info->size);
	EMLXS_MPDATA_SYNC(buf_info->dma_handle, 0,
	    buf_info->size, DDI_DMA_SYNC_FORDEV);

	/* Assign memory to Head Template, EQ, CQ, WQ, RQ and MQ */
	data_handle = buf_info->data_handle;
	dma_handle = buf_info->dma_handle;
	phys = buf_info->phys;
	virt = (char *)buf_info->virt;

	/* Allocate space for queues */

	/* EQ */
	size = 4096;
	for (i = 0; i < num_eq; i++) {
		bzero(&hba->sli.sli4.eq[i], sizeof (EQ_DESC_t));

		/* Each queue descriptor aliases a slice of slim2; the */
		/* shared data/dma handles are duplicated into each one */
		buf_info = &hba->sli.sli4.eq[i].addr;
		buf_info->size = size;
		buf_info->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG;
		buf_info->align = ddi_ptob(hba->dip, 1L);
		buf_info->phys = phys;
		buf_info->virt = (void *)virt;
		buf_info->data_handle = data_handle;
		buf_info->dma_handle = dma_handle;

		phys += size;
		virt += size;

		hba->sli.sli4.eq[i].max_index = EQ_DEPTH;
		hba->sli.sli4.eq[i].qid = 0xffff;

		mutex_init(&hba->sli.sli4.eq[i].lastwq_lock, NULL,
		    MUTEX_DRIVER, NULL);
		hba->sli.sli4.eq[i].qe_valid = 1;
	}


	/* CQ */
	for (i = 0; i < (num_wq + EMLXS_CQ_OFFSET_WQ); i++) {
		bzero(&hba->sli.sli4.cq[i], sizeof (CQ_DESC_t));

		buf_info = &hba->sli.sli4.cq[i].addr;
		buf_info->size = cq_size;
		buf_info->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG;
		buf_info->align = ddi_ptob(hba->dip, 1L);
		buf_info->phys = phys;
		buf_info->virt = (void *)virt;
		buf_info->data_handle = data_handle;
		buf_info->dma_handle = dma_handle;

		phys += cq_size;
		virt += cq_size;

		hba->sli.sli4.cq[i].max_index = cq_depth;
		hba->sli.sli4.cq[i].qid = 0xffff;
		hba->sli.sli4.cq[i].qe_valid = 1;
	}


	/* WQ */
	size = 4096 * EMLXS_NUM_WQ_PAGES;
	for (i = 0; i < num_wq; i++) {
		bzero(&hba->sli.sli4.wq[i], sizeof (WQ_DESC_t));

		buf_info = &hba->sli.sli4.wq[i].addr;
		buf_info->size = size;
		buf_info->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG;
		buf_info->align = ddi_ptob(hba->dip, 1L);
		buf_info->phys = phys;
		buf_info->virt = (void *)virt;
		buf_info->data_handle = data_handle;
		buf_info->dma_handle = dma_handle;

		phys += size;
		virt += size;

		hba->sli.sli4.wq[i].max_index = WQ_DEPTH;
		hba->sli.sli4.wq[i].release_depth = WQE_RELEASE_DEPTH;
		hba->sli.sli4.wq[i].qid = 0xFFFF;
	}


	/* MQ */
	size = 4096;
	bzero(&hba->sli.sli4.mq, sizeof (MQ_DESC_t));

	buf_info = &hba->sli.sli4.mq.addr;
	buf_info->size = size;
	buf_info->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG;
	buf_info->align = ddi_ptob(hba->dip, 1L);
	buf_info->phys = phys;
	buf_info->virt = (void *)virt;
	buf_info->data_handle = data_handle;
	buf_info->dma_handle = dma_handle;

	phys += size;
	virt += size;

	hba->sli.sli4.mq.max_index = MQ_DEPTH;


	/* RXQ */
	for (i = 0; i < EMLXS_MAX_RXQS; i++) {
		bzero(&hba->sli.sli4.rxq[i], sizeof (RXQ_DESC_t));

		mutex_init(&hba->sli.sli4.rxq[i].lock, NULL, MUTEX_DRIVER,
		    NULL);
	}


	/* RQ */
	size = 4096;
	for (i = 0; i < EMLXS_MAX_RQS; i++) {
		bzero(&hba->sli.sli4.rq[i], sizeof (RQ_DESC_t));

		buf_info = &hba->sli.sli4.rq[i].addr;
		buf_info->size = size;
		buf_info->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG;
		buf_info->align = ddi_ptob(hba->dip, 1L);
		buf_info->phys = phys;
		buf_info->virt = (void *)virt;
		buf_info->data_handle = data_handle;
		buf_info->dma_handle = dma_handle;

		phys += size;
		virt += size;

		hba->sli.sli4.rq[i].max_index = RQ_DEPTH;
		hba->sli.sli4.rq[i].qid = 0xFFFF;

		mutex_init(&hba->sli.sli4.rq[i].lock, NULL, MUTEX_DRIVER, NULL);
	}


	/* RQB/E */
	/* Odd-numbered RQs carry data buffers, even-numbered ones */
	/* carry header buffers; each RQE points at its buffer */
	for (i = 0; i < EMLXS_MAX_RQS; i++) {
		size = (i & 0x1) ? RQB_DATA_SIZE : RQB_HEADER_SIZE;
		tmp_phys = phys;
		tmp_virt = virt;

		/* Initialize the RQEs */
		rqe = (RQE_t *)hba->sli.sli4.rq[i].addr.virt;
		for (j = 0; j < (RQ_DEPTH/RQB_COUNT); j++) {
			/* Buffers are shared: every group of RQB_COUNT */
			/* RQEs re-uses the same buffer addresses */
			phys = tmp_phys;
			virt = tmp_virt;
			for (k = 0; k < RQB_COUNT; k++) {
				word = PADDR_HI(phys);
				rqe->AddrHi = BE_SWAP32(word);

				word = PADDR_LO(phys);
				rqe->AddrLo = BE_SWAP32(word);

				rqb = &hba->sli.sli4.rq[i].
				    rqb[k + (j * RQB_COUNT)];
				rqb->size = size;
				rqb->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG;
				rqb->align = ddi_ptob(hba->dip, 1L);
				rqb->phys = phys;
				rqb->virt = (void *)virt;
				rqb->data_handle = data_handle;
				rqb->dma_handle = dma_handle;

				phys += size;
				virt += size;
#ifdef DEBUG_RQE
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
				    "RQ_ALLOC: rq[%d] rqb[%d,%d]=%p flags=%d",
				    i, j, k, rqb, rqb->flags);
#endif /* DEBUG_RQE */

				rqe++;
			}
		}

		offset = (off_t)((uint64_t)((unsigned long)
		    hba->sli.sli4.rq[i].addr.virt) -
		    (uint64_t)((unsigned long)
		    hba->sli.sli4.slim2.virt));

		/* Sync the RQ buffer list */
		EMLXS_MPDATA_SYNC(hba->sli.sli4.rq[i].addr.dma_handle, offset,
		    hba->sli.sli4.rq[i].addr.size, DDI_DMA_SYNC_FORDEV);
	}

	/* 4K Alignment */
	align = (4096 - (phys%4096));
	phys += align;
	virt += align;

	/* RPI Header Templates */
	if (hba->sli.sli4.param.HDRR) {
		buf_info = &hba->sli.sli4.HeaderTmplate;
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = hddr_size;
		buf_info->flags = FC_MBUF_DMA;
		buf_info->align = ddi_ptob(hba->dip, 1L);
		buf_info->phys = phys;
		buf_info->virt = (void *)virt;
		buf_info->data_handle = data_handle;
		buf_info->dma_handle = dma_handle;
	}

	/* SGL */

	/* NOTE(review): 'size' here is stale from the RQ loop above, and */
	/* sizeof() is printed with %d; the message is informational only */
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	    "Allocating memory for %d SGLs: %d/%d",
	    hba->sli.sli4.XRICount, sizeof (XRIobj_t), size);

	/* Initialize double linked lists */
	hba->sli.sli4.XRIinuse_f =
	    (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
	hba->sli.sli4.XRIinuse_b =
	    (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
	hba->sli.sli4.xria_count = 0;

	hba->sli.sli4.XRIfree_f =
	    (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
	hba->sli.sli4.XRIfree_b =
	    (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
	hba->sli.sli4.xrif_count = 0;

	/* Map the adapter's SGL size to the matching buffer pool */
	switch (hba->sli.sli4.mem_sgl_size) {
	case 1024:
		mseg = MEM_SGL1K;
		break;
	case 2048:
		mseg = MEM_SGL2K;
		break;
	case 4096:
		mseg = MEM_SGL4K;
		break;
	default:
		EMLXS_MSGF(EMLXS_CONTEXT,
		    &emlxs_init_failed_msg,
		    "Unsupported SGL Size: %d", hba->sli.sli4.mem_sgl_size);
		goto failed;
	}

	hba->sli.sli4.XRIp = (XRIobj_t *)kmem_zalloc(
	    (sizeof (XRIobj_t) * hba->sli.sli4.XRICount), KM_SLEEP);

	xrip = hba->sli.sli4.XRIp;
	iotag = 1;

	for (i = 0; i < hba->sli.sli4.XRICount; i++) {
		xrip->XRI = emlxs_sli4_index_to_xri(hba, i);

		/* We don't use XRI==0, since it also represents an */
		/* uninitialized exchange */
		if (xrip->XRI == 0) {
			xrip++;
			continue;
		}

		xrip->iotag = iotag++;
		xrip->sge_count =
		    (hba->sli.sli4.mem_sgl_size / sizeof (ULP_SGE64));

		/* Add xrip to end of free list */
		xrip->_b = hba->sli.sli4.XRIfree_b;
		hba->sli.sli4.XRIfree_b->_f = xrip;
		xrip->_f = (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
		hba->sli.sli4.XRIfree_b = xrip;
		hba->sli.sli4.xrif_count++;

		/* Allocate SGL for this xrip */
		xrip->SGSeg = mseg;
		xrip->SGList = emlxs_mem_get(hba, xrip->SGSeg);

		if (xrip->SGList == NULL) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
			    "Unable to allocate memory for SGL %d", i);
			goto failed;
		}

		EMLXS_MPDATA_SYNC(xrip->SGList->dma_handle, 0,
		    xrip->SGList->size, DDI_DMA_SYNC_FORDEV);

		xrip++;
	}

	/* GPIO lock */
	if (hba->model_info.flags & EMLXS_GPIO_LEDS)
		mutex_init(&hba->gpio_lock, NULL, MUTEX_DRIVER, NULL);

#ifdef FMA_SUPPORT
	if (hba->sli.sli4.slim2.dma_handle) {
		if (emlxs_fm_check_dma_handle(hba,
		    hba->sli.sli4.slim2.dma_handle)
		    != DDI_FM_OK) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_invalid_dma_handle_msg,
			    "sli4_resource_alloc: hdl=%p",
			    hba->sli.sli4.slim2.dma_handle);
			goto failed;
		}
	}
#endif /* FMA_SUPPORT */

	return (0);

failed:

	/* Release everything allocated so far */
	(void) emlxs_sli4_resource_free(hba);
	return (ENOMEM);

} /* emlxs_sli4_resource_alloc */
8073
8074
8075 extern void
emlxs_sli4_zero_queue_stat(emlxs_hba_t * hba)8076 emlxs_sli4_zero_queue_stat(emlxs_hba_t *hba)
8077 {
8078 uint32_t i;
8079 uint32_t num_wq;
8080 emlxs_config_t *cfg = &CFG;
8081 clock_t time;
8082
8083 /* EQ */
8084 for (i = 0; i < hba->intr_count; i++) {
8085 hba->sli.sli4.eq[i].num_proc = 0;
8086 hba->sli.sli4.eq[i].max_proc = 0;
8087 hba->sli.sli4.eq[i].isr_count = 0;
8088 }
8089 num_wq = cfg[CFG_NUM_WQ].current * hba->intr_count;
8090 /* CQ */
8091 for (i = 0; i < (num_wq + EMLXS_CQ_OFFSET_WQ); i++) {
8092 hba->sli.sli4.cq[i].num_proc = 0;
8093 hba->sli.sli4.cq[i].max_proc = 0;
8094 hba->sli.sli4.cq[i].isr_count = 0;
8095 }
8096 /* WQ */
8097 for (i = 0; i < num_wq; i++) {
8098 hba->sli.sli4.wq[i].num_proc = 0;
8099 hba->sli.sli4.wq[i].num_busy = 0;
8100 }
8101 /* RQ */
8102 for (i = 0; i < EMLXS_MAX_RQS; i++) {
8103 hba->sli.sli4.rq[i].num_proc = 0;
8104 }
8105 (void) drv_getparm(LBOLT, &time);
8106 hba->sli.sli4.que_stat_timer = (uint32_t)time;
8107
8108 } /* emlxs_sli4_zero_queue_stat */
8109
8110
/*
 * Reserve an XRI (exchange) from the free list without attaching a
 * packet to it.  The XRI is moved to the in-use list, marked
 * EMLXS_XRI_RESERVED, and bound to the caller's RPI.  Use
 * emlxs_sli4_register_xri() later to attach an sbp, or
 * emlxs_sli4_unreserve_xri() to give the exchange back.
 *
 * Returns the reserved XRIobj_t, or NULL if no free XRI or iotag
 * slot is available.  Acquires/releases EMLXS_FCTAB_LOCK internally.
 */
extern XRIobj_t *
emlxs_sli4_reserve_xri(emlxs_port_t *port, RPIobj_t *rpip, uint32_t type,
    uint16_t rx_id)
{
	emlxs_hba_t *hba = HBA;
	XRIobj_t *xrip;
	uint16_t iotag;

	mutex_enter(&EMLXS_FCTAB_LOCK);

	xrip = hba->sli.sli4.XRIfree_f;

	/* An empty free list points back at its own head node */
	if (xrip == (XRIobj_t *)&hba->sli.sli4.XRIfree_f) {
		mutex_exit(&EMLXS_FCTAB_LOCK);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
		    "Unable to reserve XRI. type=%d",
		    type);

		return (NULL);
	}

	iotag = xrip->iotag;

	/* iotag 0 is invalid; a live (non-stale) fc_table entry means */
	/* this command slot still belongs to an outstanding I/O */
	if ((!iotag) ||
	    ((hba->fc_table[iotag] != NULL) &&
	    (hba->fc_table[iotag] != STALE_PACKET))) {
		/*
		 * No more command slots available, retry later
		 */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to reserve iotag. type=%d",
		    type);

		mutex_exit(&EMLXS_FCTAB_LOCK);
		return (NULL);
	}

	/* Mark the exchange reserved and bind it to the RPI */
	xrip->state = XRI_STATE_ALLOCATED;
	xrip->type = type;
	xrip->flag = EMLXS_XRI_RESERVED;
	xrip->sbp = NULL;

	xrip->rpip = rpip;
	xrip->rx_id = rx_id;
	rpip->xri_count++;

	/* Take it off free list */
	(xrip->_b)->_f = xrip->_f;
	(xrip->_f)->_b = xrip->_b;
	xrip->_f = NULL;
	xrip->_b = NULL;
	hba->sli.sli4.xrif_count--;

	/* Add it to end of inuse list */
	xrip->_b = hba->sli.sli4.XRIinuse_b;
	hba->sli.sli4.XRIinuse_b->_f = xrip;
	xrip->_f = (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
	hba->sli.sli4.XRIinuse_b = xrip;
	hba->sli.sli4.xria_count++;

	mutex_exit(&EMLXS_FCTAB_LOCK);
	return (xrip);

} /* emlxs_sli4_reserve_xri() */
8176
8177
/*
 * Return a previously reserved (but never registered) XRI to the
 * free list.  If 'lock' is non-zero, EMLXS_FCTAB_LOCK is acquired
 * here; otherwise the caller must already hold it.
 *
 * Returns 0 on success (or if the XRI was already free), 1 if the
 * XRI is registered/in-use and cannot be unreserved.
 */
extern uint32_t
emlxs_sli4_unreserve_xri(emlxs_port_t *port, uint16_t xri, uint32_t lock)
{
	emlxs_hba_t *hba = HBA;
	XRIobj_t *xrip;

	if (lock) {
		mutex_enter(&EMLXS_FCTAB_LOCK);
	}

	xrip = emlxs_sli4_find_xri(port, xri);

	/* Nothing to do if the exchange is unknown or already free */
	if (!xrip || xrip->state == XRI_STATE_FREE) {
		if (lock) {
			mutex_exit(&EMLXS_FCTAB_LOCK);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "sli4_unreserve_xri:%d already freed.", xri);
		return (0);
	}

	/* Flush this unsolicited ct command */
	if (xrip->type == EMLXS_XRI_UNSOL_CT_TYPE) {
		(void) emlxs_flush_ct_event(port, xrip->rx_id);
	}

	/* A registered (non-reserved) exchange cannot be unreserved */
	if (!(xrip->flag & EMLXS_XRI_RESERVED)) {
		if (lock) {
			mutex_exit(&EMLXS_FCTAB_LOCK);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "sli4_unreserve_xri:%d in use. type=%d",
		    xrip->XRI, xrip->type);
		return (1);
	}

	/* A reserved XRI should have no packet attached; if a live */
	/* fc_table entry is found, drop it before freeing */
	if (xrip->iotag &&
	    (hba->fc_table[xrip->iotag] != NULL) &&
	    (hba->fc_table[xrip->iotag] != STALE_PACKET)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
		    "sli4_unreserve_xri:%d sbp dropped:%p type=%d",
		    xrip->XRI, hba->fc_table[xrip->iotag], xrip->type);

		hba->fc_table[xrip->iotag] = NULL;
		hba->io_count--;
	}

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "sli4_unreserve_xri:%d unreserved. type=%d",
	    xrip->XRI, xrip->type);

	xrip->state = XRI_STATE_FREE;
	xrip->type = 0;

	/* Release the RPI reference(s) held by this exchange */
	if (xrip->rpip) {
		xrip->rpip->xri_count--;
		xrip->rpip = NULL;
	}

	if (xrip->reserved_rpip) {
		xrip->reserved_rpip->xri_count--;
		xrip->reserved_rpip = NULL;
	}

	/* Take it off inuse list */
	(xrip->_b)->_f = xrip->_f;
	(xrip->_f)->_b = xrip->_b;
	xrip->_f = NULL;
	xrip->_b = NULL;
	hba->sli.sli4.xria_count--;

	/* Add it to end of free list */
	xrip->_b = hba->sli.sli4.XRIfree_b;
	hba->sli.sli4.XRIfree_b->_f = xrip;
	xrip->_f = (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
	hba->sli.sli4.XRIfree_b = xrip;
	hba->sli.sli4.xrif_count++;

	if (lock) {
		mutex_exit(&EMLXS_FCTAB_LOCK);
	}

	return (0);

} /* emlxs_sli4_unreserve_xri() */
8265
8266
/*
 * Attach a packet (sbp) to a previously reserved XRI, completing the
 * reserve/register pair.  Clears EMLXS_XRI_RESERVED, installs the
 * sbp in fc_table[] under the XRI's iotag, and, if the exchange was
 * reserved under the placeholder FABRIC_RPI, rebinds it to the real
 * RPI for 'did' if one has since been registered.
 *
 * Returns the registered XRIobj_t, or NULL if the XRI cannot be
 * found, is not reserved, or its iotag slot is occupied.  Acquires
 * and releases EMLXS_FCTAB_LOCK internally.
 */
XRIobj_t *
emlxs_sli4_register_xri(emlxs_port_t *port, emlxs_buf_t *sbp, uint16_t xri,
    uint32_t did)
{
	emlxs_hba_t *hba = HBA;
	uint16_t iotag;
	XRIobj_t *xrip;
	emlxs_node_t *node;
	RPIobj_t *rpip;

	mutex_enter(&EMLXS_FCTAB_LOCK);

	xrip = sbp->xrip;
	if (!xrip) {
		/* Packet not yet bound; look the XRI up by id */
		xrip = emlxs_sli4_find_xri(port, xri);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "sli4_register_xri:%d XRI not found.", xri);

			mutex_exit(&EMLXS_FCTAB_LOCK);
			return (NULL);
		}
	}

	/* Only an allocated, still-reserved exchange may be registered */
	if ((xrip->state == XRI_STATE_FREE) ||
	    !(xrip->flag & EMLXS_XRI_RESERVED)) {

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "sli4_register_xri:%d Invalid XRI. xrip=%p "
		    "state=%x flag=%x",
		    xrip->XRI, xrip, xrip->state, xrip->flag);

		mutex_exit(&EMLXS_FCTAB_LOCK);
		return (NULL);
	}

	iotag = xrip->iotag;

	/* The iotag slot must be empty (or stale) before we claim it */
	if ((!iotag) ||
	    ((hba->fc_table[iotag] != NULL) &&
	    (hba->fc_table[iotag] != STALE_PACKET))) {

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "sli4_register_xri:%d Invalid fc_table entry. "
		    "iotag=%d entry=%p",
		    xrip->XRI, iotag, hba->fc_table[iotag]);

		mutex_exit(&EMLXS_FCTAB_LOCK);
		return (NULL);
	}

	/* Bind packet and exchange to each other */
	hba->fc_table[iotag] = sbp;
	hba->io_count++;

	sbp->iotag = iotag;
	sbp->xrip = xrip;

	xrip->flag &= ~EMLXS_XRI_RESERVED;
	xrip->sbp = sbp;

	/* If we did not have a registered RPI when we reserved */
	/* this exchange, check again now. */
	if (xrip->rpip && (xrip->rpip->RPI == FABRIC_RPI)) {
		node = emlxs_node_find_did(port, did, 1);
		rpip = EMLXS_NODE_TO_RPI(port, node);

		if (rpip && (rpip->RPI != FABRIC_RPI)) {
			/* Move the XRI to the new RPI */
			xrip->rpip->xri_count--;
			xrip->rpip = rpip;
			rpip->xri_count++;
		}
	}

	mutex_exit(&EMLXS_FCTAB_LOCK);

	return (xrip);

} /* emlxs_sli4_register_xri() */
8347
8348
/* Performs both reserve and register functions for XRI */
/*
 * Pull an XRI from the free list and bind it to 'sbp' and 'rpip' in
 * a single step, moving it to the in-use list.  Unlike
 * emlxs_sli4_reserve_xri(), the exchange is returned unreserved
 * (flag = 0) with the packet already installed in fc_table[].
 *
 * Returns the allocated XRIobj_t, or NULL if no free XRI or iotag
 * slot is available.  Acquires/releases EMLXS_FCTAB_LOCK internally.
 */
static XRIobj_t *
emlxs_sli4_alloc_xri(emlxs_port_t *port, emlxs_buf_t *sbp, RPIobj_t *rpip,
    uint32_t type)
{
	emlxs_hba_t *hba = HBA;
	XRIobj_t *xrip;
	uint16_t iotag;

	mutex_enter(&EMLXS_FCTAB_LOCK);

	xrip = hba->sli.sli4.XRIfree_f;

	/* An empty free list points back at its own head node */
	if (xrip == (XRIobj_t *)&hba->sli.sli4.XRIfree_f) {
		mutex_exit(&EMLXS_FCTAB_LOCK);

		return (NULL);
	}

	/* Get the iotag by registering the packet */
	iotag = xrip->iotag;

	/* iotag 0 is invalid; a live (non-stale) fc_table entry means */
	/* this command slot still belongs to an outstanding I/O */
	if ((!iotag) ||
	    ((hba->fc_table[iotag] != NULL) &&
	    (hba->fc_table[iotag] != STALE_PACKET))) {
		/*
		 * No more command slots available, retry later
		 */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to alloc iotag:(0x%x)(%p) type=%d",
		    iotag, hba->fc_table[iotag], type);

		mutex_exit(&EMLXS_FCTAB_LOCK);
		return (NULL);
	}

	/* Bind packet and exchange to each other */
	hba->fc_table[iotag] = sbp;
	hba->io_count++;

	sbp->iotag = iotag;
	sbp->xrip = xrip;

	xrip->state = XRI_STATE_ALLOCATED;
	xrip->type = type;
	xrip->flag = 0;
	xrip->sbp = sbp;

	xrip->rpip = rpip;
	rpip->xri_count++;

	/* Take it off free list */
	(xrip->_b)->_f = xrip->_f;
	(xrip->_f)->_b = xrip->_b;
	xrip->_f = NULL;
	xrip->_b = NULL;
	hba->sli.sli4.xrif_count--;

	/* Add it to end of inuse list */
	xrip->_b = hba->sli.sli4.XRIinuse_b;
	hba->sli.sli4.XRIinuse_b->_f = xrip;
	xrip->_f = (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
	hba->sli.sli4.XRIinuse_b = xrip;
	hba->sli.sli4.xria_count++;

	mutex_exit(&EMLXS_FCTAB_LOCK);

	return (xrip);

} /* emlxs_sli4_alloc_xri() */
8418
8419
8420 /* EMLXS_FCTAB_LOCK must be held to enter */
8421 extern XRIobj_t *
emlxs_sli4_find_xri(emlxs_port_t * port,uint16_t xri)8422 emlxs_sli4_find_xri(emlxs_port_t *port, uint16_t xri)
8423 {
8424 emlxs_hba_t *hba = HBA;
8425 XRIobj_t *xrip;
8426
8427 xrip = (XRIobj_t *)hba->sli.sli4.XRIinuse_f;
8428 while (xrip != (XRIobj_t *)&hba->sli.sli4.XRIinuse_f) {
8429 if ((xrip->state >= XRI_STATE_ALLOCATED) &&
8430 (xrip->XRI == xri)) {
8431 return (xrip);
8432 }
8433 xrip = xrip->_f;
8434 }
8435
8436 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
8437 "Unable to find XRI x%x", xri);
8438
8439 return (NULL);
8440
8441 } /* emlxs_sli4_find_xri() */
8442
8443
8444
8445
/*
 * Release an XRI and/or its associated packet.  If 'xrip' is given,
 * the exchange is unbound from its RPI(s), cleared from fc_table[],
 * and returned to the free list.  If 'sbp' is given, its iotag/xrip
 * bindings are cleared and its TXQ/CHIPQ flags reset.  Either
 * argument may be NULL.  If 'lock' is non-zero, EMLXS_FCTAB_LOCK is
 * acquired here; otherwise the caller must already hold it.
 */
extern void
emlxs_sli4_free_xri(emlxs_port_t *port, emlxs_buf_t *sbp, XRIobj_t *xrip,
    uint8_t lock)
{
	emlxs_hba_t *hba = HBA;

	if (lock) {
		mutex_enter(&EMLXS_FCTAB_LOCK);
	}

	if (xrip) {
		/* Guard against double free */
		if (xrip->state == XRI_STATE_FREE) {
			if (lock) {
				mutex_exit(&EMLXS_FCTAB_LOCK);
			}
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Free XRI:%x, Already freed. type=%d",
			    xrip->XRI, xrip->type);
			return;
		}

		/* Flush any pending unsolicited CT event for this exchange */
		if (xrip->type == EMLXS_XRI_UNSOL_CT_TYPE) {
			(void) emlxs_flush_ct_event(port, xrip->rx_id);
		}

		/* Drop the fc_table entry if a live packet is installed */
		if (xrip->iotag &&
		    (hba->fc_table[xrip->iotag] != NULL) &&
		    (hba->fc_table[xrip->iotag] != STALE_PACKET)) {
			hba->fc_table[xrip->iotag] = NULL;
			hba->io_count--;
		}

		xrip->state = XRI_STATE_FREE;
		xrip->type = 0;
		xrip->flag = 0;

		/* Release the RPI reference(s) held by this exchange */
		if (xrip->rpip) {
			xrip->rpip->xri_count--;
			xrip->rpip = NULL;
		}

		if (xrip->reserved_rpip) {
			xrip->reserved_rpip->xri_count--;
			xrip->reserved_rpip = NULL;
		}

		/* Take it off inuse list */
		(xrip->_b)->_f = xrip->_f;
		(xrip->_f)->_b = xrip->_b;
		xrip->_f = NULL;
		xrip->_b = NULL;
		hba->sli.sli4.xria_count--;

		/* Add it to end of free list */
		xrip->_b = hba->sli.sli4.XRIfree_b;
		hba->sli.sli4.XRIfree_b->_f = xrip;
		xrip->_f = (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
		hba->sli.sli4.XRIfree_b = xrip;
		hba->sli.sli4.xrif_count++;
	}

	if (sbp) {
		/* Don't touch a packet the ULP owns or that completed */
		if (!(sbp->pkt_flags & PACKET_VALID) ||
		    (sbp->pkt_flags &
		    (PACKET_ULP_OWNED|PACKET_COMPLETED|PACKET_IN_COMPLETION))) {
			if (lock) {
				mutex_exit(&EMLXS_FCTAB_LOCK);
			}
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Free XRI: sbp invalid. sbp=%p flags=%x xri=%d",
			    sbp, sbp->pkt_flags, ((xrip)? xrip->XRI:0));
			return;
		}

		/* Diagnostic only; cleanup proceeds regardless */
		if (xrip && (xrip->iotag != sbp->iotag)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
			    "sbp/iotag mismatch %p iotag:%d %d", sbp,
			    sbp->iotag, xrip->iotag);
		}

		if (sbp->iotag) {
			if (sbp == hba->fc_table[sbp->iotag]) {
				hba->fc_table[sbp->iotag] = NULL;
				hba->io_count--;

				if (sbp->xrip) {
					/* Exchange is still reserved */
					sbp->xrip->flag |= EMLXS_XRI_RESERVED;
				}
			}
			sbp->iotag = 0;
		}

		if (xrip) {
			sbp->xrip = 0;
		}

		if (lock) {
			mutex_exit(&EMLXS_FCTAB_LOCK);
		}

		/* Clean up the sbp */
		mutex_enter(&sbp->mtx);

		if (sbp->pkt_flags & PACKET_IN_TXQ) {
			sbp->pkt_flags &= ~PACKET_IN_TXQ;
			hba->channel_tx_count--;
		}

		if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
			sbp->pkt_flags &= ~PACKET_IN_CHIPQ;
		}

		mutex_exit(&sbp->mtx);
	} else {
		if (lock) {
			mutex_exit(&EMLXS_FCTAB_LOCK);
		}
	}

} /* emlxs_sli4_free_xri() */
8567
8568
/*
 * Post the physical addresses of all pre-allocated SGL pages to the
 * adapter using non-embedded FCOE_CFG_POST_SGL_PAGES mailbox
 * commands, batching as many consecutive XRIs per command as the
 * non-embedded buffer can hold, iterating over every XRI extent.
 * XRI 0 entries are skipped (XRI 0 denotes an uninitialized
 * exchange; see emlxs_sli4_resource_alloc).
 *
 * Returns 0 on success, EIO on buffer-allocation or mailbox failure.
 */
static int
emlxs_sli4_post_sgl_pages(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
	MAILBOX4 *mb = (MAILBOX4 *)mbq;
	emlxs_port_t *port = &PPORT;
	XRIobj_t *xrip;
	MATCHMAP *mp;
	mbox_req_hdr_t *hdr_req;
	uint32_t i;
	uint32_t cnt;
	uint32_t xri_cnt;
	uint32_t j;
	uint32_t size;
	IOCTL_FCOE_CFG_POST_SGL_PAGES *post_sgl;

	bzero((void *) mb, MAILBOX_CMD_SLI4_BSIZE);
	mbq->bp = NULL;
	mbq->mbox_cmpl = NULL;

	/* Payload is too large to embed; use a separate buffer */
	if ((mp = emlxs_mem_buf_alloc(hba, EMLXS_MAX_NONEMBED_SIZE)) == 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "Unable to POST_SGL. Mailbox cmd=%x ",
		    mb->mbxCommand);
		return (EIO);
	}
	mbq->nonembed = (void *)mp;

	/*
	 * Signifies a non embedded command
	 */
	mb->un.varSLIConfig.be.embedded = 0;
	mb->mbxCommand = MBX_SLI_CONFIG;
	mb->mbxOwner = OWN_HOST;

	hdr_req = (mbox_req_hdr_t *)mp->virt;
	post_sgl =
	    (IOCTL_FCOE_CFG_POST_SGL_PAGES *)(hdr_req + 1);

	xrip = hba->sli.sli4.XRIp;

	/* For each extent */
	for (j = 0; j < hba->sli.sli4.XRIExtCount; j++) {
		cnt = hba->sli.sli4.XRIExtSize;
		while (cnt) {
			/* XRI 0 has no SGL allocated; skip it */
			if (xrip->XRI == 0) {
				cnt--;
				xrip++;
				continue;
			}

			/* Rebuild the request for each batch */
			bzero((void *) hdr_req, mp->size);
			size = mp->size - IOCTL_HEADER_SZ;

			mb->un.varSLIConfig.be.payload_length =
			    mp->size;
			mb->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
			    IOCTL_SUBSYSTEM_FCOE;
			mb->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
			    FCOE_OPCODE_CFG_POST_SGL_PAGES;
			mb->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
			mb->un.varSLIConfig.be.un_hdr.hdr_req.req_length = size;

			hdr_req->subsystem = IOCTL_SUBSYSTEM_FCOE;
			hdr_req->opcode = FCOE_OPCODE_CFG_POST_SGL_PAGES;
			hdr_req->timeout = 0;
			hdr_req->req_length = size;

			post_sgl->params.request.xri_count = 0;
			post_sgl->params.request.xri_start = xrip->XRI;

			/* Max page entries this buffer can carry */
			xri_cnt = (size -
			    sizeof (IOCTL_FCOE_CFG_POST_SGL_PAGES)) /
			    sizeof (FCOE_SGL_PAGES);

			/* Batch as many consecutive XRIs as will fit */
			for (i = 0; (i < xri_cnt) && cnt; i++) {
				post_sgl->params.request.xri_count++;
				post_sgl->params.request.pages[i].\
				    sgl_page0.addrLow =
				    PADDR_LO(xrip->SGList->phys);
				post_sgl->params.request.pages[i].\
				    sgl_page0.addrHigh =
				    PADDR_HI(xrip->SGList->phys);

				cnt--;
				xrip++;
			}

			if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
			    MBX_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
				    "Unable to POST_SGL. Mailbox cmd=%x "
				    "status=%x XRI cnt:%d start:%d",
				    mb->mbxCommand, mb->mbxStatus,
				    post_sgl->params.request.xri_count,
				    post_sgl->params.request.xri_start);
				emlxs_mem_buf_free(hba, mp);
				mbq->nonembed = NULL;
				return (EIO);
			}
		}
	}

	emlxs_mem_buf_free(hba, mp);
	mbq->nonembed = NULL;
	return (0);

} /* emlxs_sli4_post_sgl_pages() */
8676
8677
/*
 * Post the RPI header template memory to the adapter, one embedded
 * FCOE_POST_HDR_TEMPLATES mailbox command per RPI extent.  Only
 * performed when the adapter reports header template support
 * (param.HDRR); otherwise returns success immediately.
 *
 * Returns 0 on success, EIO on mailbox failure.
 */
static int
emlxs_sli4_post_hdr_tmplates(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
	MAILBOX4 *mb = (MAILBOX4 *)mbq;
	emlxs_port_t *port = &PPORT;
	uint32_t j;
	uint32_t k;
	uint64_t addr;
	IOCTL_FCOE_POST_HDR_TEMPLATES *post_hdr;
	uint16_t num_pages;

	/* Nothing to post if header templates are unsupported */
	if (!(hba->sli.sli4.param.HDRR)) {
		return (0);
	}

	/* Bytes per extent */
	j = hba->sli.sli4.RPIExtSize * sizeof (RPIHdrTmplate_t);

	/* Pages required per extent (page == 4096 bytes) */
	num_pages = (j/4096) + ((j%4096)? 1:0);

	/* Template memory was carved out in emlxs_sli4_resource_alloc */
	addr = hba->sli.sli4.HeaderTmplate.phys;

	/* For each extent */
	for (j = 0; j < hba->sli.sli4.RPIExtCount; j++) {
		bzero((void *) mb, MAILBOX_CMD_SLI4_BSIZE);
		mbq->bp = NULL;
		mbq->mbox_cmpl = NULL;

		/*
		 * Signifies an embedded command
		 */
		mb->un.varSLIConfig.be.embedded = 1;

		mb->mbxCommand = MBX_SLI_CONFIG;
		mb->mbxOwner = OWN_HOST;
		mb->un.varSLIConfig.be.payload_length =
		    sizeof (IOCTL_FCOE_POST_HDR_TEMPLATES) + IOCTL_HEADER_SZ;
		mb->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
		    IOCTL_SUBSYSTEM_FCOE;
		mb->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
		    FCOE_OPCODE_POST_HDR_TEMPLATES;
		mb->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
		mb->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
		    sizeof (IOCTL_FCOE_POST_HDR_TEMPLATES);

		post_hdr =
		    (IOCTL_FCOE_POST_HDR_TEMPLATES *)
		    &mb->un.varSLIConfig.payload;
		post_hdr->params.request.num_pages = num_pages;
		post_hdr->params.request.rpi_offset = hba->sli.sli4.RPIBase[j];

		/* Supply the physical address of each page */
		for (k = 0; k < num_pages; k++) {
			post_hdr->params.request.pages[k].addrLow =
			    PADDR_LO(addr);
			post_hdr->params.request.pages[k].addrHigh =
			    PADDR_HI(addr);
			addr += 4096;
		}

		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
		    MBX_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Unable to POST_HDR_TEMPLATES. Mailbox cmd=%x "
			    "status=%x ",
			    mb->mbxCommand, mb->mbxStatus);
			return (EIO);
		}
		emlxs_data_dump(port, "POST_HDR", (uint32_t *)mb, 18, 0);
	}

	return (0);

} /* emlxs_sli4_post_hdr_tmplates() */
8752
8753
/*
 * Create all SLI4 queues on the adapter: one EQ per interrupt
 * vector, the CQs fed by each EQ, the WQs bound to those CQs, the
 * RQ header/data pairs for unsolicited receive traffic, and finally
 * the MQ for mailbox/async event handling.  Queue ids returned by
 * the firmware are recorded in hba->sli.sli4.
 *
 * Returns 0 on success, EIO if any creation mailbox fails.
 */
static int
emlxs_sli4_create_queues(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
	MAILBOX4 *mb = (MAILBOX4 *)mbq;
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;
	IOCTL_COMMON_EQ_CREATE *eq;
	IOCTL_COMMON_CQ_CREATE *cq;
	IOCTL_FCOE_WQ_CREATE *wq;
	IOCTL_FCOE_RQ_CREATE *rq;
	IOCTL_COMMON_MQ_CREATE *mq;
	IOCTL_COMMON_MQ_CREATE_EXT *mq_ext;
	uint16_t i, j;
	uint16_t num_cq, total_cq;
	uint16_t num_wq, total_wq;

	/*
	 * The first CQ is reserved for ASYNC events,
	 * the second is reserved for unsol rcv, the rest
	 * correspond to WQs. (WQ0 -> CQ2, WQ1 -> CQ3, ...)
	 */

	total_cq = 0;
	total_wq = 0;

	/* Create EQ's */
	for (i = 0; i < hba->intr_count; i++) {
		emlxs_mb_eq_create(hba, mbq, i);
		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
		    MBX_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
			    "Unable to Create EQ %d: Mailbox cmd=%x status=%x ",
			    i, mb->mbxCommand, mb->mbxStatus);
			return (EIO);
		}
		eq = (IOCTL_COMMON_EQ_CREATE *)&mb->un.varSLIConfig.payload;
		hba->sli.sli4.eq[i].qid = eq->params.response.EQId;
		hba->sli.sli4.eq[i].lastwq = total_wq;
		hba->sli.sli4.eq[i].msix_vector = i;

		emlxs_data_dump(port, "EQ0_CREATE", (uint32_t *)mb, 18, 0);
		num_wq = cfg[CFG_NUM_WQ].current;
		num_cq = num_wq;
		if (i == 0) {
			/* One for RQ handling, one for mbox/event handling */
			num_cq += EMLXS_CQ_OFFSET_WQ;
		}

		/* Create CQ's */
		for (j = 0; j < num_cq; j++) {
			/* Reuse mbq from previous mbox */
			bzero(mbq, sizeof (MAILBOXQ));

			/* Feed this CQ from the EQ just created */
			hba->sli.sli4.cq[total_cq].eqid =
			    hba->sli.sli4.eq[i].qid;

			emlxs_mb_cq_create(hba, mbq, total_cq);
			if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
			    MBX_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_init_failed_msg, "Unable to Create "
				    "CQ %d: hba=%p Mailbox cmd=%x status=%x ",
				    total_cq, hba, mb->mbxCommand,
				    mb->mbxStatus);
				return (EIO);
			}
			cq = (IOCTL_COMMON_CQ_CREATE *)
			    &mb->un.varSLIConfig.payload;
			hba->sli.sli4.cq[total_cq].qid =
			    cq->params.response.CQId;

			switch (total_cq) {
			case EMLXS_CQ_MBOX:
				/* First CQ is for async event handling */
				hba->sli.sli4.cq[total_cq].type =
				    EMLXS_CQ_TYPE_GROUP1;
				break;

			case EMLXS_CQ_RCV:
				/* Second CQ is for unsol receive handling */
				hba->sli.sli4.cq[total_cq].type =
				    EMLXS_CQ_TYPE_GROUP2;
				break;

			default:
				/* Setup CQ to channel mapping */
				hba->sli.sli4.cq[total_cq].type =
				    EMLXS_CQ_TYPE_GROUP2;
				hba->sli.sli4.cq[total_cq].channelp =
				    &hba->chan[total_cq - EMLXS_CQ_OFFSET_WQ];
				break;
			}
			hba->sli.sli4.cq[total_cq].qe_valid = 1;
			emlxs_data_dump(port, "CQX_CREATE", (uint32_t *)mb,
			    18, 0);
			total_cq++;
		}

		/* Create WQ's */
		for (j = 0; j < num_wq; j++) {
			/* Reuse mbq from previous mbox */
			bzero(mbq, sizeof (MAILBOXQ));

			/* WQn completes on CQ(n + EMLXS_CQ_OFFSET_WQ) */
			hba->sli.sli4.wq[total_wq].cqid =
			    hba->sli.sli4.cq[total_wq + EMLXS_CQ_OFFSET_WQ].qid;

			emlxs_mb_wq_create(hba, mbq, total_wq);
			if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
			    MBX_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_init_failed_msg, "Unable to Create "
				    "WQ %d: Mailbox cmd=%x status=%x ",
				    total_wq, mb->mbxCommand, mb->mbxStatus);
				return (EIO);
			}
			wq = (IOCTL_FCOE_WQ_CREATE *)
			    &mb->un.varSLIConfig.payload;
			hba->sli.sli4.wq[total_wq].qid =
			    wq->params.response.WQId;

			hba->sli.sli4.wq[total_wq].cqid =
			    hba->sli.sli4.cq[total_wq+EMLXS_CQ_OFFSET_WQ].qid;
			emlxs_data_dump(port, "WQ_CREATE", (uint32_t *)mb,
			    18, 0);
			total_wq++;
		}
		hba->last_msiid = i;
	}

	/* We assume 1 RQ pair will handle ALL incoming data */
	/* Create RQs */
	for (i = 0; i < EMLXS_MAX_RQS; i++) {
		/* Personalize the RQ */
		switch (i) {
		case 0:
			hba->sli.sli4.rq[i].cqid =
			    hba->sli.sli4.cq[EMLXS_CQ_RCV].qid;
			break;
		case 1:
			hba->sli.sli4.rq[i].cqid =
			    hba->sli.sli4.cq[EMLXS_CQ_RCV].qid;
			break;
		default:
			hba->sli.sli4.rq[i].cqid = 0xffff;
		}

		/* Reuse mbq from previous mbox */
		bzero(mbq, sizeof (MAILBOXQ));

		emlxs_mb_rq_create(hba, mbq, i);
		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
		    MBX_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
			    "Unable to Create RQ %d: Mailbox cmd=%x status=%x ",
			    i, mb->mbxCommand, mb->mbxStatus);
			return (EIO);
		}

		rq = (IOCTL_FCOE_RQ_CREATE *)&mb->un.varSLIConfig.payload;
		hba->sli.sli4.rq[i].qid = rq->params.response.RQId;
		emlxs_data_dump(port, "RQ CREATE", (uint32_t *)mb, 18, 0);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "RQ CREATE: rq[%d].qid=%d cqid=%d",
		    i, hba->sli.sli4.rq[i].qid, hba->sli.sli4.rq[i].cqid);

		/* Initialize the host_index */
		hba->sli.sli4.rq[i].host_index = 0;

		/* If Data queue was just created, */
		/* then post buffers using the header qid */
		if ((i & 0x1)) {
			/* Ring the RQ doorbell to post buffers */

			emlxs_sli4_write_rqdb(hba, hba->sli.sli4.rq[i-1].qid,
			    RQB_COUNT);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "RQ CREATE: Doorbell rang: qid=%d count=%d",
			    hba->sli.sli4.rq[i-1].qid, RQB_COUNT);
		}
	}

	/* Create MQ */

	/* Personalize the MQ */
	hba->sli.sli4.mq.cqid = hba->sli.sli4.cq[EMLXS_CQ_MBOX].qid;

	/* Reuse mbq from previous mbox */
	bzero(mbq, sizeof (MAILBOXQ));

	/* Try the extended MQ create first; fall back to the */
	/* basic version if it fails */
	emlxs_mb_mq_create_ext(hba, mbq);
	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
	    MBX_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to Create MQ_EXT %d: Mailbox cmd=%x status=%x ",
		    i, mb->mbxCommand, mb->mbxStatus);

		/* Reuse mbq from previous mbox */
		bzero(mbq, sizeof (MAILBOXQ));

		emlxs_mb_mq_create(hba, mbq);
		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
		    MBX_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
			    "Unable to Create MQ %d: Mailbox cmd=%x status=%x ",
			    i, mb->mbxCommand, mb->mbxStatus);
			return (EIO);
		}

		mq = (IOCTL_COMMON_MQ_CREATE *)&mb->un.varSLIConfig.payload;
		hba->sli.sli4.mq.qid = mq->params.response.MQId;
		return (0);
	}

	mq_ext = (IOCTL_COMMON_MQ_CREATE_EXT *)&mb->un.varSLIConfig.payload;
	hba->sli.sli4.mq.qid = mq_ext->params.response.MQId;
	return (0);

} /* emlxs_sli4_create_queues() */
8974
8975
8976 extern void
emlxs_sli4_timer(emlxs_hba_t * hba)8977 emlxs_sli4_timer(emlxs_hba_t *hba)
8978 {
8979 /* Perform SLI4 level timer checks */
8980
8981 emlxs_fcf_timer_notify(hba);
8982
8983 emlxs_sli4_timer_check_mbox(hba);
8984
8985 return;
8986
8987 } /* emlxs_sli4_timer() */
8988
8989
/*
 * Check for a mailbox command that has exceeded its timeout.  If one
 * is found: log it (per queue mode), mark the adapter
 * FC_MBOX_TIMEOUT and transition to FC_ERROR, wake any sleeping or
 * polling mailbox waiters, and spawn the shutdown thread.  Called
 * periodically from emlxs_sli4_timer().  No-op when the timeout
 * feature is disabled via configuration.
 */
static void
emlxs_sli4_timer_check_mbox(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;
	MAILBOX *mb = NULL;

	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		return;
	}

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Return if timer hasn't expired */
	if (!hba->mbox_timer || (hba->timer_tics < hba->mbox_timer)) {
		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}

	/* The first to service the mbox queue will clear the timer */
	hba->mbox_timer = 0;

	/* Identify the outstanding mailbox command, if any */
	if (hba->mbox_queue_flag) {
		if (hba->mbox_mbq) {
			mb = (MAILBOX *)hba->mbox_mbq;
		}
	}

	/* Log the timeout, noting how the command was issued */
	if (mb) {
		switch (hba->mbox_queue_flag) {
		case MBX_NOWAIT:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: Nowait.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand));
			break;

		case MBX_SLEEP:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: mb=%p Sleep.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb);
			break;

		case MBX_POLL:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: mb=%p Polled.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb);
			break;

		default:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: mb=%p (%d).",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb, hba->mbox_queue_flag);
			break;
		}
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg, NULL);
	}

	hba->flag |= FC_MBOX_TIMEOUT;
	EMLXS_STATE_CHANGE_LOCKED(hba, FC_ERROR);

	mutex_exit(&EMLXS_PORT_LOCK);

	/* Perform mailbox cleanup */
	/* This will wake any sleeping or polling threads */
	emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);

	/* Trigger adapter shutdown */
	emlxs_thread_spawn(hba, emlxs_shutdown_thread, 0, 0);

	return;

} /* emlxs_sli4_timer_check_mbox() */
9066
9067 static void
emlxs_sli4_gpio_timer_start(emlxs_hba_t * hba)9068 emlxs_sli4_gpio_timer_start(emlxs_hba_t *hba)
9069 {
9070 mutex_enter(&hba->gpio_lock);
9071
9072 if (!hba->gpio_timer) {
9073 hba->gpio_timer = timeout(emlxs_sli4_gpio_timer, (void *)hba,
9074 drv_usectohz(100000));
9075 }
9076
9077 mutex_exit(&hba->gpio_lock);
9078
9079 } /* emlxs_sli4_gpio_timer_start() */
9080
9081 static void
emlxs_sli4_gpio_timer_stop(emlxs_hba_t * hba)9082 emlxs_sli4_gpio_timer_stop(emlxs_hba_t *hba)
9083 {
9084 mutex_enter(&hba->gpio_lock);
9085
9086 if (hba->gpio_timer) {
9087 (void) untimeout(hba->gpio_timer);
9088 hba->gpio_timer = 0;
9089 }
9090
9091 mutex_exit(&hba->gpio_lock);
9092
9093 delay(drv_usectohz(300000));
9094 } /* emlxs_sli4_gpio_timer_stop() */
9095
9096 static void
emlxs_sli4_gpio_timer(void * arg)9097 emlxs_sli4_gpio_timer(void *arg)
9098 {
9099 emlxs_hba_t *hba = (emlxs_hba_t *)arg;
9100
9101 mutex_enter(&hba->gpio_lock);
9102
9103 if (hba->gpio_timer) {
9104 emlxs_sli4_check_gpio(hba);
9105 hba->gpio_timer = timeout(emlxs_sli4_gpio_timer, (void *)hba,
9106 drv_usectohz(100000));
9107 }
9108
9109 mutex_exit(&hba->gpio_lock);
9110 } /* emlxs_sli4_gpio_timer() */
9111
/*
 * Compute the desired LED GPIO state from link state and speed and,
 * if it differs from the current state, issue a GPIO-write mailbox
 * command to fix one pin (emlxs_sli4_fix_gpio_mbcmpl chains further
 * fixes).  Called from the GPIO poll timer with hba->gpio_lock held.
 */
static void
emlxs_sli4_check_gpio(emlxs_hba_t *hba)
{
	hba->gpio_desired = 0;

	if (hba->flag & FC_GPIO_LINK_UP) {
		/* Activity LED follows outstanding I/O */
		if (hba->io_active)
			hba->gpio_desired |= EMLXS_GPIO_ACT;

		/* This is model specific to ATTO gen5 lancer cards */

		switch (hba->linkspeed) {
		case LA_4GHZ_LINK:
			hba->gpio_desired |= EMLXS_GPIO_LO;
			break;

		case LA_8GHZ_LINK:
			hba->gpio_desired |= EMLXS_GPIO_HI;
			break;

		case LA_16GHZ_LINK:
			hba->gpio_desired |=
			    EMLXS_GPIO_LO | EMLXS_GPIO_HI;
			break;
		}
	}

	if (hba->gpio_current != hba->gpio_desired) {
		emlxs_port_t *port = &PPORT;
		uint8_t pin;
		uint8_t pinval;
		MAILBOXQ *mbq;
		uint32_t rval;

		/* Bail if nothing to fix or a request is already pending */
		if (!emlxs_sli4_fix_gpio(hba, &pin, &pinval))
			return;

		if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX)) == NULL) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
			    "Unable to allocate GPIO mailbox.");

			/* Clear pending bit so a later poll can retry */
			hba->gpio_bit = 0;
			return;
		}

		emlxs_mb_gpio_write(hba, mbq, pin, pinval);
		mbq->mbox_cmpl = emlxs_sli4_fix_gpio_mbcmpl;

		/* Fire and forget; completion routine updates state */
		rval = emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);

		if ((rval != MBX_BUSY) && (rval != MBX_SUCCESS)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
			    "Unable to start GPIO mailbox.");

			hba->gpio_bit = 0;
			emlxs_mem_put(hba, MEM_MBOX, mbq);
			return;
		}
	}
} /* emlxs_sli4_check_gpio */
9172
/*
 * Select the next GPIO pin whose state must change to make
 * gpio_current match gpio_desired.  On success, *pin receives the
 * hardware pin number, *pinval the (active-low, inverted) value to
 * write, hba->gpio_bit records the bit being fixed, and 1 is
 * returned.  Returns 0 when nothing needs changing or a GPIO
 * request is already pending.
 *
 * NOTE(review): both visible callers hold hba->gpio_lock; confirm
 * for any new call sites.
 */
static uint32_t
emlxs_sli4_fix_gpio(emlxs_hba_t *hba, uint8_t *pin, uint8_t *pinval)
{
	uint8_t dif = hba->gpio_desired ^ hba->gpio_current;
	uint8_t bit;
	uint8_t i;

	/* Get out if no pins to set or a GPIO request is pending */

	if (dif == 0 || hba->gpio_bit)
		return (0);

	/* Fix one pin at a time */

	bit = dif & -dif;	/* isolate lowest mismatched bit */
	hba->gpio_bit = bit;
	dif = hba->gpio_current ^ bit;	/* current state with that bit flipped */

	/* Shift 'bit' down to position 0, counting the pin index in i */
	for (i = EMLXS_GPIO_PIN_LO; bit > 1; ++i) {
		dif >>= 1;
		bit >>= 1;
	}

	/* Pins are active low so invert the bit value */

	*pin = hba->gpio_pin[i];
	*pinval = ~dif & bit;

	return (1);
} /* emlxs_sli4_fix_gpio */
9203
9204 static uint32_t
emlxs_sli4_fix_gpio_mbcmpl(emlxs_hba_t * hba,MAILBOXQ * mbq)9205 emlxs_sli4_fix_gpio_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq)
9206 {
9207 MAILBOX *mb;
9208 uint8_t pin;
9209 uint8_t pinval;
9210
9211 mb = (MAILBOX *)mbq;
9212
9213 mutex_enter(&hba->gpio_lock);
9214
9215 if (mb->mbxStatus == 0)
9216 hba->gpio_current ^= hba->gpio_bit;
9217
9218 hba->gpio_bit = 0;
9219
9220 if (emlxs_sli4_fix_gpio(hba, &pin, &pinval)) {
9221 emlxs_port_t *port = &PPORT;
9222 MAILBOXQ *mbq;
9223 uint32_t rval;
9224
9225 /*
9226 * We're not using the mb_retry routine here because for some
9227 * reason it doesn't preserve the completion routine. Just let
9228 * this mbox cmd fail to start here and run when the mailbox
9229 * is no longer busy.
9230 */
9231
9232 if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX)) == NULL) {
9233 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
9234 "Unable to allocate GPIO mailbox.");
9235
9236 hba->gpio_bit = 0;
9237 goto done;
9238 }
9239
9240 emlxs_mb_gpio_write(hba, mbq, pin, pinval);
9241 mbq->mbox_cmpl = emlxs_sli4_fix_gpio_mbcmpl;
9242
9243 rval = emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
9244
9245 if ((rval != MBX_BUSY) && (rval != MBX_SUCCESS)) {
9246 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
9247 "Unable to start GPIO mailbox.");
9248
9249 hba->gpio_bit = 0;
9250 emlxs_mem_put(hba, MEM_MBOX, mbq);
9251 goto done;
9252 }
9253 }
9254
9255 done:
9256 mutex_exit(&hba->gpio_lock);
9257
9258 return (0);
9259 }
9260
9261 extern void
emlxs_data_dump(emlxs_port_t * port,char * str,uint32_t * iptr,int cnt,int err)9262 emlxs_data_dump(emlxs_port_t *port, char *str, uint32_t *iptr, int cnt, int err)
9263 {
9264 void *msg;
9265
9266 if (!port || !str || !iptr || !cnt) {
9267 return;
9268 }
9269
9270 if (err) {
9271 msg = &emlxs_sli_err_msg;
9272 } else {
9273 msg = &emlxs_sli_detail_msg;
9274 }
9275
9276 if (cnt) {
9277 EMLXS_MSGF(EMLXS_CONTEXT, msg,
9278 "%s00: %08x %08x %08x %08x %08x %08x", str, *iptr,
9279 *(iptr+1), *(iptr+2), *(iptr+3), *(iptr+4), *(iptr+5));
9280 }
9281 if (cnt > 6) {
9282 EMLXS_MSGF(EMLXS_CONTEXT, msg,
9283 "%s06: %08x %08x %08x %08x %08x %08x", str, *(iptr+6),
9284 *(iptr+7), *(iptr+8), *(iptr+9), *(iptr+10), *(iptr+11));
9285 }
9286 if (cnt > 12) {
9287 EMLXS_MSGF(EMLXS_CONTEXT, msg,
9288 "%s12: %08x %08x %08x %08x %08x %08x", str, *(iptr+12),
9289 *(iptr+13), *(iptr+14), *(iptr+15), *(iptr+16), *(iptr+17));
9290 }
9291 if (cnt > 18) {
9292 EMLXS_MSGF(EMLXS_CONTEXT, msg,
9293 "%s18: %08x %08x %08x %08x %08x %08x", str, *(iptr+18),
9294 *(iptr+19), *(iptr+20), *(iptr+21), *(iptr+22), *(iptr+23));
9295 }
9296 if (cnt > 24) {
9297 EMLXS_MSGF(EMLXS_CONTEXT, msg,
9298 "%s24: %08x %08x %08x %08x %08x %08x", str, *(iptr+24),
9299 *(iptr+25), *(iptr+26), *(iptr+27), *(iptr+28), *(iptr+29));
9300 }
9301 if (cnt > 30) {
9302 EMLXS_MSGF(EMLXS_CONTEXT, msg,
9303 "%s30: %08x %08x %08x %08x %08x %08x", str, *(iptr+30),
9304 *(iptr+31), *(iptr+32), *(iptr+33), *(iptr+34), *(iptr+35));
9305 }
9306 if (cnt > 36) {
9307 EMLXS_MSGF(EMLXS_CONTEXT, msg,
9308 "%s36: %08x %08x %08x %08x %08x %08x", str, *(iptr+36),
9309 *(iptr+37), *(iptr+38), *(iptr+39), *(iptr+40), *(iptr+41));
9310 }
9311
9312 } /* emlxs_data_dump() */
9313
9314
/*
 * Dump the adapter's unrecoverable-error (UE) registers to the detail
 * log, prefixed with the caller-supplied 'str'.  The register set read
 * depends on the SLI interface type:
 *
 *   IF_TYPE_0:     ERR1/ERR2 via the PCI access handle plus the
 *                  UE_STATUS_ONLINE1/2 words from PCI config space.
 *   IF_TYPE_2/6:   STATUS plus ERR1/ERR2 via BAR0.
 */
extern void
emlxs_ue_dump(emlxs_hba_t *hba, char *str)
{
	emlxs_port_t *port = &PPORT;
	uint32_t status;
	uint32_t ue_h;
	uint32_t ue_l;
	uint32_t on1;
	uint32_t on2;

	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
	case SLI_INTF_IF_TYPE_0:
		ue_l = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR1_reg_addr);
		ue_h = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR2_reg_addr);

		/* Online status words live in PCI config space */
		on1 = ddi_get32(hba->pci_acc_handle,
		    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_ONLINE1));
		on2 = ddi_get32(hba->pci_acc_handle,
		    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_ONLINE2));

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "%s: ueLow:%08x ueHigh:%08x on1:%08x on2:%08x", str,
		    ue_l, ue_h, on1, on2);
		break;

	case SLI_INTF_IF_TYPE_2:
	case SLI_INTF_IF_TYPE_6:
		status = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.STATUS_reg_addr);

		ue_l = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.ERR1_reg_addr);
		ue_h = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.ERR2_reg_addr);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "%s: status:%08x err1:%08x err2:%08x", str,
		    status, ue_l, ue_h);

		break;
	}

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->pci_acc_handle);
#endif /* FMA_SUPPORT */

} /* emlxs_ue_dump() */
9365
9366
/*
 * Poll the adapter error registers for an unrecoverable-error condition.
 *
 * Error disposition:
 *   error == 1  adapter signalled that a reset may recover it; spawn
 *               the restart thread.
 *   error == 2  fatal; flush the chip queues and spawn the shutdown
 *               thread.
 *
 * No-op if a hardware error has already been flagged on this HBA.
 */
static void
emlxs_sli4_poll_erratt(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	uint32_t status;
	uint32_t ue_h;
	uint32_t ue_l;
	uint32_t error = 0;

	if (hba->flag & FC_HARDWARE_ERROR) {
		return;
	}

	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
	case SLI_INTF_IF_TYPE_0:
		ue_l = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR1_reg_addr);
		ue_h = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR2_reg_addr);

		/* Any unmasked UE bit, or a driver-flagged error, is fatal */
		if ((~hba->sli.sli4.ue_mask_lo & ue_l) ||
		    (~hba->sli.sli4.ue_mask_hi & ue_h) ||
		    (hba->sli.sli4.flag & EMLXS_SLI4_HW_ERROR)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
			    "Host Error: ueLow:%08x ueHigh:%08x maskLow:%08x "
			    "maskHigh:%08x flag:%08x",
			    ue_l, ue_h, hba->sli.sli4.ue_mask_lo,
			    hba->sli.sli4.ue_mask_hi, hba->sli.sli4.flag);

			error = 2;
		}
		break;

	case SLI_INTF_IF_TYPE_2:
	case SLI_INTF_IF_TYPE_6:
		status = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.STATUS_reg_addr);

		if ((status & SLI_STATUS_ERROR) ||
		    (hba->sli.sli4.flag & EMLXS_SLI4_HW_ERROR)) {
			/* Read ERR1/ERR2 only once an error is indicated */
			ue_l = ddi_get32(hba->sli.sli4.bar0_acc_handle,
			    hba->sli.sli4.ERR1_reg_addr);
			ue_h = ddi_get32(hba->sli.sli4.bar0_acc_handle,
			    hba->sli.sli4.ERR2_reg_addr);

			/* RESET_NEEDED => recoverable via restart */
			error = (status & SLI_STATUS_RESET_NEEDED)? 1:2;

			if (error == 1) {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
				    "Host Error: status:%08x err1:%08x "
				    "err2:%08x flag:%08x reset",
				    status, ue_l, ue_h, hba->sli.sli4.flag);
			} else {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_hardware_error_msg,
				    "Host Error: status:%08x err1:%08x "
				    "err2:%08x flag:%08x shutdown",
				    status, ue_l, ue_h, hba->sli.sli4.flag);
			}
		}
		break;
	}

	if (error == 2) {
		/* Fatal: flush outstanding I/O and shut the adapter down */
		EMLXS_STATE_CHANGE(hba, FC_ERROR);

		emlxs_sli4_hba_flush_chipq(hba);

		emlxs_thread_spawn(hba, emlxs_shutdown_thread, 0, 0);

	} else if (error == 1) {
		/* Recoverable: flush outstanding I/O and restart */
		EMLXS_STATE_CHANGE(hba, FC_ERROR);

		emlxs_sli4_hba_flush_chipq(hba);

		emlxs_thread_spawn(hba, emlxs_restart_thread, 0, 0);
	}

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->pci_acc_handle);
#endif /* FMA_SUPPORT */

} /* emlxs_sli4_poll_erratt() */
9451
9452
/*
 * Register a remote node (DID) with the port.
 *
 * Validates the DID and service parameters, looks up (creating if
 * needed) the node and its RPI object, then hands off to
 * emlxs_rpi_online_notify() to perform the actual registration.
 *
 * Returns 1 when the request is rejected (invalid DID, bad service
 * parameters, node limit reached); otherwise returns the result of
 * emlxs_rpi_online_notify().
 */
static uint32_t
emlxs_sli4_reg_did(emlxs_port_t *port, uint32_t did, SERV_PARM *param,
    emlxs_buf_t *sbp, fc_unsol_buf_t *ubp, IOCBQ *iocbq)
{
	emlxs_hba_t *hba = HBA;
	NODELIST *node;
	RPIobj_t *rpip;
	uint32_t rval;

	/* Check for invalid node ids to register */
	/* DID 0 is allowed only in loopback mode */
	if ((did == 0) && (!(hba->flag & FC_LOOPBACK_MODE))) {
		return (1);
	}

	/* FC DIDs are 24-bit; reject anything with high bits set */
	if (did & 0xff000000) {
		return (1);
	}

	/* We don't register our own did */
	if ((did == port->did) && (!(hba->flag & FC_LOOPBACK_MODE))) {
		return (1);
	}

	/* The fabric DID is exempt from service parameter validation */
	if (did != FABRIC_DID) {
		if ((rval = emlxs_mb_check_sparm(hba, param))) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_create_failed_msg,
			    "Invalid service parameters. did=%06x rval=%d", did,
			    rval);

			return (1);
		}
	}

	/* Check if the node limit has been reached */
	if (port->node_count >= hba->max_nodes) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_create_failed_msg,
		    "Limit reached. did=%06x count=%d", did,
		    port->node_count);

		return (1);
	}

	node = emlxs_node_find_did(port, did, 1);
	rpip = EMLXS_NODE_TO_RPI(port, node);

	rval = emlxs_rpi_online_notify(port, rpip, did, param, (void *)sbp,
	    (void *)ubp, (void *)iocbq);

	return (rval);

} /* emlxs_sli4_reg_did() */
9504
9505
/*
 * Unregister a node from the port.
 *
 * node == NULL unregisters all nodes.  The base node is only flushed
 * (tx and chip queues), never unregistered through the RPI layer.  For
 * a regular node, hands off to emlxs_rpi_offline_notify(); if the node
 * has no RPI object it is simply removed.
 *
 * Returns 1 for the special-cased paths above, otherwise the result of
 * emlxs_rpi_offline_notify().
 */
static uint32_t
emlxs_sli4_unreg_node(emlxs_port_t *port, emlxs_node_t *node,
    emlxs_buf_t *sbp, fc_unsol_buf_t *ubp, IOCBQ *iocbq)
{
	RPIobj_t *rpip;
	uint32_t rval;

	if (!node) {
		/* Unreg all nodes */
		(void) emlxs_sli4_unreg_all_nodes(port);
		return (1);
	}

	/* Check for base node */
	if (node == &port->node_base) {
		/* Just flush base node */
		(void) emlxs_tx_node_flush(port, &port->node_base,
		    0, 0, 0);

		(void) emlxs_chipq_node_flush(port, 0,
		    &port->node_base, 0);

		/* Port no longer has a registered address */
		port->did = 0;

		/* Return now */
		return (1);
	}

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "unreg_node:%p did=%x rpi=%d",
	    node, node->nlp_DID, node->nlp_Rpi);

	rpip = EMLXS_NODE_TO_RPI(port, node);

	if (!rpip) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "unreg_node:%p did=%x rpi=%d. RPI not found.",
		    node, node->nlp_DID, node->nlp_Rpi);

		/* No RPI to offline; just remove the node */
		emlxs_node_rm(port, node);
		return (1);
	}

	rval = emlxs_rpi_offline_notify(port, rpip, (void *)sbp, (void *)ubp,
	    (void *)iocbq);

	return (rval);

} /* emlxs_sli4_unreg_node() */
9555
9556
/*
 * Unregister every node on the port.
 *
 * Two-phase algorithm so that emlxs_sli4_unreg_node() (which may modify
 * the node table) is never called while node_rwlock is held:
 *
 *   1. Under the lock, tag every node in the hash table.
 *   2. Repeatedly: under the lock, find one still-tagged node, clear
 *      its tag, drop the lock, and unregister it.  Stop when no tagged
 *      node remains.
 *
 * Always returns 0.
 */
extern uint32_t
emlxs_sli4_unreg_all_nodes(emlxs_port_t *port)
{
	NODELIST *nlp;
	int i;
	uint32_t found;

	/* Set the node tags */
	/* We will process all nodes with this tag */
	rw_enter(&port->node_rwlock, RW_READER);
	found = 0;
	for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
		nlp = port->node_table[i];
		while (nlp != NULL) {
			found = 1;
			nlp->nlp_tag = 1;
			nlp = nlp->nlp_list_next;
		}
	}
	rw_exit(&port->node_rwlock);

	/* Nothing to do if the table was empty */
	if (!found) {
		return (0);
	}

	for (;;) {
		/* Find the next tagged node to process */
		rw_enter(&port->node_rwlock, RW_READER);
		found = 0;
		for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
			nlp = port->node_table[i];
			while (nlp != NULL) {
				if (!nlp->nlp_tag) {
					nlp = nlp->nlp_list_next;
					continue;
				}
				/* Clear the tag so it's processed once */
				nlp->nlp_tag = 0;
				found = 1;
				break;
			}

			if (found) {
				break;
			}
		}
		rw_exit(&port->node_rwlock);

		if (!found) {
			break;
		}

		/* Unregister outside the lock */
		(void) emlxs_sli4_unreg_node(port, nlp, 0, 0, 0);
	}

	return (0);

} /* emlxs_sli4_unreg_all_nodes() */
9613
9614
9615 static void
emlxs_sli4_handle_fcoe_link_event(emlxs_hba_t * hba,CQE_ASYNC_t * cqe)9616 emlxs_sli4_handle_fcoe_link_event(emlxs_hba_t *hba, CQE_ASYNC_t *cqe)
9617 {
9618 emlxs_port_t *port = &PPORT;
9619
9620 /* Handle link down */
9621 if ((cqe->un.link.link_status == ASYNC_EVENT_LOGICAL_LINK_DOWN) ||
9622 (cqe->un.link.link_status == ASYNC_EVENT_PHYS_LINK_DOWN)) {
9623 (void) emlxs_fcf_linkdown_notify(port);
9624
9625 mutex_enter(&EMLXS_PORT_LOCK);
9626 hba->sli.sli4.flag &= ~EMLXS_SLI4_DOWN_LINK;
9627 mutex_exit(&EMLXS_PORT_LOCK);
9628 return;
9629 }
9630
9631 /* Link is up */
9632
9633 /* Set linkspeed */
9634 switch (cqe->un.link.port_speed) {
9635 case PHY_1GHZ_LINK:
9636 hba->linkspeed = LA_1GHZ_LINK;
9637 break;
9638 case PHY_10GHZ_LINK:
9639 hba->linkspeed = LA_10GHZ_LINK;
9640 break;
9641 default:
9642 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
9643 "sli4_handle_fcoe_link_event: Unknown link speed=%x.",
9644 cqe->un.link.port_speed);
9645 hba->linkspeed = 0;
9646 break;
9647 }
9648
9649 /* Set qos_linkspeed */
9650 hba->qos_linkspeed = cqe->un.link.qos_link_speed;
9651
9652 /* Set topology */
9653 hba->topology = TOPOLOGY_PT_PT;
9654
9655 mutex_enter(&EMLXS_PORT_LOCK);
9656 hba->sli.sli4.flag &= ~EMLXS_SLI4_DOWN_LINK;
9657 mutex_exit(&EMLXS_PORT_LOCK);
9658
9659 (void) emlxs_fcf_linkup_notify(port);
9660
9661 return;
9662
9663 } /* emlxs_sli4_handle_fcoe_link_event() */
9664
9665
9666 static void
emlxs_sli4_handle_fc_link_att(emlxs_hba_t * hba,CQE_ASYNC_t * cqe)9667 emlxs_sli4_handle_fc_link_att(emlxs_hba_t *hba, CQE_ASYNC_t *cqe)
9668 {
9669 emlxs_port_t *port = &PPORT;
9670
9671 /* Handle link down */
9672 if (cqe->un.fc.att_type == ATT_TYPE_LINK_DOWN) {
9673 (void) emlxs_fcf_linkdown_notify(port);
9674
9675 mutex_enter(&EMLXS_PORT_LOCK);
9676 hba->sli.sli4.flag &= ~EMLXS_SLI4_DOWN_LINK;
9677 mutex_exit(&EMLXS_PORT_LOCK);
9678 return;
9679 }
9680
9681 /* Link is up */
9682
9683 /* Set linkspeed */
9684 switch (cqe->un.fc.port_speed) {
9685 case 1:
9686 hba->linkspeed = LA_1GHZ_LINK;
9687 break;
9688 case 2:
9689 hba->linkspeed = LA_2GHZ_LINK;
9690 break;
9691 case 4:
9692 hba->linkspeed = LA_4GHZ_LINK;
9693 break;
9694 case 8:
9695 hba->linkspeed = LA_8GHZ_LINK;
9696 break;
9697 case 10:
9698 hba->linkspeed = LA_10GHZ_LINK;
9699 break;
9700 case 16:
9701 hba->linkspeed = LA_16GHZ_LINK;
9702 break;
9703 case 32:
9704 hba->linkspeed = LA_32GHZ_LINK;
9705 break;
9706 default:
9707 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
9708 "sli4_handle_fc_link_att: Unknown link speed=%x.",
9709 cqe->un.fc.port_speed);
9710 hba->linkspeed = 0;
9711 break;
9712 }
9713
9714 /* Set qos_linkspeed */
9715 hba->qos_linkspeed = cqe->un.fc.link_speed;
9716
9717 /* Set topology */
9718 hba->topology = cqe->un.fc.topology;
9719
9720 mutex_enter(&EMLXS_PORT_LOCK);
9721 hba->sli.sli4.flag &= ~EMLXS_SLI4_DOWN_LINK;
9722 mutex_exit(&EMLXS_PORT_LOCK);
9723
9724 (void) emlxs_fcf_linkup_notify(port);
9725
9726 return;
9727
9728 } /* emlxs_sli4_handle_fc_link_att() */
9729
9730
/*
 * Discover and allocate the SLI4 resource extents (XRI, RPI, VPI, VFI)
 * when the adapter reports extent support (param.EXT).
 *
 * For each resource type the same sequence runs:
 *   1. GET_EXTENTS_INFO mailbox: learn the extent size and how many
 *      extents are available.
 *   2. ALLOC_EXTENTS mailbox: allocate them, then cache the returned
 *      base resource IDs and the resulting totals in hba->sli.sli4.
 *
 * 'mbq' is a caller-supplied scratch mailbox, reused (bzero'd) for each
 * command.  Returns 0 on success or EIO on any mailbox failure.
 */
static int
emlxs_sli4_init_extents(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
	emlxs_port_t *port = &PPORT;
	MAILBOX4 *mb4;
	IOCTL_COMMON_EXTENTS *ep;
	uint32_t i;
	uint32_t ExtentCnt;

	/* Nothing to do if the adapter doesn't use extents */
	if (!(hba->sli.sli4.param.EXT)) {
		return (0);
	}

	mb4 = (MAILBOX4 *) mbq;

	/* Discover XRI Extents */
	bzero(mbq, sizeof (MAILBOXQ));
	emlxs_mb_get_extents_info(hba, mbq, RSC_TYPE_FCOE_XRI);

	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to discover XRI extents. Mailbox cmd=%x status=%x",
		    mb4->mbxCommand, mb4->mbxStatus);

		return (EIO);
	}

	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
	hba->sli.sli4.XRIExtSize = ep->params.response.ExtentSize;
	ExtentCnt = ep->params.response.ExtentCnt;

	/* Allocate XRI Extents */
	bzero(mbq, sizeof (MAILBOXQ));
	emlxs_mb_alloc_extents(hba, mbq, RSC_TYPE_FCOE_XRI, ExtentCnt);

	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to allocate XRI extents. Mailbox cmd=%x status=%x",
		    mb4->mbxCommand, mb4->mbxStatus);

		return (EIO);
	}
	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;

	/* Cache the base resource ID of each allocated extent */
	bcopy((uint8_t *)ep->params.response.RscId,
	    (uint8_t *)hba->sli.sli4.XRIBase,
	    (ep->params.response.ExtentCnt * sizeof (uint16_t)));

	hba->sli.sli4.XRIExtCount = ep->params.response.ExtentCnt;
	hba->sli.sli4.XRICount = hba->sli.sli4.XRIExtCount *
	    hba->sli.sli4.XRIExtSize;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	    "XRI Ext: size=%d cnt=%d/%d",
	    hba->sli.sli4.XRIExtSize,
	    hba->sli.sli4.XRIExtCount, ExtentCnt);

	/* Log the extent bases, four per line */
	for (i = 0; i < ep->params.response.ExtentCnt; i += 4) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "XRI Ext%d: %d, %d, %d, %d", i,
		    hba->sli.sli4.XRIBase[i],
		    hba->sli.sli4.XRIBase[i+1],
		    hba->sli.sli4.XRIBase[i+2],
		    hba->sli.sli4.XRIBase[i+3]);
	}


	/* Discover RPI Extents */
	bzero(mbq, sizeof (MAILBOXQ));
	emlxs_mb_get_extents_info(hba, mbq, RSC_TYPE_FCOE_RPI);

	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to discover RPI extents. Mailbox cmd=%x status=%x",
		    mb4->mbxCommand, mb4->mbxStatus);

		return (EIO);
	}

	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
	hba->sli.sli4.RPIExtSize = ep->params.response.ExtentSize;
	ExtentCnt = ep->params.response.ExtentCnt;

	/* Allocate RPI Extents */
	bzero(mbq, sizeof (MAILBOXQ));
	emlxs_mb_alloc_extents(hba, mbq, RSC_TYPE_FCOE_RPI, ExtentCnt);

	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to allocate RPI extents. Mailbox cmd=%x status=%x",
		    mb4->mbxCommand, mb4->mbxStatus);

		return (EIO);
	}
	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;

	bcopy((uint8_t *)ep->params.response.RscId,
	    (uint8_t *)hba->sli.sli4.RPIBase,
	    (ep->params.response.ExtentCnt * sizeof (uint16_t)));

	hba->sli.sli4.RPIExtCount = ep->params.response.ExtentCnt;
	hba->sli.sli4.RPICount = hba->sli.sli4.RPIExtCount *
	    hba->sli.sli4.RPIExtSize;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	    "RPI Ext: size=%d cnt=%d/%d",
	    hba->sli.sli4.RPIExtSize,
	    hba->sli.sli4.RPIExtCount, ExtentCnt);

	for (i = 0; i < ep->params.response.ExtentCnt; i += 4) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "RPI Ext%d: %d, %d, %d, %d", i,
		    hba->sli.sli4.RPIBase[i],
		    hba->sli.sli4.RPIBase[i+1],
		    hba->sli.sli4.RPIBase[i+2],
		    hba->sli.sli4.RPIBase[i+3]);
	}


	/* Discover VPI Extents */
	bzero(mbq, sizeof (MAILBOXQ));
	emlxs_mb_get_extents_info(hba, mbq, RSC_TYPE_FCOE_VPI);

	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to discover VPI extents. Mailbox cmd=%x status=%x",
		    mb4->mbxCommand, mb4->mbxStatus);

		return (EIO);
	}

	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
	hba->sli.sli4.VPIExtSize = ep->params.response.ExtentSize;
	ExtentCnt = ep->params.response.ExtentCnt;

	/* Allocate VPI Extents */
	bzero(mbq, sizeof (MAILBOXQ));
	emlxs_mb_alloc_extents(hba, mbq, RSC_TYPE_FCOE_VPI, ExtentCnt);

	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to allocate VPI extents. Mailbox cmd=%x status=%x",
		    mb4->mbxCommand, mb4->mbxStatus);

		return (EIO);
	}
	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;

	bcopy((uint8_t *)ep->params.response.RscId,
	    (uint8_t *)hba->sli.sli4.VPIBase,
	    (ep->params.response.ExtentCnt * sizeof (uint16_t)));

	hba->sli.sli4.VPIExtCount = ep->params.response.ExtentCnt;
	hba->sli.sli4.VPICount = hba->sli.sli4.VPIExtCount *
	    hba->sli.sli4.VPIExtSize;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	    "VPI Ext: size=%d cnt=%d/%d",
	    hba->sli.sli4.VPIExtSize,
	    hba->sli.sli4.VPIExtCount, ExtentCnt);

	for (i = 0; i < ep->params.response.ExtentCnt; i += 4) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "VPI Ext%d: %d, %d, %d, %d", i,
		    hba->sli.sli4.VPIBase[i],
		    hba->sli.sli4.VPIBase[i+1],
		    hba->sli.sli4.VPIBase[i+2],
		    hba->sli.sli4.VPIBase[i+3]);
	}

	/* Discover VFI Extents */
	bzero(mbq, sizeof (MAILBOXQ));
	emlxs_mb_get_extents_info(hba, mbq, RSC_TYPE_FCOE_VFI);

	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to discover VFI extents. Mailbox cmd=%x status=%x",
		    mb4->mbxCommand, mb4->mbxStatus);

		return (EIO);
	}

	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
	hba->sli.sli4.VFIExtSize = ep->params.response.ExtentSize;
	ExtentCnt = ep->params.response.ExtentCnt;

	/* Allocate VFI Extents */
	bzero(mbq, sizeof (MAILBOXQ));
	emlxs_mb_alloc_extents(hba, mbq, RSC_TYPE_FCOE_VFI, ExtentCnt);

	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to allocate VFI extents. Mailbox cmd=%x status=%x",
		    mb4->mbxCommand, mb4->mbxStatus);

		return (EIO);
	}
	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;

	bcopy((uint8_t *)ep->params.response.RscId,
	    (uint8_t *)hba->sli.sli4.VFIBase,
	    (ep->params.response.ExtentCnt * sizeof (uint16_t)));

	hba->sli.sli4.VFIExtCount = ep->params.response.ExtentCnt;
	hba->sli.sli4.VFICount = hba->sli.sli4.VFIExtCount *
	    hba->sli.sli4.VFIExtSize;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	    "VFI Ext: size=%d cnt=%d/%d",
	    hba->sli.sli4.VFIExtSize,
	    hba->sli.sli4.VFIExtCount, ExtentCnt);

	for (i = 0; i < ep->params.response.ExtentCnt; i += 4) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "VFI Ext%d: %d, %d, %d, %d", i,
		    hba->sli.sli4.VFIBase[i],
		    hba->sli.sli4.VFIBase[i+1],
		    hba->sli.sli4.VFIBase[i+2],
		    hba->sli.sli4.VFIBase[i+3]);
	}

	return (0);

} /* emlxs_sli4_init_extents() */
9955
9956
9957 extern uint32_t
emlxs_sli4_index_to_rpi(emlxs_hba_t * hba,uint32_t index)9958 emlxs_sli4_index_to_rpi(emlxs_hba_t *hba, uint32_t index)
9959 {
9960 uint32_t i;
9961 uint32_t j;
9962 uint32_t rpi;
9963
9964 i = index / hba->sli.sli4.RPIExtSize;
9965 j = index % hba->sli.sli4.RPIExtSize;
9966 rpi = hba->sli.sli4.RPIBase[i] + j;
9967
9968 return (rpi);
9969
9970 } /* emlxs_sli4_index_to_rpi */
9971
9972
9973 extern uint32_t
emlxs_sli4_rpi_to_index(emlxs_hba_t * hba,uint32_t rpi)9974 emlxs_sli4_rpi_to_index(emlxs_hba_t *hba, uint32_t rpi)
9975 {
9976 uint32_t i;
9977 uint32_t lo;
9978 uint32_t hi;
9979 uint32_t index = hba->sli.sli4.RPICount;
9980
9981 for (i = 0; i < hba->sli.sli4.RPIExtCount; i++) {
9982 lo = hba->sli.sli4.RPIBase[i];
9983 hi = lo + hba->sli.sli4.RPIExtSize;
9984
9985 if ((rpi < hi) && (rpi >= lo)) {
9986 index = (i * hba->sli.sli4.RPIExtSize) + (rpi - lo);
9987 break;
9988 }
9989 }
9990
9991 return (index);
9992
9993 } /* emlxs_sli4_rpi_to_index */
9994
9995
9996 extern uint32_t
emlxs_sli4_index_to_xri(emlxs_hba_t * hba,uint32_t index)9997 emlxs_sli4_index_to_xri(emlxs_hba_t *hba, uint32_t index)
9998 {
9999 uint32_t i;
10000 uint32_t j;
10001 uint32_t xri;
10002
10003 i = index / hba->sli.sli4.XRIExtSize;
10004 j = index % hba->sli.sli4.XRIExtSize;
10005 xri = hba->sli.sli4.XRIBase[i] + j;
10006
10007 return (xri);
10008
10009 } /* emlxs_sli4_index_to_xri */
10010
10011
10012
10013
10014 extern uint32_t
emlxs_sli4_index_to_vpi(emlxs_hba_t * hba,uint32_t index)10015 emlxs_sli4_index_to_vpi(emlxs_hba_t *hba, uint32_t index)
10016 {
10017 uint32_t i;
10018 uint32_t j;
10019 uint32_t vpi;
10020
10021 i = index / hba->sli.sli4.VPIExtSize;
10022 j = index % hba->sli.sli4.VPIExtSize;
10023 vpi = hba->sli.sli4.VPIBase[i] + j;
10024
10025 return (vpi);
10026
10027 } /* emlxs_sli4_index_to_vpi */
10028
10029
10030 extern uint32_t
emlxs_sli4_vpi_to_index(emlxs_hba_t * hba,uint32_t vpi)10031 emlxs_sli4_vpi_to_index(emlxs_hba_t *hba, uint32_t vpi)
10032 {
10033 uint32_t i;
10034 uint32_t lo;
10035 uint32_t hi;
10036 uint32_t index = hba->sli.sli4.VPICount;
10037
10038 for (i = 0; i < hba->sli.sli4.VPIExtCount; i++) {
10039 lo = hba->sli.sli4.VPIBase[i];
10040 hi = lo + hba->sli.sli4.VPIExtSize;
10041
10042 if ((vpi < hi) && (vpi >= lo)) {
10043 index = (i * hba->sli.sli4.VPIExtSize) + (vpi - lo);
10044 break;
10045 }
10046 }
10047
10048 return (index);
10049
10050 } /* emlxs_sli4_vpi_to_index */
10051
10052
10053
10054
10055 extern uint32_t
emlxs_sli4_index_to_vfi(emlxs_hba_t * hba,uint32_t index)10056 emlxs_sli4_index_to_vfi(emlxs_hba_t *hba, uint32_t index)
10057 {
10058 uint32_t i;
10059 uint32_t j;
10060 uint32_t vfi;
10061
10062 i = index / hba->sli.sli4.VFIExtSize;
10063 j = index % hba->sli.sli4.VFIExtSize;
10064 vfi = hba->sli.sli4.VFIBase[i] + j;
10065
10066 return (vfi);
10067
10068 } /* emlxs_sli4_index_to_vfi */
10069
10070
10071 static uint16_t
emlxs_sli4_rqid_to_index(emlxs_hba_t * hba,uint16_t rqid)10072 emlxs_sli4_rqid_to_index(emlxs_hba_t *hba, uint16_t rqid)
10073 {
10074 uint16_t i;
10075
10076 if (rqid < 0xffff) {
10077 for (i = 0; i < EMLXS_MAX_RQS; i++) {
10078 if (hba->sli.sli4.rq[i].qid == rqid) {
10079 return (i);
10080 }
10081 }
10082 }
10083
10084 return (0xffff);
10085
10086 } /* emlxs_sli4_rqid_to_index */
10087
10088
10089 static uint16_t
emlxs_sli4_wqid_to_index(emlxs_hba_t * hba,uint16_t wqid)10090 emlxs_sli4_wqid_to_index(emlxs_hba_t *hba, uint16_t wqid)
10091 {
10092 uint16_t i;
10093
10094 if (wqid < 0xffff) {
10095 for (i = 0; i < EMLXS_MAX_WQS; i++) {
10096 if (hba->sli.sli4.wq[i].qid == wqid) {
10097 return (i);
10098 }
10099 }
10100 }
10101
10102 return (0xffff);
10103
10104 } /* emlxs_sli4_wqid_to_index */
10105
10106
10107 static uint16_t
emlxs_sli4_cqid_to_index(emlxs_hba_t * hba,uint16_t cqid)10108 emlxs_sli4_cqid_to_index(emlxs_hba_t *hba, uint16_t cqid)
10109 {
10110 uint16_t i;
10111
10112 if (cqid < 0xffff) {
10113 for (i = 0; i < EMLXS_MAX_CQS; i++) {
10114 if (hba->sli.sli4.cq[i].qid == cqid) {
10115 return (i);
10116 }
10117 }
10118 }
10119
10120 return (0xffff);
10121
10122 } /* emlxs_sli4_cqid_to_index */
10123