/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at
 * http://www.opensource.org/licenses/cddl1.txt.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2004-2012 Emulex. All rights reserved.
 * Use is subject to license terms.
 */

#include <emlxs.h>

/* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
EMLXS_MSG_DEF(EMLXS_SLI3_C);

static void emlxs_sli3_issue_iocb(emlxs_hba_t *hba, RING *rp, IOCBQ *iocbq);
static void emlxs_sli3_handle_link_event(emlxs_hba_t *hba);
static void emlxs_sli3_handle_ring_event(emlxs_hba_t *hba, int32_t ring_no,
    uint32_t ha_copy);
#ifdef SFCT_SUPPORT
static uint32_t emlxs_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp);
#endif /* SFCT_SUPPORT */

static uint32_t emlxs_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp);

static uint32_t emlxs_disable_traffic_cop = 1;
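/*
 * Note: when nonzero (the default), emlxs_sli3_online() issues
 * emlxs_disable_tc() to turn off the firmware's "traffic cop"
 * feature during initialization; setting this to 0 at compile
 * time would leave the feature enabled.
 */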

static int emlxs_sli3_map_hdw(emlxs_hba_t *hba);

static void emlxs_sli3_unmap_hdw(emlxs_hba_t *hba);

static int32_t emlxs_sli3_online(emlxs_hba_t *hba);

static void emlxs_sli3_offline(emlxs_hba_t *hba,
    uint32_t reset_requested);

static uint32_t emlxs_sli3_hba_reset(emlxs_hba_t *hba,
    uint32_t restart, uint32_t skip_post,
    uint32_t quiesce);

static void emlxs_sli3_hba_kill(emlxs_hba_t *hba);
static void emlxs_sli3_hba_kill4quiesce(emlxs_hba_t *hba);
static uint32_t emlxs_sli3_hba_init(emlxs_hba_t *hba);

static uint32_t emlxs_sli2_bde_setup(emlxs_port_t *port,
    emlxs_buf_t *sbp);
static uint32_t emlxs_sli3_bde_setup(emlxs_port_t *port,
    emlxs_buf_t *sbp);
static uint32_t emlxs_sli2_fct_bde_setup(emlxs_port_t *port,
    emlxs_buf_t *sbp);
static uint32_t emlxs_sli3_fct_bde_setup(emlxs_port_t *port,
    emlxs_buf_t *sbp);


static void emlxs_sli3_issue_iocb_cmd(emlxs_hba_t *hba,
    CHANNEL *rp, IOCBQ *iocb_cmd);


static uint32_t emlxs_sli3_issue_mbox_cmd(emlxs_hba_t *hba,
    MAILBOXQ *mbq, int32_t flg,
    uint32_t tmo);


#ifdef SFCT_SUPPORT
static uint32_t emlxs_sli3_prep_fct_iocb(emlxs_port_t *port,
    emlxs_buf_t *cmd_sbp, int channel);

#endif /* SFCT_SUPPORT */

static uint32_t emlxs_sli3_prep_fcp_iocb(emlxs_port_t *port,
    emlxs_buf_t *sbp, int ring);

static uint32_t emlxs_sli3_prep_ip_iocb(emlxs_port_t *port,
    emlxs_buf_t *sbp);

static uint32_t emlxs_sli3_prep_els_iocb(emlxs_port_t *port,
    emlxs_buf_t *sbp);


static uint32_t emlxs_sli3_prep_ct_iocb(emlxs_port_t *port,
    emlxs_buf_t *sbp);


static void emlxs_sli3_poll_intr(emlxs_hba_t *hba);

static int32_t emlxs_sli3_intx_intr(char *arg);
#ifdef MSI_SUPPORT
static uint32_t emlxs_sli3_msi_intr(char *arg1, char *arg2);
#endif /* MSI_SUPPORT */

static void emlxs_sli3_enable_intr(emlxs_hba_t *hba);

static void emlxs_sli3_disable_intr(emlxs_hba_t *hba,
    uint32_t att);


static void emlxs_handle_ff_error(emlxs_hba_t *hba);

static uint32_t emlxs_handle_mb_event(emlxs_hba_t *hba);

static void emlxs_sli3_timer_check_mbox(emlxs_hba_t *hba);

static uint32_t emlxs_mb_config_port(emlxs_hba_t *hba,
    MAILBOXQ *mbq, uint32_t sli_mode,
    uint32_t hbainit);
static void emlxs_enable_latt(emlxs_hba_t *hba);

static uint32_t emlxs_check_attention(emlxs_hba_t *hba);

static uint32_t emlxs_get_attention(emlxs_hba_t *hba,
    int32_t msgid);
static void emlxs_proc_attention(emlxs_hba_t *hba,
    uint32_t ha_copy);
/* static int emlxs_handle_rcv_seq(emlxs_hba_t *hba, */
/*     CHANNEL *cp, IOCBQ *iocbq); */
/* static void emlxs_update_HBQ_index(emlxs_hba_t *hba, */
/*     uint32_t hbq_id); */
/* static void emlxs_hbq_free_all(emlxs_hba_t *hba, */
/*     uint32_t hbq_id); */
static uint32_t emlxs_hbq_setup(emlxs_hba_t *hba,
    uint32_t hbq_id);
static void emlxs_sli3_timer(emlxs_hba_t *hba);

static void emlxs_sli3_poll_erratt(emlxs_hba_t *hba);

static uint32_t emlxs_sli3_reg_did(emlxs_port_t *port,
    uint32_t did, SERV_PARM *param,
    emlxs_buf_t *sbp, fc_unsol_buf_t *ubp,
    IOCBQ *iocbq);

static uint32_t emlxs_sli3_unreg_node(emlxs_port_t *port,
    NODELIST *node, emlxs_buf_t *sbp,
    fc_unsol_buf_t *ubp, IOCBQ *iocbq);


/* Define SLI3 API functions */
emlxs_sli_api_t emlxs_sli3_api = {
    emlxs_sli3_map_hdw,
    emlxs_sli3_unmap_hdw,
    emlxs_sli3_online,
    emlxs_sli3_offline,
    emlxs_sli3_hba_reset,
    emlxs_sli3_hba_kill,
    emlxs_sli3_issue_iocb_cmd,
    emlxs_sli3_issue_mbox_cmd,
#ifdef SFCT_SUPPORT
    emlxs_sli3_prep_fct_iocb,
#else
    NULL,
#endif /* SFCT_SUPPORT */
    emlxs_sli3_prep_fcp_iocb,
    emlxs_sli3_prep_ip_iocb,
    emlxs_sli3_prep_els_iocb,
    emlxs_sli3_prep_ct_iocb,
    emlxs_sli3_poll_intr,
    emlxs_sli3_intx_intr,
    emlxs_sli3_msi_intr,
    emlxs_sli3_disable_intr,
    emlxs_sli3_timer,
    emlxs_sli3_poll_erratt,
    emlxs_sli3_reg_did,
    emlxs_sli3_unreg_node
};


/*
 * emlxs_sli3_online()
 *
 * This routine will start initialization of the SLI2/3 HBA.
 */
static int32_t
emlxs_sli3_online(emlxs_hba_t *hba)
{
    emlxs_port_t *port = &PPORT;
    emlxs_config_t *cfg;
    emlxs_vpd_t *vpd;
    MAILBOX *mb = NULL;
    MAILBOXQ *mbq = NULL;
    RING *rp;
    CHANNEL *cp;
    MATCHMAP *mp = NULL;
    MATCHMAP *mp1 = NULL;
    uint8_t *inptr;
    uint8_t *outptr;
    uint32_t status;
    uint16_t i;
    uint32_t j;
    uint32_t read_rev_reset;
    uint32_t key = 0;
    uint32_t fw_check;
    uint32_t kern_update = 0;
    uint32_t rval = 0;
    uint32_t offset;
    uint8_t vpd_data[DMP_VPD_SIZE];
    uint32_t MaxRbusSize;
    uint32_t MaxIbusSize;
    uint32_t sli_mode;
    uint32_t sli_mode_mask;

    cfg = &CFG;
    vpd = &VPD;
    MaxRbusSize = 0;
    MaxIbusSize = 0;
    read_rev_reset = 0;
    hba->chan_count = MAX_RINGS;

    if (hba->bus_type == SBUS_FC) {
        (void) READ_SBUS_CSR_REG(hba, FC_SHS_REG(hba));
    }

    /* Set the fw_check flag */
    fw_check = cfg[CFG_FW_CHECK].current;

    if ((fw_check & 0x04) ||
        (hba->fw_flag & FW_UPDATE_KERNEL)) {
        kern_update = 1;
    }
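
    /*
     * fw_check bits, as consumed below: 0x1 enables the firmware
     * version check for Oracle-branded adapters, 0x2 enables it
     * for any adapter with a firmware id, and 0x4 (like
     * FW_UPDATE_KERNEL) additionally permits the kernel/stub
     * images to be updated.
     */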
234
235 hba->mbox_queue_flag = 0;
236 hba->sli.sli3.hc_copy = 0;
237 hba->fc_edtov = FF_DEF_EDTOV;
238 hba->fc_ratov = FF_DEF_RATOV;
239 hba->fc_altov = FF_DEF_ALTOV;
240 hba->fc_arbtov = FF_DEF_ARBTOV;
241
242 /*
243 * Get a buffer which will be used repeatedly for mailbox commands
244 */
245 mbq = (MAILBOXQ *) kmem_zalloc((sizeof (MAILBOXQ)), KM_SLEEP);
246
247 mb = (MAILBOX *)mbq;
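    /*
     * Note: the MAILBOX cast is valid because the mailbox command
     * image sits at the start of the MAILBOXQ structure, so mb and
     * mbq alias the same memory for the rest of this routine.
     */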

    /* Initialize sli mode based on configuration parameter */
    switch (cfg[CFG_SLI_MODE].current) {
    case 2:	/* SLI2 mode */
        sli_mode = EMLXS_HBA_SLI2_MODE;
        sli_mode_mask = EMLXS_SLI2_MASK;
        break;

    case 3:	/* SLI3 mode */
        sli_mode = EMLXS_HBA_SLI3_MODE;
        sli_mode_mask = EMLXS_SLI3_MASK;
        break;

    case 0:	/* Best available */
    case 1:	/* Best available */
    default:
        if (hba->model_info.sli_mask & EMLXS_SLI3_MASK) {
            sli_mode = EMLXS_HBA_SLI3_MODE;
            sli_mode_mask = EMLXS_SLI3_MASK;
        } else if (hba->model_info.sli_mask & EMLXS_SLI2_MASK) {
            sli_mode = EMLXS_HBA_SLI2_MODE;
            sli_mode_mask = EMLXS_SLI2_MASK;
        } else {
            EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
                "No SLI mode available.");
            rval = EIO;
            goto failed;
        }
        break;
    }
    /* SBUS adapters are only available in SLI2 */
    if (hba->bus_type == SBUS_FC) {
        sli_mode = EMLXS_HBA_SLI2_MODE;
        sli_mode_mask = EMLXS_SLI2_MASK;
    }

reset:
    /* Reset & initialize the adapter */
    if (emlxs_sli3_hba_init(hba)) {
        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
            "Unable to init hba.");

        rval = EIO;
        goto failed;
    }

#ifdef FMA_SUPPORT
    /* Access handle validation */
    if ((emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
        != DDI_FM_OK) ||
        (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.slim_acc_handle)
        != DDI_FM_OK) ||
        (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.csr_acc_handle)
        != DDI_FM_OK)) {
        EMLXS_MSGF(EMLXS_CONTEXT,
            &emlxs_invalid_access_handle_msg, NULL);

        rval = EIO;
        goto failed;
    }
#endif /* FMA_SUPPORT */

    /* Check for PEGASUS (this is a special case) */
    /* We need to check for a dual channel adapter */
    if (hba->model_info.device_id == PCI_DEVICE_ID_PEGASUS) {
        /* Try to determine if this is a DC adapter */
        if (emlxs_get_max_sram(hba, &MaxRbusSize, &MaxIbusSize) == 0) {
            if (MaxRbusSize == REDUCED_SRAM_CFG) {
                /* LP9802DC */
                for (i = 1; i < emlxs_pci_model_count; i++) {
                    if (emlxs_pci_model[i].id == LP9802DC) {
                        bcopy(&emlxs_pci_model[i],
                            &hba->model_info,
                            sizeof (emlxs_model_t));
                        break;
                    }
                }
            } else if (hba->model_info.id != LP9802) {
                /* LP9802 */
                for (i = 1; i < emlxs_pci_model_count; i++) {
                    if (emlxs_pci_model[i].id == LP9802) {
                        bcopy(&emlxs_pci_model[i],
                            &hba->model_info,
                            sizeof (emlxs_model_t));
                        break;
                    }
                }
            }
        }
    }

    /*
     * Setup and issue mailbox READ REV command
     */
    vpd->opFwRev = 0;
    vpd->postKernRev = 0;
    vpd->sli1FwRev = 0;
    vpd->sli2FwRev = 0;
    vpd->sli3FwRev = 0;
    vpd->sli4FwRev = 0;

    vpd->postKernName[0] = 0;
    vpd->opFwName[0] = 0;
    vpd->sli1FwName[0] = 0;
    vpd->sli2FwName[0] = 0;
    vpd->sli3FwName[0] = 0;
    vpd->sli4FwName[0] = 0;

    vpd->opFwLabel[0] = 0;
    vpd->sli1FwLabel[0] = 0;
    vpd->sli2FwLabel[0] = 0;
    vpd->sli3FwLabel[0] = 0;
    vpd->sli4FwLabel[0] = 0;

    /* Sanity check */
    if (hba->model_info.sli_mask & EMLXS_SLI4_MASK) {
        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
            "Adapter / SLI mode mismatch mask:x%x",
            hba->model_info.sli_mask);

        rval = EIO;
        goto failed;
    }

    EMLXS_STATE_CHANGE(hba, FC_INIT_REV);
    emlxs_mb_read_rev(hba, mbq, 0);
    if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
            "Unable to read rev. Mailbox cmd=%x status=%x",
            mb->mbxCommand, mb->mbxStatus);

        rval = EIO;
        goto failed;
    }

    if (mb->un.varRdRev.rr == 0) {
        /* Old firmware */
        if (read_rev_reset == 0) {
            read_rev_reset = 1;

            goto reset;
        } else {
            EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
                "Outdated firmware detected.");
        }

        vpd->rBit = 0;
    } else {
        if (mb->un.varRdRev.un.b.ProgType != FUNC_FIRMWARE) {
            if (read_rev_reset == 0) {
                read_rev_reset = 1;

                goto reset;
            } else {
                EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
                    "Non-operational firmware detected. "
                    "type=%x",
                    mb->un.varRdRev.un.b.ProgType);
            }
        }

        vpd->rBit = 1;
        vpd->sli1FwRev = mb->un.varRdRev.sliFwRev1;
        bcopy((char *)mb->un.varRdRev.sliFwName1, vpd->sli1FwLabel,
            16);
        vpd->sli2FwRev = mb->un.varRdRev.sliFwRev2;
        bcopy((char *)mb->un.varRdRev.sliFwName2, vpd->sli2FwLabel,
            16);

        /*
         * Let's try to read the SLI3 version.
         * Setup and issue mailbox READ REV(v3) command.
         */
        EMLXS_STATE_CHANGE(hba, FC_INIT_REV);

        /* Reuse mbq from previous mbox */
        bzero(mbq, sizeof (MAILBOXQ));

        emlxs_mb_read_rev(hba, mbq, 1);

        if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
            MBX_SUCCESS) {
            EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
                "Unable to read rev (v3). Mailbox cmd=%x status=%x",
                mb->mbxCommand, mb->mbxStatus);

            rval = EIO;
            goto failed;
        }

        if (mb->un.varRdRev.rf3) {
            /*
             * vpd->sli2FwRev = mb->un.varRdRev.sliFwRev1;
             * Not needed
             */
            vpd->sli3FwRev = mb->un.varRdRev.sliFwRev2;
            bcopy((char *)mb->un.varRdRev.sliFwName2,
                vpd->sli3FwLabel, 16);
        }
    }

    if ((sli_mode == EMLXS_HBA_SLI3_MODE) && (vpd->sli3FwRev == 0)) {
        if (vpd->sli2FwRev) {
            sli_mode = EMLXS_HBA_SLI2_MODE;
            sli_mode_mask = EMLXS_SLI2_MASK;
        } else {
            sli_mode = 0;
            sli_mode_mask = 0;
        }
    }

    else if ((sli_mode == EMLXS_HBA_SLI2_MODE) && (vpd->sli2FwRev == 0)) {
        if (vpd->sli3FwRev) {
            sli_mode = EMLXS_HBA_SLI3_MODE;
            sli_mode_mask = EMLXS_SLI3_MASK;
        } else {
            sli_mode = 0;
            sli_mode_mask = 0;
        }
    }

    if (!(hba->model_info.sli_mask & sli_mode_mask)) {
        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
            "Firmware not available. sli-mode=%d",
            cfg[CFG_SLI_MODE].current);

        rval = EIO;
        goto failed;
    }

    /* Save information as VPD data */
    vpd->postKernRev = mb->un.varRdRev.postKernRev;
    vpd->opFwRev = mb->un.varRdRev.opFwRev;
    bcopy((char *)mb->un.varRdRev.opFwName, vpd->opFwLabel, 16);
    vpd->biuRev = mb->un.varRdRev.biuRev;
    vpd->smRev = mb->un.varRdRev.smRev;
    vpd->smFwRev = mb->un.varRdRev.un.smFwRev;
    vpd->endecRev = mb->un.varRdRev.endecRev;
    vpd->fcphHigh = mb->un.varRdRev.fcphHigh;
    vpd->fcphLow = mb->un.varRdRev.fcphLow;
    vpd->feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
    vpd->feaLevelLow = mb->un.varRdRev.feaLevelLow;

    /* Decode FW names */
    emlxs_decode_version(vpd->postKernRev, vpd->postKernName,
        sizeof (vpd->postKernName));
    emlxs_decode_version(vpd->opFwRev, vpd->opFwName,
        sizeof (vpd->opFwName));
    emlxs_decode_version(vpd->sli1FwRev, vpd->sli1FwName,
        sizeof (vpd->sli1FwName));
    emlxs_decode_version(vpd->sli2FwRev, vpd->sli2FwName,
        sizeof (vpd->sli2FwName));
    emlxs_decode_version(vpd->sli3FwRev, vpd->sli3FwName,
        sizeof (vpd->sli3FwName));
    emlxs_decode_version(vpd->sli4FwRev, vpd->sli4FwName,
        sizeof (vpd->sli4FwName));

    /* Decode FW labels */
    emlxs_decode_label(vpd->opFwLabel, vpd->opFwLabel, 1,
        sizeof (vpd->opFwLabel));
    emlxs_decode_label(vpd->sli1FwLabel, vpd->sli1FwLabel, 1,
        sizeof (vpd->sli1FwLabel));
    emlxs_decode_label(vpd->sli2FwLabel, vpd->sli2FwLabel, 1,
        sizeof (vpd->sli2FwLabel));
    emlxs_decode_label(vpd->sli3FwLabel, vpd->sli3FwLabel, 1,
        sizeof (vpd->sli3FwLabel));
    emlxs_decode_label(vpd->sli4FwLabel, vpd->sli4FwLabel, 1,
        sizeof (vpd->sli4FwLabel));

    /* Reuse mbq from previous mbox */
    bzero(mbq, sizeof (MAILBOXQ));

    key = emlxs_get_key(hba, mbq);

    /* Get adapter VPD information */
    offset = 0;
    bzero(vpd_data, sizeof (vpd_data));
    vpd->port_index = (uint32_t)-1;

    while (offset < DMP_VPD_SIZE) {
        /* Reuse mbq from previous mbox */
        bzero(mbq, sizeof (MAILBOXQ));

        emlxs_mb_dump_vpd(hba, mbq, offset);
        if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
            MBX_SUCCESS) {
            /*
             * Let it go through even if it failed.
             * Not all adapters have VPD info, so some will
             * fail here. This is not a problem.
             */

            EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
                "No VPD found. offset=%x status=%x", offset,
                mb->mbxStatus);
            break;
        } else {
            if (mb->un.varDmp.ra == 1) {
                uint32_t *lp1, *lp2;
                uint32_t bsize;
                uint32_t wsize;

                /*
                 * mb->un.varDmp.word_cnt is actually the
                 * byte count for the dump reply
                 */
                bsize = mb->un.varDmp.word_cnt;

                /* Stop if no data was received */
                if (bsize == 0) {
                    break;
                }

                /* Check limit on byte size */
                bsize = (bsize >
                    (sizeof (vpd_data) - offset)) ?
                    (sizeof (vpd_data) - offset) : bsize;

                /*
                 * Convert size from bytes to words with
                 * a minimum of 1 word
                 */
                wsize = (bsize > 4) ? (bsize >> 2) : 1;
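
                /*
                 * For example, bsize <= 4 copies 1 word;
                 * bsize=85 (0x55) copies 21 full words
                 * (84 bytes); the trailing bytes of a
                 * partial word are dropped.
                 */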

                /*
                 * Transfer data into vpd_data buffer one
                 * word at a time
                 */
                lp1 = (uint32_t *)&mb->un.varDmp.resp_offset;
                lp2 = (uint32_t *)&vpd_data[offset];

                for (i = 0; i < wsize; i++) {
                    status = *lp1++;
                    *lp2++ = BE_SWAP32(status);
                }

                /* Increment total byte count saved */
                offset += (wsize << 2);

                /*
                 * Stop if less than a full transfer was
                 * received
                 */
                if (wsize < DMP_VPD_DUMP_WCOUNT) {
                    break;
                }

            } else {
                EMLXS_MSGF(EMLXS_CONTEXT,
                    &emlxs_init_debug_msg,
                    "No VPD acknowledgment. offset=%x",
                    offset);
                break;
            }
        }

    }

    if (vpd_data[0]) {
        (void) emlxs_parse_vpd(hba, (uint8_t *)vpd_data, offset);

        /*
         * If there is a VPD part number, and it does not
         * match the current default HBA model info,
         * replace the default data with an entry that
         * does match.
         *
         * After emlxs_parse_vpd(), model holds the VPD value
         * for V2 and part_num holds the value for PN. These
         * two values are NOT necessarily the same.
         */

        rval = 0;
        if ((vpd->model[0] != 0) &&
            (strcmp(&vpd->model[0], hba->model_info.model) != 0)) {

            /* First scan for a V2 match */

            for (i = 1; i < emlxs_pci_model_count; i++) {
                if (strcmp(&vpd->model[0],
                    emlxs_pci_model[i].model) == 0) {
                    bcopy(&emlxs_pci_model[i],
                        &hba->model_info,
                        sizeof (emlxs_model_t));
                    rval = 1;
                    break;
                }
            }
        }

        if (!rval && (vpd->part_num[0] != 0) &&
            (strcmp(&vpd->part_num[0], hba->model_info.model) != 0)) {

            /* Next scan for a PN match */

            for (i = 1; i < emlxs_pci_model_count; i++) {
                if (strcmp(&vpd->part_num[0],
                    emlxs_pci_model[i].model) == 0) {
                    bcopy(&emlxs_pci_model[i],
                        &hba->model_info,
                        sizeof (emlxs_model_t));
                    break;
                }
            }
        }

        /*
         * Now let's update hba->model_info with the real
         * VPD data, if any.
         */

        /*
         * Replace the default model description with vpd data
         */
        if (vpd->model_desc[0] != 0) {
            (void) strncpy(hba->model_info.model_desc,
                vpd->model_desc,
                (sizeof (hba->model_info.model_desc)-1));
        }

        /* Replace the default model with vpd data */
        if (vpd->model[0] != 0) {
            (void) strncpy(hba->model_info.model, vpd->model,
                (sizeof (hba->model_info.model)-1));
        }

        /* Replace the default program types with vpd data */
        if (vpd->prog_types[0] != 0) {
            emlxs_parse_prog_types(hba, vpd->prog_types);
        }
    }

    /*
     * Since the adapter model may have changed with the vpd data,
     * double-check that the adapter is still supported
     */
    if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) {
        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
            "Unsupported adapter found. "
            "Id:%d Device id:0x%x SSDID:0x%x Model:%s",
            hba->model_info.id, hba->model_info.device_id,
            hba->model_info.ssdid, hba->model_info.model);

        rval = EIO;
        goto failed;
    }

    /* Read the adapter's wakeup parms */
    (void) emlxs_read_wakeup_parms(hba, &hba->wakeup_parms, 1);
    emlxs_decode_version(hba->wakeup_parms.u0.boot_bios_wd[0],
        vpd->boot_version, sizeof (vpd->boot_version));

    /* Get fcode version property */
    emlxs_get_fcode_version(hba);

    EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
        "Firmware: kern=%08x stub=%08x sli1=%08x", vpd->postKernRev,
        vpd->opFwRev, vpd->sli1FwRev);

    EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
        "Firmware: sli2=%08x sli3=%08x sli4=%08x fl=%x", vpd->sli2FwRev,
        vpd->sli3FwRev, vpd->sli4FwRev, vpd->feaLevelHigh);

    EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
        "BIOS: boot=%s fcode=%s", vpd->boot_version, vpd->fcode_version);

    /*
     * If firmware checking is enabled and the adapter model indicates
     * a firmware image, then perform a firmware version check
     */
    hba->fw_flag = 0;
    hba->fw_timer = 0;

    if (((fw_check & 0x1) &&
        (hba->model_info.flags & EMLXS_ORACLE_BRANDED) &&
        hba->model_info.fwid) || ((fw_check & 0x2) &&
        hba->model_info.fwid)) {
        emlxs_firmware_t *fw;

        /* Find firmware image indicated by adapter model */
        fw = NULL;
        for (i = 0; i < emlxs_fw_count; i++) {
            if (emlxs_fw_table[i].id == hba->model_info.fwid) {
                fw = &emlxs_fw_table[i];
                break;
            }
        }

        /*
         * If the image was found, then verify the adapter's
         * current firmware versions
         */
        if (fw) {
            if (!kern_update &&
                ((fw->kern && (vpd->postKernRev != fw->kern)) ||
                (fw->stub && (vpd->opFwRev != fw->stub)))) {

                hba->fw_flag |= FW_UPDATE_NEEDED;

            } else if ((fw->kern && (vpd->postKernRev !=
                fw->kern)) ||
                (fw->stub && (vpd->opFwRev != fw->stub)) ||
                (fw->sli1 && (vpd->sli1FwRev != fw->sli1)) ||
                (fw->sli2 && (vpd->sli2FwRev != fw->sli2)) ||
                (fw->sli3 && (vpd->sli3FwRev != fw->sli3)) ||
                (fw->sli4 && (vpd->sli4FwRev != fw->sli4))) {
                EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
                    "Firmware update needed. "
                    "Updating. id=%d fw=%d",
                    hba->model_info.id, hba->model_info.fwid);

#ifdef MODFW_SUPPORT
                /*
                 * Load the firmware image now.
                 * If MODFW_SUPPORT is not defined, the
                 * firmware image will already be defined
                 * in the emlxs_fw_table
                 */
                emlxs_fw_load(hba, fw);
#endif /* MODFW_SUPPORT */

                if (fw->image && fw->size) {
                    uint32_t rc;

                    rc = emlxs_fw_download(hba,
                        (char *)fw->image, fw->size, 0);
                    if ((rc != FC_SUCCESS) &&
                        (rc != EMLXS_REBOOT_REQUIRED)) {
                        EMLXS_MSGF(EMLXS_CONTEXT,
                            &emlxs_init_msg,
                            "Firmware update failed.");
                        hba->fw_flag |=
                            FW_UPDATE_NEEDED;
                    }
#ifdef MODFW_SUPPORT
                    /*
                     * Unload the firmware image from
                     * kernel memory
                     */
                    emlxs_fw_unload(hba, fw);
#endif /* MODFW_SUPPORT */

                    fw_check = 0;

                    goto reset;
                }

                hba->fw_flag |= FW_UPDATE_NEEDED;

                EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
                    "Firmware image unavailable.");
            } else {
                EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
                    "Firmware update not needed.");
            }
        } else {
            /* This should not happen */

            /*
             * This means either the adapter database is not
             * correct or a firmware image is missing from the
             * compile
             */
            EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
                "Firmware image unavailable. id=%d fw=%d",
                hba->model_info.id, hba->model_info.fwid);
        }
    }

    /*
     * Add our interrupt routine to the kernel's interrupt chain and
     * enable it. If MSI is enabled, this will cause Solaris to
     * program the MSI address and data registers in PCI config space.
     */
    if (EMLXS_INTR_ADD(hba) != DDI_SUCCESS) {
        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
            "Unable to add interrupt(s).");

        rval = EIO;
        goto failed;
    }

    EMLXS_STATE_CHANGE(hba, FC_INIT_CFGPORT);

    /* Reuse mbq from previous mbox */
    bzero(mbq, sizeof (MAILBOXQ));

    (void) emlxs_mb_config_port(hba, mbq, sli_mode, key);
    if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
            "Unable to configure port. "
            "Mailbox cmd=%x status=%x slimode=%d key=%x",
            mb->mbxCommand, mb->mbxStatus, sli_mode, key);

        for (sli_mode--; sli_mode > 0; sli_mode--) {
            /* Check if sli_mode is supported by this adapter */
            if (hba->model_info.sli_mask &
                EMLXS_SLI_MASK(sli_mode)) {
                sli_mode_mask = EMLXS_SLI_MASK(sli_mode);
                break;
            }
        }
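
        /*
         * For example, if CONFIG_PORT failed in SLI3 mode, the
         * loop above falls back to SLI2 (when the adapter's
         * sli_mask supports it) and retries via the reset path
         * below; sli_mode reaching 0 means no fallback exists.
         */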

        if (sli_mode) {
            fw_check = 0;

            goto reset;
        }

        hba->flag &= ~FC_SLIM2_MODE;

        rval = EIO;
        goto failed;
    }

    /* Check if SLI3 mode was achieved */
    if (mb->un.varCfgPort.rMA &&
        (mb->un.varCfgPort.sli_mode == EMLXS_HBA_SLI3_MODE)) {

        if (mb->un.varCfgPort.vpi_max > 1) {
            hba->flag |= FC_NPIV_ENABLED;

            if (hba->model_info.chip >= EMLXS_SATURN_CHIP) {
                hba->vpi_max =
                    min(mb->un.varCfgPort.vpi_max,
                    MAX_VPORTS - 1);
            } else {
                hba->vpi_max =
                    min(mb->un.varCfgPort.vpi_max,
                    MAX_VPORTS_LIMITED - 1);
            }
        }

#if (EMLXS_MODREV >= EMLXS_MODREV5)
        hba->fca_tran->fca_num_npivports =
            (cfg[CFG_NPIV_ENABLE].current) ? hba->vpi_max : 0;
#endif /* >= EMLXS_MODREV5 */

        if (mb->un.varCfgPort.gerbm && mb->un.varCfgPort.max_hbq) {
            hba->flag |= FC_HBQ_ENABLED;
        }

        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
            "SLI3 mode: flag=%x vpi_max=%d", hba->flag, hba->vpi_max);
    } else {
        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
            "SLI2 mode: flag=%x", hba->flag);
        sli_mode = EMLXS_HBA_SLI2_MODE;
        sli_mode_mask = EMLXS_SLI2_MASK;
        hba->sli_mode = sli_mode;
#if (EMLXS_MODREV >= EMLXS_MODREV5)
        hba->fca_tran->fca_num_npivports = 0;
#endif /* >= EMLXS_MODREV5 */

    }

    /* Get and save the current firmware version (based on sli_mode) */
    emlxs_decode_firmware_rev(hba, vpd);

    emlxs_pcix_mxr_update(hba, 0);

    /* Reuse mbq from previous mbox */
    bzero(mbq, sizeof (MAILBOXQ));

    emlxs_mb_read_config(hba, mbq);
    if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
            "Unable to read configuration. Mailbox cmd=%x status=%x",
            mb->mbxCommand, mb->mbxStatus);

        rval = EIO;
        goto failed;
    }

    /* Save the link speed capabilities */
    vpd->link_speed = (uint16_t)mb->un.varRdConfig.lmt;
    emlxs_process_link_speed(hba);

    /* Set the max node count */
    if (cfg[CFG_NUM_NODES].current > 0) {
        hba->max_nodes =
            min(cfg[CFG_NUM_NODES].current,
            mb->un.varRdConfig.max_rpi);
    } else {
        hba->max_nodes = mb->un.varRdConfig.max_rpi;
    }

    /* Set the io throttle */
    hba->io_throttle = mb->un.varRdConfig.max_xri - IO_THROTTLE_RESERVE;

    /* Set max_iotag */
    if (cfg[CFG_NUM_IOTAGS].current) {
        hba->max_iotag = (uint16_t)cfg[CFG_NUM_IOTAGS].current;
    } else {
        hba->max_iotag = mb->un.varRdConfig.max_xri;
    }

    /* Set out-of-range iotag base */
    hba->fc_oor_iotag = hba->max_iotag;

    /*
     * Allocate some memory for buffers
     */
    if (emlxs_mem_alloc_buffer(hba) == 0) {
        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
            "Unable to allocate memory buffers.");

        EMLXS_STATE_CHANGE(hba, FC_ERROR);
        return (ENOMEM);
    }

    /*
     * Setup and issue mailbox RUN BIU DIAG command.
     * Setup the test buffers first.
     */
    if (((mp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF)) == 0) ||
        ((mp1 = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF)) == 0)) {
        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
            "Unable to allocate diag buffers.");

        rval = ENOMEM;
        goto failed;
    }

    bcopy((caddr_t)&emlxs_diag_pattern[0], (caddr_t)mp->virt,
        MEM_ELSBUF_SIZE);
    EMLXS_MPDATA_SYNC(mp->dma_handle, 0, MEM_ELSBUF_SIZE,
        DDI_DMA_SYNC_FORDEV);

    bzero(mp1->virt, MEM_ELSBUF_SIZE);
    EMLXS_MPDATA_SYNC(mp1->dma_handle, 0, MEM_ELSBUF_SIZE,
        DDI_DMA_SYNC_FORDEV);

    /* Reuse mbq from previous mbox */
    bzero(mbq, sizeof (MAILBOXQ));

    (void) emlxs_mb_run_biu_diag(hba, mbq, mp->phys, mp1->phys);

    if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
            "Unable to run BIU diag. Mailbox cmd=%x status=%x",
            mb->mbxCommand, mb->mbxStatus);

        rval = EIO;
        goto failed;
    }

    EMLXS_MPDATA_SYNC(mp1->dma_handle, 0, MEM_ELSBUF_SIZE,
        DDI_DMA_SYNC_FORKERNEL);

#ifdef FMA_SUPPORT
    if (mp->dma_handle) {
        if (emlxs_fm_check_dma_handle(hba, mp->dma_handle)
            != DDI_FM_OK) {
            EMLXS_MSGF(EMLXS_CONTEXT,
                &emlxs_invalid_dma_handle_msg,
                "sli3_online: hdl=%p",
                mp->dma_handle);
            rval = EIO;
            goto failed;
        }
    }

    if (mp1->dma_handle) {
        if (emlxs_fm_check_dma_handle(hba, mp1->dma_handle)
            != DDI_FM_OK) {
            EMLXS_MSGF(EMLXS_CONTEXT,
                &emlxs_invalid_dma_handle_msg,
                "sli3_online: hdl=%p",
                mp1->dma_handle);
            rval = EIO;
            goto failed;
        }
    }
#endif /* FMA_SUPPORT */

    outptr = mp->virt;
    inptr = mp1->virt;

    for (i = 0; i < MEM_ELSBUF_SIZE; i++) {
        if (*outptr++ != *inptr++) {
            outptr--;
            inptr--;

            EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
                "BIU diagnostic failed. "
                "offset %x value %x should be %x.",
                i, (uint32_t)*inptr, (uint32_t)*outptr);

            rval = EIO;
            goto failed;
        }
    }

    /* Free the buffers since we were polling */
    emlxs_mem_put(hba, MEM_BUF, (void *)mp);
    mp = NULL;
    emlxs_mem_put(hba, MEM_BUF, (void *)mp1);
    mp1 = NULL;

    hba->channel_fcp = FC_FCP_RING;
    hba->channel_els = FC_ELS_RING;
    hba->channel_ip = FC_IP_RING;
    hba->channel_ct = FC_CT_RING;
    hba->sli.sli3.ring_count = MAX_RINGS;

    hba->channel_tx_count = 0;
    hba->io_count = 0;
    hba->fc_iotag = 1;

    for (i = 0; i < hba->chan_count; i++) {
        cp = &hba->chan[i];

        /* 1 to 1 mapping between ring and channel */
        cp->iopath = (void *)&hba->sli.sli3.ring[i];

        cp->hba = hba;
        cp->channelno = i;
    }

    /*
     * Setup and issue mailbox CONFIGURE RING command
     */
    for (i = 0; i < (uint32_t)hba->sli.sli3.ring_count; i++) {
        /*
         * Initialize cmd/rsp ring pointers
         */
        rp = &hba->sli.sli3.ring[i];

        /* 1 to 1 mapping between ring and channel */
        rp->channelp = &hba->chan[i];

        rp->hba = hba;
        rp->ringno = (uint8_t)i;

        rp->fc_cmdidx = 0;
        rp->fc_rspidx = 0;
        EMLXS_STATE_CHANGE(hba, FC_INIT_CFGRING);

        /* Reuse mbq from previous mbox */
        bzero(mbq, sizeof (MAILBOXQ));

        emlxs_mb_config_ring(hba, i, mbq);
        if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
            MBX_SUCCESS) {
            EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
                "Unable to configure ring. "
                "Mailbox cmd=%x status=%x",
                mb->mbxCommand, mb->mbxStatus);

            rval = EIO;
            goto failed;
        }
    }

    /*
     * Setup link timers
     */
    EMLXS_STATE_CHANGE(hba, FC_INIT_INITLINK);

    /* Reuse mbq from previous mbox */
    bzero(mbq, sizeof (MAILBOXQ));

    emlxs_mb_config_link(hba, mbq);
    if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
            "Unable to configure link. Mailbox cmd=%x status=%x",
            mb->mbxCommand, mb->mbxStatus);

        rval = EIO;
        goto failed;
    }

#ifdef MAX_RRDY_SUPPORT
    /* Set MAX_RRDY if one is provided */
    if (cfg[CFG_MAX_RRDY].current) {

        /* Reuse mbq from previous mbox */
        bzero(mbq, sizeof (MAILBOXQ));

        emlxs_mb_set_var(hba, (MAILBOX *)mbq, 0x00060412,
            cfg[CFG_MAX_RRDY].current);

        if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
            MBX_SUCCESS) {
            EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
                "MAX_RRDY: Unable to set. status=%x "
                "value=%d",
                mb->mbxStatus, cfg[CFG_MAX_RRDY].current);
        } else {
            EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
                "MAX_RRDY: %d", cfg[CFG_MAX_RRDY].current);
        }
    }
#endif /* MAX_RRDY_SUPPORT */

    /* Reuse mbq from previous mbox */
    bzero(mbq, sizeof (MAILBOXQ));

    /*
     * We need to get the login parameters for NID
     */
    (void) emlxs_mb_read_sparam(hba, mbq);
    mp = (MATCHMAP *)mbq->bp;
    if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
            "Unable to read parameters. Mailbox cmd=%x status=%x",
            mb->mbxCommand, mb->mbxStatus);

        rval = EIO;
        goto failed;
    }

    /* Free the buffer since we were polling */
    emlxs_mem_put(hba, MEM_BUF, (void *)mp);
    mp = NULL;

    /* If no serial number in VPD data, then use the WWPN */
    if (vpd->serial_num[0] == 0) {
        outptr = (uint8_t *)&hba->wwpn.IEEE[0];
        for (i = 0; i < 12; i++) {
            status = *outptr++;
            j = ((status & 0xf0) >> 4);
            if (j <= 9) {
                vpd->serial_num[i] =
                    (char)((uint8_t)'0' + (uint8_t)j);
            } else {
                vpd->serial_num[i] =
                    (char)((uint8_t)'A' + (uint8_t)(j - 10));
            }

            i++;
            j = (status & 0xf);
            if (j <= 9) {
                vpd->serial_num[i] =
                    (char)((uint8_t)'0' + (uint8_t)j);
            } else {
                vpd->serial_num[i] =
                    (char)((uint8_t)'A' + (uint8_t)(j - 10));
            }
        }
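
        /*
         * Each of the 6 IEEE address bytes becomes two hex
         * characters; e.g. a WWPN IEEE address of
         * 00:00:C9:12:34:56 yields serial_num "0000C9123456".
         */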

        /*
         * Set port number and port index to zero.
         * The WWNs are unique to each port and therefore port_num
         * must equal zero. This affects the hba_fru_details
         * structure in fca_bind_port()
         */
        vpd->port_num[0] = 0;
        vpd->port_index = 0;
    }

    /*
     * Make a first attempt to set a port index.
     * Check if this is a multifunction adapter
     */
    if ((vpd->port_index == (uint32_t)-1) &&
        (hba->model_info.chip >= EMLXS_THOR_CHIP)) {
        char *buffer;
        int32_t i;

        /*
         * The port address looks like this:
         * 1	- for port index 0
         * 1,1	- for port index 1
         * 1,2	- for port index 2
         */
        buffer = ddi_get_name_addr(hba->dip);

        if (buffer) {
            vpd->port_index = 0;

            /* Reverse scan for a comma */
            for (i = strlen(buffer) - 1; i > 0; i--) {
                if (buffer[i] == ',') {
                    /* Comma found - set index now */
                    vpd->port_index =
                        emlxs_strtol(&buffer[i + 1], 10);
                    break;
                }
            }
        }
    }

    /* Make a final attempt to set a port index */
    if (vpd->port_index == (uint32_t)-1) {
        dev_info_t *p_dip;
        dev_info_t *c_dip;

        p_dip = ddi_get_parent(hba->dip);
        c_dip = ddi_get_child(p_dip);

        vpd->port_index = 0;
        while (c_dip && (hba->dip != c_dip)) {
            c_dip = ddi_get_next_sibling(c_dip);
            vpd->port_index++;
        }
    }

    if (vpd->port_num[0] == 0) {
        if (hba->model_info.channels == EMLXS_MULTI_CHANNEL) {
            (void) snprintf(vpd->port_num,
                (sizeof (vpd->port_num)-1),
                "%d", vpd->port_index);
        }
    }

    if (vpd->id[0] == 0) {
        (void) strncpy(vpd->id, hba->model_info.model_desc,
            (sizeof (vpd->id)-1));
    }

    if (vpd->manufacturer[0] == 0) {
        (void) strncpy(vpd->manufacturer, hba->model_info.manufacturer,
            (sizeof (vpd->manufacturer)-1));
    }

    if (vpd->part_num[0] == 0) {
        (void) strncpy(vpd->part_num, hba->model_info.model,
            (sizeof (vpd->part_num)-1));
    }

    if (vpd->model_desc[0] == 0) {
        (void) strncpy(vpd->model_desc, hba->model_info.model_desc,
            (sizeof (vpd->model_desc)-1));
    }

    if (vpd->model[0] == 0) {
        (void) strncpy(vpd->model, hba->model_info.model,
            (sizeof (vpd->model)-1));
    }

    if (vpd->prog_types[0] == 0) {
        emlxs_build_prog_types(hba, vpd);
    }

    /* Create the symbolic names */
    (void) snprintf(hba->snn, (sizeof (hba->snn)-1),
        "Emulex %s FV%s DV%s %s",
        hba->model_info.model, hba->vpd.fw_version, emlxs_version,
        (char *)utsname.nodename);

    (void) snprintf(hba->spn, (sizeof (hba->spn)-1),
        "Emulex PPN-%01x%01x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
        hba->wwpn.nameType, hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb,
        hba->wwpn.IEEE[0], hba->wwpn.IEEE[1], hba->wwpn.IEEE[2],
        hba->wwpn.IEEE[3], hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);

    if (cfg[CFG_NETWORK_ON].current) {
        if ((hba->sparam.portName.nameType != NAME_IEEE) ||
            (hba->sparam.portName.IEEEextMsn != 0) ||
            (hba->sparam.portName.IEEEextLsb != 0)) {

            EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
                "WWPN doesn't conform to IP profile: "
                "nameType=%x. Disabling networking.",
                hba->sparam.portName.nameType);

            cfg[CFG_NETWORK_ON].current = 0;
        }
    }

    if (cfg[CFG_NETWORK_ON].current) {
        /* Reuse mbq from previous mbox */
        bzero(mbq, sizeof (MAILBOXQ));

        /* Issue CONFIG FARP */
        emlxs_mb_config_farp(hba, mbq);
        if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
            MBX_SUCCESS) {
            /*
             * Let it go through even if it failed.
             */
            EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
                "Unable to configure FARP. "
                "Mailbox cmd=%x status=%x",
                mb->mbxCommand, mb->mbxStatus);
        }
    }
#ifdef MSI_SUPPORT
    /* Configure MSI map if required */
    if (hba->intr_count > 1) {

        if (hba->intr_type == DDI_INTR_TYPE_MSIX) {
            /* always start from 0 */
            hba->last_msiid = 0;
        }

        /* Reuse mbq from previous mbox */
        bzero(mbq, sizeof (MAILBOXQ));

        emlxs_mb_config_msix(hba, mbq, hba->intr_map, hba->intr_count);

        if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) ==
            MBX_SUCCESS) {
            goto msi_configured;
        }

        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
            "Unable to config MSIX. Mailbox cmd=0x%x status=0x%x",
            mb->mbxCommand, mb->mbxStatus);

        /* Reuse mbq from previous mbox */
        bzero(mbq, sizeof (MAILBOXQ));

        emlxs_mb_config_msi(hba, mbq, hba->intr_map, hba->intr_count);

        if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) ==
            MBX_SUCCESS) {
            goto msi_configured;
        }


        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
            "Unable to config MSI. Mailbox cmd=0x%x status=0x%x",
            mb->mbxCommand, mb->mbxStatus);

        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
            "Attempting single interrupt mode...");

        /* First cleanup old interrupts */
        (void) emlxs_msi_remove(hba);
        (void) emlxs_msi_uninit(hba);

        status = emlxs_msi_init(hba, 1);

        if (status != DDI_SUCCESS) {
            EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
                "Unable to initialize interrupt. status=%d",
                status);

            rval = EIO;
            goto failed;
        }

        /*
         * Reset adapter - The adapter needs to be reset because
         * the bus cannot handle the MSI change without handshaking
         * with the adapter again
         */

        (void) emlxs_mem_free_buffer(hba);
        fw_check = 0;
        goto reset;
    }

msi_configured:


    if ((hba->intr_count >= 1) &&
        (hba->sli_mode == EMLXS_HBA_SLI3_MODE)) {
        /* MSI ids run from 0 to intr_count - 1 */
        /* Setup msi2chan[msi_id] */
        for (i = 0; i < hba->intr_count; i++) {
            hba->msi2chan[i] = i;
            if (i >= hba->chan_count)
                hba->msi2chan[i] = (i - hba->chan_count);
        }
    }
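
        /*
         * For example, with intr_count=8 and chan_count=4 this
         * maps MSI ids 0-3 to channels 0-3 and wraps ids 4-7
         * back onto channels 0-3 again.
         */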
#endif /* MSI_SUPPORT */

    /*
     * We always disable the firmware traffic cop feature
     */
    if (emlxs_disable_traffic_cop) {
        /* Reuse mbq from previous mbox */
        bzero(mbq, sizeof (MAILBOXQ));

        emlxs_disable_tc(hba, mbq);
        if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
            MBX_SUCCESS) {
            EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
                "Unable to disable traffic cop. "
                "Mailbox cmd=%x status=%x",
                mb->mbxCommand, mb->mbxStatus);

            rval = EIO;
            goto failed;
        }
    }


    /* Reuse mbq from previous mbox */
    bzero(mbq, sizeof (MAILBOXQ));

    /* Register for async events */
    emlxs_mb_async_event(hba, mbq);
    if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
            "Async events disabled. Mailbox status=%x",
            mb->mbxStatus);
    } else {
        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
            "Async events enabled.");
        hba->flag |= FC_ASYNC_EVENTS;
    }

    EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN);

    emlxs_sli3_enable_intr(hba);

    if (hba->flag & FC_HBQ_ENABLED) {
        if (port->flag & EMLXS_TGT_ENABLED) {
            if (emlxs_hbq_setup(hba, EMLXS_FCT_HBQ_ID)) {
                EMLXS_MSGF(EMLXS_CONTEXT,
                    &emlxs_init_failed_msg,
                    "Unable to setup FCT HBQ.");

                rval = ENOMEM;

#ifdef SFCT_SUPPORT
                /* Check if we can fall back to just */
                /* initiator mode */
                if ((hba->pm_state == EMLXS_PM_IN_ATTACH) &&
                    (port->flag & EMLXS_INI_ENABLED) &&
                    (cfg[CFG_DTM_ENABLE].current == 1) &&
                    (cfg[CFG_TARGET_MODE].current == 0)) {

                    cfg[CFG_DTM_ENABLE].current = 0;

                    EMLXS_MSGF(EMLXS_CONTEXT,
                        &emlxs_init_failed_msg,
                        "Disabling dynamic target mode. "
                        "Enabling initiator mode only.");

                    /* This will trigger the driver to */
                    /* reattach */
                    rval = EAGAIN;
                }
#endif /* SFCT_SUPPORT */
                goto failed;
            }
            EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
                "FCT Ring: Posted %d buffers.", MEM_FCTBUF_COUNT);
        }

        if (cfg[CFG_NETWORK_ON].current) {
            if (emlxs_hbq_setup(hba, EMLXS_IP_HBQ_ID)) {
                EMLXS_MSGF(EMLXS_CONTEXT,
                    &emlxs_init_failed_msg,
                    "Unable to setup IP HBQ.");

                rval = ENOMEM;
                goto failed;
            }
            EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
                "IP Ring: Posted %d buffers.", MEM_IPBUF_COUNT);
        }

        if (emlxs_hbq_setup(hba, EMLXS_ELS_HBQ_ID)) {
            EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
                "Unable to setup ELS HBQ.");
            rval = ENOMEM;
            goto failed;
        }
        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
            "ELS Ring: Posted %d buffers.", MEM_ELSBUF_COUNT);

        if (emlxs_hbq_setup(hba, EMLXS_CT_HBQ_ID)) {
            EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
                "Unable to setup CT HBQ.");

            rval = ENOMEM;
            goto failed;
        }
        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
            "CT Ring: Posted %d buffers.", MEM_CTBUF_COUNT);
    } else {
        if (port->flag & EMLXS_TGT_ENABLED) {
            /* Post the FCT unsol buffers */
            rp = &hba->sli.sli3.ring[FC_FCT_RING];
            for (j = 0; j < MEM_FCTBUF_COUNT; j += 2) {
                (void) emlxs_post_buffer(hba, rp, 2);
            }
            EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
                "FCP Ring: Posted %d buffers.", MEM_FCTBUF_COUNT);
        }

        if (cfg[CFG_NETWORK_ON].current) {
            /* Post the IP unsol buffers */
            rp = &hba->sli.sli3.ring[FC_IP_RING];
            for (j = 0; j < MEM_IPBUF_COUNT; j += 2) {
                (void) emlxs_post_buffer(hba, rp, 2);
            }
            EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
                "IP Ring: Posted %d buffers.", MEM_IPBUF_COUNT);
        }

        /* Post the ELS unsol buffers */
        rp = &hba->sli.sli3.ring[FC_ELS_RING];
        for (j = 0; j < MEM_ELSBUF_COUNT; j += 2) {
            (void) emlxs_post_buffer(hba, rp, 2);
        }
        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
            "ELS Ring: Posted %d buffers.", MEM_ELSBUF_COUNT);


        /* Post the CT unsol buffers */
        rp = &hba->sli.sli3.ring[FC_CT_RING];
        for (j = 0; j < MEM_CTBUF_COUNT; j += 2) {
            (void) emlxs_post_buffer(hba, rp, 2);
        }
        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
            "CT Ring: Posted %d buffers.", MEM_CTBUF_COUNT);
    }

    (void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ));

    /* Check persist-linkdown */
    if (cfg[CFG_PERSIST_LINKDOWN].current) {
        EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN_PERSIST);
        return (0);
    }

#ifdef SFCT_SUPPORT
    if ((port->mode == MODE_TARGET) &&
        !(port->fct_flags & FCT_STATE_PORT_ONLINE)) {
        emlxs_enable_latt(hba);
        return (0);
    }
#endif /* SFCT_SUPPORT */

    /*
     * Setup and issue mailbox INITIALIZE LINK command.
     * At this point, the interrupt will be generated by the HW.
     */
    mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX);
    if (mbq == NULL) {
        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
            "Unable to allocate mailbox buffer.");

        rval = EIO;
        goto failed;
    }
    mb = (MAILBOX *)mbq;

    emlxs_mb_init_link(hba, mbq, cfg[CFG_TOPOLOGY].current,
        cfg[CFG_LINK_SPEED].current);

    rval = emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
    if ((rval != MBX_SUCCESS) && (rval != MBX_BUSY)) {
        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
            "Unable to initialize link. "
            "Mailbox cmd=%x status=%x",
            mb->mbxCommand, mb->mbxStatus);

        emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
        mbq = NULL;
        rval = EIO;
        goto failed;
    }

    /*
     * Enable link attention interrupt
     */
    emlxs_enable_latt(hba);

    /* Wait for the link to come up */
    i = cfg[CFG_LINKUP_DELAY].current;
    while (i && (hba->state < FC_LINK_UP)) {
        /* Check for hardware error */
        if (hba->state == FC_ERROR) {
            EMLXS_MSGF(EMLXS_CONTEXT,
                &emlxs_init_failed_msg,
                "Adapter error.");

            mbq = NULL;
            rval = EIO;
            goto failed;
        }

        BUSYWAIT_MS(1000);
        i--;
    }

    /*
     * The Leadville driver will now handle the FLOGI at the driver level
     */

    return (0);

failed:

    EMLXS_STATE_CHANGE(hba, FC_ERROR);

    if (hba->intr_flags & EMLXS_MSI_ADDED) {
        (void) EMLXS_INTR_REMOVE(hba);
    }

    if (mp) {
        emlxs_mem_put(hba, MEM_BUF, (void *)mp);
        mp = NULL;
    }

    if (mp1) {
        emlxs_mem_put(hba, MEM_BUF, (void *)mp1);
        mp1 = NULL;
    }

    (void) emlxs_mem_free_buffer(hba);

    if (mbq) {
        (void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ));
        mbq = NULL;
        mb = NULL;
    }

    if (rval == 0) {
        rval = EIO;
    }

    return (rval);

} /* emlxs_sli3_online() */


/*ARGSUSED*/
static void
emlxs_sli3_offline(emlxs_hba_t *hba, uint32_t reset_requested)
{
    /* Reverse emlxs_sli3_online */

    /* Kill the adapter */
    emlxs_sli3_hba_kill(hba);

    /* Free driver shared memory */
    (void) emlxs_mem_free_buffer(hba);

} /* emlxs_sli3_offline() */


static int
emlxs_sli3_map_hdw(emlxs_hba_t *hba)
{
    emlxs_port_t *port = &PPORT;
    dev_info_t *dip;
    ddi_device_acc_attr_t dev_attr;
    int status;

    dip = (dev_info_t *)hba->dip;
    dev_attr = emlxs_dev_acc_attr;

    if (hba->bus_type == SBUS_FC) {

        if (hba->sli.sli3.slim_acc_handle == 0) {
            status = ddi_regs_map_setup(dip,
                SBUS_DFLY_SLIM_RINDEX,
                (caddr_t *)&hba->sli.sli3.slim_addr,
                0, 0, &dev_attr, &hba->sli.sli3.slim_acc_handle);
            if (status != DDI_SUCCESS) {
                EMLXS_MSGF(EMLXS_CONTEXT,
                    &emlxs_attach_failed_msg,
                    "(SBUS) ddi_regs_map_setup SLIM failed. "
                    "status=%x", status);
                goto failed;
            }
        }
        if (hba->sli.sli3.csr_acc_handle == 0) {
            status = ddi_regs_map_setup(dip,
                SBUS_DFLY_CSR_RINDEX,
                (caddr_t *)&hba->sli.sli3.csr_addr,
                0, 0, &dev_attr, &hba->sli.sli3.csr_acc_handle);
            if (status != DDI_SUCCESS) {
                EMLXS_MSGF(EMLXS_CONTEXT,
                    &emlxs_attach_failed_msg,
                    "(SBUS) ddi_regs_map_setup DFLY CSR "
                    "failed. status=%x", status);
                goto failed;
            }
        }
        if (hba->sli.sli3.sbus_flash_acc_handle == 0) {
            status = ddi_regs_map_setup(dip, SBUS_FLASH_RDWR,
                (caddr_t *)&hba->sli.sli3.sbus_flash_addr, 0, 0,
                &dev_attr, &hba->sli.sli3.sbus_flash_acc_handle);
            if (status != DDI_SUCCESS) {
                EMLXS_MSGF(EMLXS_CONTEXT,
                    &emlxs_attach_failed_msg,
                    "(SBUS) ddi_regs_map_setup Fcode Flash "
                    "failed. status=%x", status);
                goto failed;
            }
        }
        if (hba->sli.sli3.sbus_core_acc_handle == 0) {
            status = ddi_regs_map_setup(dip, SBUS_TITAN_CORE_RINDEX,
                (caddr_t *)&hba->sli.sli3.sbus_core_addr, 0, 0,
                &dev_attr, &hba->sli.sli3.sbus_core_acc_handle);
            if (status != DDI_SUCCESS) {
                EMLXS_MSGF(EMLXS_CONTEXT,
                    &emlxs_attach_failed_msg,
                    "(SBUS) ddi_regs_map_setup TITAN CORE "
                    "failed. status=%x", status);
                goto failed;
            }
        }

        if (hba->sli.sli3.sbus_csr_handle == 0) {
            status = ddi_regs_map_setup(dip, SBUS_TITAN_CSR_RINDEX,
                (caddr_t *)&hba->sli.sli3.sbus_csr_addr,
                0, 0, &dev_attr, &hba->sli.sli3.sbus_csr_handle);
            if (status != DDI_SUCCESS) {
                EMLXS_MSGF(EMLXS_CONTEXT,
                    &emlxs_attach_failed_msg,
                    "(SBUS) ddi_regs_map_setup TITAN CSR "
                    "failed. status=%x", status);
                goto failed;
            }
        }
    } else {	/* ****** PCI ****** */

        if (hba->sli.sli3.slim_acc_handle == 0) {
            status = ddi_regs_map_setup(dip, PCI_SLIM_RINDEX,
                (caddr_t *)&hba->sli.sli3.slim_addr,
                0, 0, &dev_attr, &hba->sli.sli3.slim_acc_handle);
            if (status != DDI_SUCCESS) {
                EMLXS_MSGF(EMLXS_CONTEXT,
                    &emlxs_attach_failed_msg,
                    "(PCI) ddi_regs_map_setup SLIM failed. "
                    "stat=%d mem=%p attr=%p hdl=%p",
                    status, &hba->sli.sli3.slim_addr, &dev_attr,
                    &hba->sli.sli3.slim_acc_handle);
                goto failed;
            }
        }

        /*
         * Map in control registers, using memory-mapped version of
         * the registers rather than the I/O space-mapped registers.
         */
        if (hba->sli.sli3.csr_acc_handle == 0) {
            status = ddi_regs_map_setup(dip, PCI_CSR_RINDEX,
                (caddr_t *)&hba->sli.sli3.csr_addr,
                0, 0, &dev_attr, &hba->sli.sli3.csr_acc_handle);
            if (status != DDI_SUCCESS) {
                EMLXS_MSGF(EMLXS_CONTEXT,
                    &emlxs_attach_failed_msg,
                    "ddi_regs_map_setup CSR failed. status=%x",
                    status);
                goto failed;
            }
        }
    }

    if (hba->sli.sli3.slim2.virt == 0) {
        MBUF_INFO *buf_info;
        MBUF_INFO bufinfo;

        buf_info = &bufinfo;

        bzero(buf_info, sizeof (MBUF_INFO));
        buf_info->size = SLI_SLIM2_SIZE;
        buf_info->flags =
            FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
        buf_info->align = ddi_ptob(dip, 1L);

        (void) emlxs_mem_alloc(hba, buf_info);

        if (buf_info->virt == NULL) {
            goto failed;
        }

        hba->sli.sli3.slim2.virt = buf_info->virt;
        hba->sli.sli3.slim2.phys = buf_info->phys;
        hba->sli.sli3.slim2.size = SLI_SLIM2_SIZE;
        hba->sli.sli3.slim2.data_handle = buf_info->data_handle;
        hba->sli.sli3.slim2.dma_handle = buf_info->dma_handle;
        bzero((char *)hba->sli.sli3.slim2.virt, SLI_SLIM2_SIZE);
    }

    /* offset from beginning of register space */
    hba->sli.sli3.ha_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
        (sizeof (uint32_t) * HA_REG_OFFSET));
    hba->sli.sli3.ca_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
        (sizeof (uint32_t) * CA_REG_OFFSET));
    hba->sli.sli3.hs_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
        (sizeof (uint32_t) * HS_REG_OFFSET));
    hba->sli.sli3.hc_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
        (sizeof (uint32_t) * HC_REG_OFFSET));
    hba->sli.sli3.bc_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
        (sizeof (uint32_t) * BC_REG_OFFSET));
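
    /*
     * Note: csr_addr is byte-addressed, so each *_REG_OFFSET
     * constant is a 32-bit word index into the memory-mapped CSR
     * space; the sizeof (uint32_t) scaling converts it to a byte
     * offset before the cast back to a uint32_t pointer.
     */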

    if (hba->bus_type == SBUS_FC) {
        /* offset from beginning of register space */
        /* for TITAN registers */
        hba->sli.sli3.shc_reg_addr =
            (uint32_t *)(hba->sli.sli3.sbus_csr_addr +
            (sizeof (uint32_t) * SBUS_CTRL_REG_OFFSET));
        hba->sli.sli3.shs_reg_addr =
            (uint32_t *)(hba->sli.sli3.sbus_csr_addr +
            (sizeof (uint32_t) * SBUS_STAT_REG_OFFSET));
        hba->sli.sli3.shu_reg_addr =
            (uint32_t *)(hba->sli.sli3.sbus_csr_addr +
            (sizeof (uint32_t) * SBUS_UPDATE_REG_OFFSET));
    }
    hba->chan_count = MAX_RINGS;

    return (0);

failed:

    emlxs_sli3_unmap_hdw(hba);
    return (ENOMEM);

} /* emlxs_sli3_map_hdw() */


static void
emlxs_sli3_unmap_hdw(emlxs_hba_t *hba)
{
    MBUF_INFO bufinfo;
    MBUF_INFO *buf_info = &bufinfo;

    if (hba->sli.sli3.csr_acc_handle) {
        ddi_regs_map_free(&hba->sli.sli3.csr_acc_handle);
        hba->sli.sli3.csr_acc_handle = 0;
    }

    if (hba->sli.sli3.slim_acc_handle) {
        ddi_regs_map_free(&hba->sli.sli3.slim_acc_handle);
        hba->sli.sli3.slim_acc_handle = 0;
    }

    if (hba->sli.sli3.sbus_flash_acc_handle) {
        ddi_regs_map_free(&hba->sli.sli3.sbus_flash_acc_handle);
        hba->sli.sli3.sbus_flash_acc_handle = 0;
    }

    if (hba->sli.sli3.sbus_core_acc_handle) {
        ddi_regs_map_free(&hba->sli.sli3.sbus_core_acc_handle);
        hba->sli.sli3.sbus_core_acc_handle = 0;
    }

    if (hba->sli.sli3.sbus_csr_handle) {
        ddi_regs_map_free(&hba->sli.sli3.sbus_csr_handle);
        hba->sli.sli3.sbus_csr_handle = 0;
    }

    if (hba->sli.sli3.slim2.virt) {
        bzero(buf_info, sizeof (MBUF_INFO));

        if (hba->sli.sli3.slim2.phys) {
            buf_info->phys = hba->sli.sli3.slim2.phys;
            buf_info->data_handle = hba->sli.sli3.slim2.data_handle;
            buf_info->dma_handle = hba->sli.sli3.slim2.dma_handle;
            buf_info->flags = FC_MBUF_DMA;
        }

        buf_info->virt = hba->sli.sli3.slim2.virt;
        buf_info->size = hba->sli.sli3.slim2.size;
        emlxs_mem_free(hba, buf_info);

        hba->sli.sli3.slim2.virt = NULL;
    }


    return;

} /* emlxs_sli3_unmap_hdw() */


static uint32_t
emlxs_sli3_hba_init(emlxs_hba_t *hba)
{
    emlxs_port_t *port = &PPORT;
    emlxs_port_t *vport;
    emlxs_config_t *cfg;
    uint16_t i;
    VPIobj_t *vpip;

    cfg = &CFG;
    i = 0;

    /* Restart the adapter */
    if (emlxs_sli3_hba_reset(hba, 1, 0, 0)) {
        return (1);
    }

    hba->channel_fcp = FC_FCP_RING;
    hba->channel_els = FC_ELS_RING;
    hba->channel_ip = FC_IP_RING;
    hba->channel_ct = FC_CT_RING;
    hba->chan_count = MAX_RINGS;
    hba->sli.sli3.ring_count = MAX_RINGS;

    /*
     * WARNING: There is a max of 6 ring masks allowed
     */
    /* RING 0 - FCP */
    if (port->flag & EMLXS_TGT_ENABLED) {
        hba->sli.sli3.ring_masks[FC_FCP_RING] = 1;
        hba->sli.sli3.ring_rval[i] = FC_FCP_CMND;
        hba->sli.sli3.ring_rmask[i] = 0;
        hba->sli.sli3.ring_tval[i] = FC_TYPE_SCSI_FCP;
        hba->sli.sli3.ring_tmask[i++] = 0xFF;
    } else {
        hba->sli.sli3.ring_masks[FC_FCP_RING] = 0;
    }

    hba->sli.sli3.ring[FC_FCP_RING].fc_numCiocb = SLIM_IOCB_CMD_R0_ENTRIES;
    hba->sli.sli3.ring[FC_FCP_RING].fc_numRiocb = SLIM_IOCB_RSP_R0_ENTRIES;

    /* RING 1 - IP */
    if (cfg[CFG_NETWORK_ON].current) {
        hba->sli.sli3.ring_masks[FC_IP_RING] = 1;
        hba->sli.sli3.ring_rval[i] = FC_UNSOL_DATA; /* Unsol Data */
        hba->sli.sli3.ring_rmask[i] = 0xFF;
        hba->sli.sli3.ring_tval[i] = FC_TYPE_IS8802_SNAP; /* LLC/SNAP */
        hba->sli.sli3.ring_tmask[i++] = 0xFF;
    } else {
        hba->sli.sli3.ring_masks[FC_IP_RING] = 0;
    }

    hba->sli.sli3.ring[FC_IP_RING].fc_numCiocb = SLIM_IOCB_CMD_R1_ENTRIES;
    hba->sli.sli3.ring[FC_IP_RING].fc_numRiocb = SLIM_IOCB_RSP_R1_ENTRIES;

    /* RING 2 - ELS */
    hba->sli.sli3.ring_masks[FC_ELS_RING] = 1;
    hba->sli.sli3.ring_rval[i] = FC_ELS_REQ;	/* ELS request/rsp */
    hba->sli.sli3.ring_rmask[i] = 0xFE;
    hba->sli.sli3.ring_tval[i] = FC_TYPE_EXTENDED_LS;	/* ELS */
    hba->sli.sli3.ring_tmask[i++] = 0xFF;
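
    /*
     * Each rval/rmask pair matches the R_CTL of an unsolicited
     * frame and each tval/tmask pair matches its TYPE. An rmask
     * of 0xFE compares all but the low bit, so FC_ELS_REQ (0x22)
     * should also match ELS replies (0x23), routing both to this
     * ring.
     */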
1967
1968 hba->sli.sli3.ring[FC_ELS_RING].fc_numCiocb = SLIM_IOCB_CMD_R2_ENTRIES;
1969 hba->sli.sli3.ring[FC_ELS_RING].fc_numRiocb = SLIM_IOCB_RSP_R2_ENTRIES;
1970
1971 /* RING 3 - CT */
1972 hba->sli.sli3.ring_masks[FC_CT_RING] = 1;
1973 hba->sli.sli3.ring_rval[i] = FC_UNSOL_CTL; /* CT request/rsp */
1974 hba->sli.sli3.ring_rmask[i] = 0xFE;
1975 hba->sli.sli3.ring_tval[i] = FC_TYPE_FC_SERVICES; /* CT */
1976 hba->sli.sli3.ring_tmask[i++] = 0xFF;
1977
1978 hba->sli.sli3.ring[FC_CT_RING].fc_numCiocb = SLIM_IOCB_CMD_R3_ENTRIES;
1979 hba->sli.sli3.ring[FC_CT_RING].fc_numRiocb = SLIM_IOCB_RSP_R3_ENTRIES;
1980
1981 if (i > 6) {
1982 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
1983 "hba_init: Too many ring masks defined. cnt=%d", i);
1984 return (1);
1985 }
1986
1987 /* Initialize all the port objects */
1988 hba->vpi_max = 0;
1989 for (i = 0; i < MAX_VPORTS; i++) {
1990 vport = &VPORT(i);
1991 vport->hba = hba;
1992 vport->vpi = i;
1993
1994 vpip = &vport->VPIobj;
1995 vpip->index = i;
1996 vpip->VPI = i;
1997 vpip->port = vport;
1998 vpip->state = VPI_STATE_OFFLINE;
1999 vport->vpip = vpip;
2000 }
2001
2002 /*
2003 * Initialize the max_node count to a default value if needed
2004 * This determines how many node objects we preallocate in the pool
2005 * The actual max_nodes will be set later based on adapter info
2006 */
2007 if (hba->max_nodes == 0) {
2008 if (cfg[CFG_NUM_NODES].current > 0) {
2009 hba->max_nodes = cfg[CFG_NUM_NODES].current;
2010 } else if (hba->model_info.chip >= EMLXS_SATURN_CHIP) {
2011 hba->max_nodes = 4096;
2012 } else {
2013 hba->max_nodes = 512;
2014 }
2015 }
2016
2017 return (0);
2018
2019 } /* emlxs_sli3_hba_init() */
2020
2021
2022 /*
2023 * quiesce = 0: the call is not from the quiesce routine.
2024 * quiesce = 1: the call is from the quiesce routine.
2025 */
2026 static uint32_t
2027 emlxs_sli3_hba_reset(emlxs_hba_t *hba, uint32_t restart, uint32_t skip_post,
2028 uint32_t quiesce)
2029 {
2030 emlxs_port_t *port = &PPORT;
2031 MAILBOX *swpmb;
2032 MAILBOX *mb;
2033 uint32_t word0;
2034 uint16_t cfg_value;
2035 uint32_t status = 0;
2036 uint32_t status1;
2037 uint32_t status2;
2038 uint32_t i;
2039 uint32_t ready;
2040 emlxs_port_t *vport;
2041 RING *rp;
2042 emlxs_config_t *cfg = &CFG;
2043
2044 if (!cfg[CFG_RESET_ENABLE].current) {
2045 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
2046 "Adapter reset disabled.");
2047 EMLXS_STATE_CHANGE(hba, FC_ERROR);
2048
2049 return (1);
2050 }
2051
2052 /* Kill the adapter first */
2053 if (quiesce == 0) {
2054 emlxs_sli3_hba_kill(hba);
2055 } else {
2056 emlxs_sli3_hba_kill4quiesce(hba);
2057 }
2058
2059 if (restart) {
2060 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
2061 "Restarting.");
2062 EMLXS_STATE_CHANGE(hba, FC_INIT_START);
2063
2064 ready = (HS_FFRDY | HS_MBRDY);
2065 } else {
2066 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
2067 "Resetting.");
2068 EMLXS_STATE_CHANGE(hba, FC_WARM_START);
2069
2070 ready = HS_MBRDY;
2071 }
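/*
 * HS_MBRDY alone indicates the mailbox interface is usable again;
 * a full restart additionally waits for HS_FFRDY, which appears to
 * signal that the firmware itself is ready.
 */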
2072
2073 hba->flag &= ~(FC_SLIM2_MODE | FC_HARDWARE_ERROR);
2074
2075 mb = FC_SLIM1_MAILBOX(hba);
2076 swpmb = (MAILBOX *)&word0;
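/*
 * word0 is a one-word staging area: mailbox bitfields are composed
 * in host memory through swpmb and then pushed to SLIM with a single
 * 32-bit store.
 */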
2077
2078 reset:
2079
2080 i = 0;
2081
2082 /* Save reset time */
2083 HBASTATS.ResetTime = hba->timer_tics;
2084
2085 if (restart) {
2086 /* First put restart command in mailbox */
2087 word0 = 0;
2088 swpmb->mbxCommand = MBX_RESTART;
2089 swpmb->mbxHc = 1;
2090 WRITE_SLIM_ADDR(hba, ((volatile uint32_t *)mb), word0);
2091
2092 /* Only skip post after emlxs_sli3_online is completed */
2093 if (skip_post) {
2094 WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb) + 1),
2095 1);
2096 } else {
2097 WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb) + 1),
2098 0);
2099 }
2100
2101 }
2102
2103 /*
2104 * Turn off SERR, PERR in PCI cmd register
2105 */
2106 cfg_value = ddi_get16(hba->pci_acc_handle,
2107 (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER));
2108
2109 ddi_put16(hba->pci_acc_handle,
2110 (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER),
2111 (uint16_t)(cfg_value & ~(CMD_PARITY_CHK | CMD_SERR_ENBL)));
2112
2113 hba->sli.sli3.hc_copy = HC_INITFF;
2114 WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
2115
2116 /* Wait 1 msec before restoring PCI config */
2117 BUSYWAIT_MS(1);
2118
2119 /* Restore PCI cmd register */
2120 ddi_put16(hba->pci_acc_handle,
2121 (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER),
2122 (uint16_t)cfg_value);
2123
2124 /* Wait 3 seconds before checking */
2125 BUSYWAIT_MS(3000);
2126 i += 3;
2127
2128 /* Wait for reset completion */
2129 while (i < 30) {
2130 /* Check status register to see what current state is */
2131 status = READ_CSR_REG(hba, FC_HS_REG(hba));
2132
2133 /* Check to see if any errors occurred during init */
2134 if (status & HS_FFERM) {
2135 status1 = READ_SLIM_ADDR(hba, ((volatile uint8_t *)
2136 hba->sli.sli3.slim_addr + 0xa8));
2137 status2 = READ_SLIM_ADDR(hba, ((volatile uint8_t *)
2138 hba->sli.sli3.slim_addr + 0xac));
2139
2140 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
2141 "HS_FFERM: status=0x%x status1=0x%x status2=0x%x",
2142 status, status1, status2);
2143
2144 EMLXS_STATE_CHANGE(hba, FC_ERROR);
2145 return (1);
2146 }
2147
2148 if ((status & ready) == ready) {
2149 /* Reset Done !! */
2150 goto done;
2151 }
2152
2153 /*
2154 * Check every 1 second for 15 seconds, then reset board
2155 * again (w/post), then check every 1 second for 15 seconds.
2156 */
2157 BUSYWAIT_MS(1000);
2158 i++;
2159
2160 /* Reset again (w/post) at 15 seconds */
2161 if (i == 15) {
2162 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
2163 "Reset failed. Retrying...");
2164
2165 goto reset;
2166 }
2167 }
2168
2169 #ifdef FMA_SUPPORT
2170 reset_fail:
2171 #endif /* FMA_SUPPORT */
2172
2173 /* Timeout occurred */
2174 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
2175 "Timeout: status=0x%x", status);
2176 EMLXS_STATE_CHANGE(hba, FC_ERROR);
2177
2178 /* Log a dump event */
2179 emlxs_log_dump_event(port, NULL, 0);
2180
2181 return (1);
2182
2183 done:
2184
2185 /* Initialize hc_copy */
2186 hba->sli.sli3.hc_copy = READ_CSR_REG(hba, FC_HC_REG(hba));
2187
2188 #ifdef FMA_SUPPORT
2189 /* Access handle validation */
2190 if ((emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
2191 != DDI_FM_OK) ||
2192 (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.slim_acc_handle)
2193 != DDI_FM_OK) ||
2194 (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.csr_acc_handle)
2195 != DDI_FM_OK)) {
2196 EMLXS_MSGF(EMLXS_CONTEXT,
2197 &emlxs_invalid_access_handle_msg, NULL);
2198 goto reset_fail;
2199 }
2200 #endif /* FMA_SUPPORT */
2201
2202 /* Reset the hba structure */
2203 hba->flag &= FC_RESET_MASK;
2204 hba->channel_tx_count = 0;
2205 hba->io_count = 0;
2206 hba->iodone_count = 0;
2207 hba->topology = 0;
2208 hba->linkspeed = 0;
2209 hba->heartbeat_active = 0;
2210 hba->discovery_timer = 0;
2211 hba->linkup_timer = 0;
2212 hba->loopback_tics = 0;
2213
2214 /* Reset the ring objects */
2215 for (i = 0; i < MAX_RINGS; i++) {
2216 rp = &hba->sli.sli3.ring[i];
2217 rp->fc_mpon = 0;
2218 rp->fc_mpoff = 0;
2219 }
2220
2221 /* Reset the port objects */
2222 for (i = 0; i < MAX_VPORTS; i++) {
2223 vport = &VPORT(i);
2224
2225 vport->flag &= EMLXS_PORT_RESET_MASK;
2226 vport->did = 0;
2227 vport->prev_did = 0;
2228 vport->lip_type = 0;
2229 bzero(&vport->fabric_sparam, sizeof (SERV_PARM));
2230 bzero(&vport->prev_fabric_sparam, sizeof (SERV_PARM));
2231
2232 bzero((caddr_t)&vport->node_base, sizeof (NODELIST));
2233 vport->node_base.nlp_Rpi = 0;
2234 vport->node_base.nlp_DID = 0xffffff;
2235 vport->node_base.nlp_list_next = NULL;
2236 vport->node_base.nlp_list_prev = NULL;
2237 vport->node_base.nlp_active = 1;
2238 vport->node_count = 0;
2239
2240 if (vport->ub_count < EMLXS_UB_TOKEN_OFFSET) {
2241 vport->ub_count = EMLXS_UB_TOKEN_OFFSET;
2242 }
2243 }
2244
2245 return (0);
2246
2247 } /* emlxs_sli3_hba_reset */
2248
2249
2250 #define BPL_CMD 0
2251 #define BPL_RESP 1
2252 #define BPL_DATA 2
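/*
 * emlxs_pkt_to_bpl() translates one of the packet's DMA cookie lists
 * (command, response, or data) into ULP_BDE64 entries. On success it
 * returns a pointer one entry past the last BDE written, so the caller
 * can compute the total BPL size; it returns NULL on an unknown
 * bpl_type.
 */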
2253
2254 static ULP_BDE64 *
2255 emlxs_pkt_to_bpl(fc_packet_t *pkt, ULP_BDE64 *bpl, uint32_t bpl_type)
2256 {
2257 ddi_dma_cookie_t *cp;
2258 uint_t i;
2259 int32_t size;
2260 uint_t cookie_cnt;
2261 uint8_t bdeFlags;
2262
2263 #if (EMLXS_MODREV >= EMLXS_MODREV3)
2264 switch (bpl_type) {
2265 case BPL_CMD:
2266 cp = pkt->pkt_cmd_cookie;
2267 cookie_cnt = pkt->pkt_cmd_cookie_cnt;
2268 size = (int32_t)pkt->pkt_cmdlen;
2269 bdeFlags = 0;
2270 break;
2271
2272 case BPL_RESP:
2273 cp = pkt->pkt_resp_cookie;
2274 cookie_cnt = pkt->pkt_resp_cookie_cnt;
2275 size = (int32_t)pkt->pkt_rsplen;
2276 bdeFlags = BUFF_USE_RCV;
2277 break;
2278
2279
2280 case BPL_DATA:
2281 cp = pkt->pkt_data_cookie;
2282 cookie_cnt = pkt->pkt_data_cookie_cnt;
2283 size = (int32_t)pkt->pkt_datalen;
2284 bdeFlags = (pkt->pkt_tran_type == FC_PKT_FCP_READ) ?
2285 BUFF_USE_RCV : 0;
2286 break;
2287
2288 default:
2289 return (NULL);
2290 }
2291
2292 #else
2293 switch (bpl_type) {
2294 case BPL_CMD:
2295 cp = &pkt->pkt_cmd_cookie;
2296 cookie_cnt = 1;
2297 size = (int32_t)pkt->pkt_cmdlen;
2298 bdeFlags = 0;
2299 break;
2300
2301 case BPL_RESP:
2302 cp = &pkt->pkt_resp_cookie;
2303 cookie_cnt = 1;
2304 size = (int32_t)pkt->pkt_rsplen;
2305 bdeFlags = BUFF_USE_RCV;
2306 break;
2307
2308
2309 case BPL_DATA:
2310 cp = &pkt->pkt_data_cookie;
2311 cookie_cnt = 1;
2312 size = (int32_t)pkt->pkt_datalen;
2313 bdeFlags = (pkt->pkt_tran_type == FC_PKT_FCP_READ) ?
2314 BUFF_USE_RCV : 0;
2315 break;
2316
2317 default:
2318 return (NULL);
2319 }
2320 #endif /* >= EMLXS_MODREV3 */
2321
2322 for (i = 0; i < cookie_cnt && size > 0; i++, cp++) {
2323 bpl->addrHigh =
2324 BE_SWAP32(PADDR_HI(cp->dmac_laddress));
2325 bpl->addrLow =
2326 BE_SWAP32(PADDR_LO(cp->dmac_laddress));
2327 bpl->tus.f.bdeSize = MIN(size, cp->dmac_size);
2328 bpl->tus.f.bdeFlags = bdeFlags;
2329 bpl->tus.w = BE_SWAP32(bpl->tus.w);
2330
2331 bpl++;
2332 size -= cp->dmac_size;
2333 }
2334
2335 return (bpl);
2336
2337 } /* emlxs_pkt_to_bpl */
2338
2339
2340 static uint32_t
2341 emlxs_sli2_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
2342 {
2343 emlxs_hba_t *hba = HBA;
2344 fc_packet_t *pkt;
2345 MATCHMAP *bmp;
2346 ULP_BDE64 *bpl;
2347 uint64_t bp;
2348 IOCB *iocb;
2349 IOCBQ *iocbq;
2350 CHANNEL *cp;
2351 uint32_t data_cookie_cnt;
2352 uint32_t channelno;
2353
2354 cp = sbp->channel;
2355 iocb = (IOCB *)&sbp->iocbq;
2356 pkt = PRIV2PKT(sbp);
2357
2358 if (hba->sli.sli3.bpl_table) {
2359 bmp = hba->sli.sli3.bpl_table[sbp->iotag];
2360 } else {
2361 bmp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BPL);
2362 }
2363
2364 if (!bmp) {
2365 return (1);
2366 }
2367
2368 sbp->bmp = bmp;
2369 bpl = (ULP_BDE64 *)bmp->virt;
2370 bp = bmp->phys;
2371
2372 #if (EMLXS_MODREV >= EMLXS_MODREV3)
2373 data_cookie_cnt = pkt->pkt_data_cookie_cnt;
2374 #else
2375 data_cookie_cnt = 1;
2376 #endif /* >= EMLXS_MODREV3 */
2377
2378 iocbq = &sbp->iocbq;
2379
2380 channelno = (iocbq->flag & IOCB_FCP_CMD) ? FC_FCP_RING : cp->channelno;
2381 switch (channelno) {
2382 case FC_FCP_RING:
2383
2384 /* CMD payload */
2385 bpl = emlxs_pkt_to_bpl(pkt, bpl, BPL_CMD);
2386 if (!bpl) {
2387 return (1);
2388 }
2389
2390 /* Check if response & data payloads are needed */
2391 if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
2392 break;
2393 }
2394
2395 /* RSP payload */
2396 bpl = emlxs_pkt_to_bpl(pkt, bpl, BPL_RESP);
2397 if (!bpl) {
2398 return (1);
2399 }
2400
2401 /* Check if data payload is needed */
2402 if ((pkt->pkt_datalen == 0) ||
2403 (data_cookie_cnt == 0)) {
2404 break;
2405 }
2406
2407 /* DATA payload */
2408 bpl = emlxs_pkt_to_bpl(pkt, bpl, BPL_DATA);
2409 if (!bpl) {
2410 return (1);
2411 }
2412 break;
2413
2414 case FC_IP_RING:
2415
2416 /* CMD payload */
2417 bpl = emlxs_pkt_to_bpl(pkt, bpl, BPL_CMD);
2418 if (!bpl) {
2419 return (1);
2420 }
2421 break;
2422
2423 case FC_ELS_RING:
2424
2425 /* CMD payload */
2426 bpl = emlxs_pkt_to_bpl(pkt, bpl, BPL_CMD);
2427 if (!bpl) {
2428 return (1);
2429 }
2430
2431 /* Check if response payload is needed */
2432 if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
2433 break;
2434 }
2435
2436 /* RSP payload */
2437 bpl = emlxs_pkt_to_bpl(pkt, bpl, BPL_RESP);
2438 if (!bpl) {
2439 return (1);
2440 }
2441 break;
2442
2443 case FC_CT_RING:
2444
2445 /* CMD payload */
2446 bpl = emlxs_pkt_to_bpl(pkt, bpl, BPL_CMD);
2447 if (!bpl) {
2448 return (1);
2449 }
2450
2451 /* Check if response payload is needed */
2452 if ((pkt->pkt_tran_type == FC_PKT_OUTBOUND) &&
2453 (pkt->pkt_cmd_fhdr.type != EMLXS_MENLO_TYPE)) {
2454 break;
2455 }
2456
2457 /* RSP payload */
2458 bpl = emlxs_pkt_to_bpl(pkt, bpl, BPL_RESP);
2459 if (!bpl) {
2460 return (1);
2461 }
2462 break;
2463
2464 }
2465
2466 iocb->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BDL;
2467 iocb->un.genreq64.bdl.addrHigh = PADDR_HI(bp);
2468 iocb->un.genreq64.bdl.addrLow = PADDR_LO(bp);
2469 iocb->un.genreq64.bdl.bdeSize =
2470 (uint32_t)(((uintptr_t)bpl - (uintptr_t)bmp->virt) & 0xFFFFFFFF);
2471 iocb->ULPBDECOUNT = 1;
2472 iocb->ULPLE = 1;
2473
2474 return (0);
2475
2476 } /* emlxs_sli2_bde_setup */
2477
2478
2479 static uint32_t
2480 emlxs_sli3_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
2481 {
2482 ddi_dma_cookie_t *cp_cmd;
2483 ddi_dma_cookie_t *cp_resp;
2484 ddi_dma_cookie_t *cp_data;
2485 fc_packet_t *pkt;
2486 ULP_BDE64 *bde;
2487 int data_cookie_cnt;
2488 uint32_t i;
2489 uint32_t channelno;
2490 IOCB *iocb;
2491 IOCBQ *iocbq;
2492 CHANNEL *cp;
2493
2494 pkt = PRIV2PKT(sbp);
2495 #if (EMLXS_MODREV >= EMLXS_MODREV3)
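/*
 * Fast-path check: the inline SLI3 IOCB can carry only one command
 * BDE, one response BDE, and at most SLI3_MAX_BDE entries in total.
 * Anything larger falls back to the SLI2 external-BPL method.
 */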
2496 if ((pkt->pkt_cmd_cookie_cnt > 1) ||
2497 (pkt->pkt_resp_cookie_cnt > 1) ||
2498 ((pkt->pkt_cmd_cookie_cnt + pkt->pkt_resp_cookie_cnt +
2499 pkt->pkt_data_cookie_cnt) > SLI3_MAX_BDE)) {
2500 i = emlxs_sli2_bde_setup(port, sbp);
2501 return (i);
2502 }
2503
2504 cp_cmd = pkt->pkt_cmd_cookie;
2505 cp_resp = pkt->pkt_resp_cookie;
2506 cp_data = pkt->pkt_data_cookie;
2507 data_cookie_cnt = pkt->pkt_data_cookie_cnt;
2508 #else
2509 cp_cmd = &pkt->pkt_cmd_cookie;
2510 cp_resp = &pkt->pkt_resp_cookie;
2511 cp_data = &pkt->pkt_data_cookie;
2512 data_cookie_cnt = 1;
2513 #endif /* >= EMLXS_MODREV3 */
2514
2515 cp = sbp->channel;
2516 iocbq = &sbp->iocbq;
2517 iocb = (IOCB *)iocbq;
2518 iocb->unsli3.ext_iocb.ebde_count = 0;
2519
2520 channelno = (iocbq->flag & IOCB_FCP_CMD) ? FC_FCP_RING : cp->channelno;
2521 switch (channelno) {
2522 case FC_FCP_RING:
2523 /* CMD payload */
2524 iocb->un.fcpi64.bdl.addrHigh =
2525 PADDR_HI(cp_cmd->dmac_laddress);
2526 iocb->un.fcpi64.bdl.addrLow =
2527 PADDR_LO(cp_cmd->dmac_laddress);
2528 iocb->un.fcpi64.bdl.bdeSize = pkt->pkt_cmdlen;
2529 iocb->un.fcpi64.bdl.bdeFlags = 0;
2530
2531 /* Check if a response & data payload are needed */
2532 if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
2533 break;
2534 }
2535
2536 /* RSP payload */
2537 iocb->unsli3.ext_iocb.ebde1.addrHigh =
2538 PADDR_HI(cp_resp->dmac_laddress);
2539 iocb->unsli3.ext_iocb.ebde1.addrLow =
2540 PADDR_LO(cp_resp->dmac_laddress);
2541 iocb->unsli3.ext_iocb.ebde1.tus.f.bdeSize = pkt->pkt_rsplen;
2542 iocb->unsli3.ext_iocb.ebde1.tus.f.bdeFlags = 0;
2543 iocb->unsli3.ext_iocb.ebde_count = 1;
2544
2545 /* Check if a data payload is needed */
2546 if ((pkt->pkt_datalen == 0) ||
2547 (data_cookie_cnt == 0)) {
2548 break;
2549 }
2550
2551 /* DATA payload */
2552 bde = (ULP_BDE64 *)&iocb->unsli3.ext_iocb.ebde2;
2553 for (i = 0; i < data_cookie_cnt; i++) {
2554 bde->addrHigh = PADDR_HI(cp_data->dmac_laddress);
2555 bde->addrLow = PADDR_LO(cp_data->dmac_laddress);
2556 bde->tus.f.bdeSize = cp_data->dmac_size;
2557 bde->tus.f.bdeFlags = 0;
2558 cp_data++;
2559 bde++;
2560 }
2561 iocb->unsli3.ext_iocb.ebde_count += data_cookie_cnt;
2562
2563 break;
2564
2565 case FC_IP_RING:
2566 /* CMD payload */
2567 iocb->un.xseq64.bdl.addrHigh =
2568 PADDR_HI(cp_cmd->dmac_laddress);
2569 iocb->un.xseq64.bdl.addrLow =
2570 PADDR_LO(cp_cmd->dmac_laddress);
2571 iocb->un.xseq64.bdl.bdeSize = pkt->pkt_cmdlen;
2572 iocb->un.xseq64.bdl.bdeFlags = 0;
2573
2574 break;
2575
2576 case FC_ELS_RING:
2577
2578 /* CMD payload */
2579 iocb->un.elsreq64.bdl.addrHigh =
2580 PADDR_HI(cp_cmd->dmac_laddress);
2581 iocb->un.elsreq64.bdl.addrLow =
2582 PADDR_LO(cp_cmd->dmac_laddress);
2583 iocb->un.elsreq64.bdl.bdeSize = pkt->pkt_cmdlen;
2584 iocb->un.elsreq64.bdl.bdeFlags = 0;
2585
2586 /* Check if a response payload is needed */
2587 if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
2588 break;
2589 }
2590
2591 /* RSP payload */
2592 iocb->unsli3.ext_iocb.ebde1.addrHigh =
2593 PADDR_HI(cp_resp->dmac_laddress);
2594 iocb->unsli3.ext_iocb.ebde1.addrLow =
2595 PADDR_LO(cp_resp->dmac_laddress);
2596 iocb->unsli3.ext_iocb.ebde1.tus.f.bdeSize = pkt->pkt_rsplen;
2597 iocb->unsli3.ext_iocb.ebde1.tus.f.bdeFlags = BUFF_USE_RCV;
2598 iocb->unsli3.ext_iocb.ebde_count = 1;
2599 break;
2600
2601 case FC_CT_RING:
2602
2603 /* CMD payload */
2604 iocb->un.genreq64.bdl.addrHigh =
2605 PADDR_HI(cp_cmd->dmac_laddress);
2606 iocb->un.genreq64.bdl.addrLow =
2607 PADDR_LO(cp_cmd->dmac_laddress);
2608 iocb->un.genreq64.bdl.bdeSize = pkt->pkt_cmdlen;
2609 iocb->un.genreq64.bdl.bdeFlags = 0;
2610
2611 /* Check if a response payload is needed */
2612 if ((pkt->pkt_tran_type == FC_PKT_OUTBOUND) &&
2613 (pkt->pkt_cmd_fhdr.type != EMLXS_MENLO_TYPE)) {
2614 break;
2615 }
2616
2617 /* RSP payload */
2618 iocb->unsli3.ext_iocb.ebde1.addrHigh =
2619 PADDR_HI(cp_resp->dmac_laddress);
2620 iocb->unsli3.ext_iocb.ebde1.addrLow =
2621 PADDR_LO(cp_resp->dmac_laddress);
2622 iocb->unsli3.ext_iocb.ebde1.tus.f.bdeSize = pkt->pkt_rsplen;
2623 iocb->unsli3.ext_iocb.ebde1.tus.f.bdeFlags = BUFF_USE_RCV;
2624 iocb->unsli3.ext_iocb.ebde_count = 1;
2625 break;
2626 }
2627
2628 iocb->ULPBDECOUNT = 0;
2629 iocb->ULPLE = 0;
2630
2631 return (0);
2632
2633 } /* emlxs_sli3_bde_setup */
2634
2635
2636 /* Only used for FCP Data xfers */
2637 #ifdef SFCT_SUPPORT
2638 /*ARGSUSED*/
2639 static uint32_t
2640 emlxs_sli2_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
2641 {
2642 emlxs_hba_t *hba = HBA;
2643 scsi_task_t *fct_task;
2644 MATCHMAP *bmp;
2645 ULP_BDE64 *bpl;
2646 uint64_t bp;
2647 uint8_t bdeFlags;
2648 IOCB *iocb;
2649 uint32_t size;
2650 MATCHMAP *mp;
2651
2652 iocb = (IOCB *)&sbp->iocbq.iocb;
2653 sbp->bmp = NULL;
2654
2655 if (!sbp->fct_buf) {
2656 iocb->un.fcpt64.bdl.addrHigh = 0;
2657 iocb->un.fcpt64.bdl.addrLow = 0;
2658 iocb->un.fcpt64.bdl.bdeSize = 0;
2659 iocb->un.fcpt64.bdl.bdeFlags = 0;
2660 iocb->un.fcpt64.fcpt_Offset = 0;
2661 iocb->un.fcpt64.fcpt_Length = 0;
2662 iocb->ULPBDECOUNT = 0;
2663 iocb->ULPLE = 1;
2664 return (0);
2665 }
2666
2667 if (hba->sli.sli3.bpl_table) {
2668 bmp = hba->sli.sli3.bpl_table[sbp->iotag];
2669 } else {
2670 bmp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BPL);
2671 }
2672
2673 if (!bmp) {
2674 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_error_msg,
2675 "fct_sli2_bde_setup: Unable to BPL buffer. iotag=%d",
2676 sbp->iotag);
2677
2678 iocb->un.fcpt64.bdl.addrHigh = 0;
2679 iocb->un.fcpt64.bdl.addrLow = 0;
2680 iocb->un.fcpt64.bdl.bdeSize = 0;
2681 iocb->un.fcpt64.bdl.bdeFlags = 0;
2682 iocb->un.fcpt64.fcpt_Offset = 0;
2683 iocb->un.fcpt64.fcpt_Length = 0;
2684 iocb->ULPBDECOUNT = 0;
2685 iocb->ULPLE = 1;
2686 return (1);
2687 }
2688
2689 bpl = (ULP_BDE64 *)bmp->virt;
2690 bp = bmp->phys;
2691
2692 fct_task = (scsi_task_t *)sbp->fct_cmd->cmd_specific;
2693
2694 size = sbp->fct_buf->db_data_size;
2695 mp = (MATCHMAP *)sbp->fct_buf->db_port_private;
2696
2697 bdeFlags = (fct_task->task_flags & TF_WRITE_DATA) ? BUFF_USE_RCV : 0;
2698
2699 /* Init the buffer list */
2700 bpl->addrHigh = BE_SWAP32(PADDR_HI(mp->phys));
2701 bpl->addrLow = BE_SWAP32(PADDR_LO(mp->phys));
2702 bpl->tus.f.bdeSize = size;
2703 bpl->tus.f.bdeFlags = bdeFlags;
2704 bpl->tus.w = BE_SWAP32(bpl->tus.w);
2705
2706 /* Init the IOCB */
2707 iocb->un.fcpt64.bdl.addrHigh = PADDR_HI(bp);
2708 iocb->un.fcpt64.bdl.addrLow = PADDR_LO(bp);
2709 iocb->un.fcpt64.bdl.bdeSize = sizeof (ULP_BDE64);
2710 iocb->un.fcpt64.bdl.bdeFlags = BUFF_TYPE_BDL;
2711
2712 iocb->un.fcpt64.fcpt_Length =
2713 (fct_task->task_flags & TF_WRITE_DATA) ? size : 0;
2714 iocb->un.fcpt64.fcpt_Offset = 0;
2715
2716 iocb->ULPBDECOUNT = 1;
2717 iocb->ULPLE = 1;
2718 sbp->bmp = bmp;
2719
2720 return (0);
2721
2722 } /* emlxs_sli2_fct_bde_setup */
2723 #endif /* SFCT_SUPPORT */
2724
2725
2726 #ifdef SFCT_SUPPORT
2727 /*ARGSUSED*/
2728 static uint32_t
2729 emlxs_sli3_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
2730 {
2731 scsi_task_t *fct_task;
2732 IOCB *iocb;
2733 MATCHMAP *mp;
2734 uint32_t bdeFlags;
2735 uint32_t size;
2736
2737 iocb = (IOCB *)&sbp->iocbq;
2738
2739 if (!sbp->fct_buf) {
2740 iocb->un.fcpt64.bdl.addrHigh = 0;
2741 iocb->un.fcpt64.bdl.addrLow = 0;
2742 iocb->un.fcpt64.bdl.bdeSize = 0;
2743 iocb->un.fcpt64.bdl.bdeFlags = 0;
2744 iocb->un.fcpt64.fcpt_Offset = 0;
2745 iocb->un.fcpt64.fcpt_Length = 0;
2746 iocb->ULPBDECOUNT = 0;
2747 iocb->ULPLE = 0;
2748 iocb->unsli3.ext_iocb.ebde_count = 0;
2749 return (0);
2750 }
2751
2752 fct_task = (scsi_task_t *)sbp->fct_cmd->cmd_specific;
2753
2754 size = sbp->fct_buf->db_data_size;
2755 mp = (MATCHMAP *)sbp->fct_buf->db_port_private;
2756
2757 bdeFlags = (fct_task->task_flags & TF_WRITE_DATA) ? BUFF_USE_RCV : 0;
2758
2759 /* Init first BDE */
2760 iocb->un.fcpt64.bdl.addrHigh = PADDR_HI(mp->phys);
2761 iocb->un.fcpt64.bdl.addrLow = PADDR_LO(mp->phys);
2762 iocb->un.fcpt64.bdl.bdeSize = size;
2763 iocb->un.fcpt64.bdl.bdeFlags = bdeFlags;
2764
2765 iocb->unsli3.ext_iocb.ebde_count = 0;
2766 iocb->un.fcpt64.fcpt_Length =
2767 (fct_task->task_flags & TF_WRITE_DATA) ? size : 0;
2768 iocb->un.fcpt64.fcpt_Offset = 0;
2769
2770 iocb->ULPBDECOUNT = 0;
2771 iocb->ULPLE = 0;
2772
2773 return (0);
2774
2775 } /* emlxs_sli3_fct_bde_setup */
2776 #endif /* SFCT_SUPPORT */
2777
2778
2779 static void
2780 emlxs_sli3_issue_iocb_cmd(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
2781 {
2782 #ifdef FMA_SUPPORT
2783 emlxs_port_t *port = &PPORT;
2784 #endif /* FMA_SUPPORT */
2785 PGP *pgp;
2786 emlxs_buf_t *sbp;
2787 SLIM2 *slim2p = (SLIM2 *)hba->sli.sli3.slim2.virt;
2788 RING *rp;
2789 uint32_t nextIdx;
2790 uint32_t status;
2791 void *ioa2;
2792 off_t offset;
2793 uint32_t count = 0;
2794 uint32_t flag;
2795 uint32_t channelno;
2796 int32_t throttle;
2797 #ifdef NODE_THROTTLE_SUPPORT
2798 int32_t node_throttle;
2799 NODELIST *marked_node = NULL;
2800 #endif /* NODE_THROTTLE_SUPPORT */
2801
2802 channelno = cp->channelno;
2803 rp = (RING *)cp->iopath;
2804
2805 throttle = 0;
2806
2807 /* Check if this is an FCP cmd and the adapter is not ready */
2808 /* We may use any ring for FCP_CMD */
2809 if (iocbq && (iocbq->flag & IOCB_FCP_CMD) && (hba->state != FC_READY)) {
2810 if (!(iocbq->flag & IOCB_SPECIAL) || !iocbq->port ||
2811 (((emlxs_port_t *)iocbq->port)->mode == MODE_INITIATOR)) {
2812 emlxs_tx_put(iocbq, 1);
2813 return;
2814 }
2815 }
2816
2817 /* Attempt to acquire CMD_RING lock */
2818 if (mutex_tryenter(&EMLXS_CMD_RING_LOCK(channelno)) == 0) {
2819 /* Queue it for later */
2820 if (iocbq) {
2821 if ((hba->io_count -
2822 hba->channel_tx_count) > 10) {
2823 emlxs_tx_put(iocbq, 1);
2824 return;
2825 } else {
2826
2827 /*
2828 * EMLXS_MSGF(EMLXS_CONTEXT,
2829 * &emlxs_ring_watchdog_msg,
2830 * "%s host=%d port=%d cnt=%d,%d RACE
2831 * CONDITION3 DETECTED.",
2832 * emlxs_ring_xlate(channelno),
2833 * rp->fc_cmdidx, rp->fc_port_cmdidx,
2834 * hba->channel_tx_count,
2835 * hba->io_count);
2836 */
2837 mutex_enter(&EMLXS_CMD_RING_LOCK(channelno));
2838 }
2839 } else {
2840 return;
2841 }
2842 }
2843 /* CMD_RING_LOCK acquired */
2844
2845 /* Throttle check only applies to non-special iocbs */
2846 if (iocbq && (!(iocbq->flag & IOCB_SPECIAL))) {
2847 /* Check if HBA is full */
2848 throttle = hba->io_throttle - hba->io_active;
2849 if (throttle <= 0) {
2850 /* Hitting adapter throttle limit */
2851 /* Queue it for later */
2852 if (iocbq) {
2853 emlxs_tx_put(iocbq, 1);
2854 }
2855
2856 goto busy;
2857 }
2858 }
2859
2860 /* Read adapter's get index */
2861 pgp = (PGP *)
2862 &((SLIM2 *)hba->sli.sli3.slim2.virt)->mbx.us.s2.port[channelno];
2863 offset =
2864 (off_t)((uint64_t)((unsigned long)&(pgp->cmdGetInx)) -
2865 (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
2866 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, 4,
2867 DDI_DMA_SYNC_FORKERNEL);
2868 rp->fc_port_cmdidx = BE_SWAP32(pgp->cmdGetInx);
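/*
 * The command ring is a producer/consumer pair: the host advances
 * the put index (fc_cmdidx) while the adapter advances the get index
 * exported through the PGP area (fc_port_cmdidx). The ring is full
 * when the next put index would catch up with the adapter's get index.
 */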
2869
2870 /* Calculate the next put index */
2871 nextIdx =
2872 (rp->fc_cmdidx + 1 >= rp->fc_numCiocb) ? 0 : rp->fc_cmdidx + 1;
2873
2874 /* Check if ring is full */
2875 if (nextIdx == rp->fc_port_cmdidx) {
2876 /* Try one more time */
2877 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, 4,
2878 DDI_DMA_SYNC_FORKERNEL);
2879 rp->fc_port_cmdidx = BE_SWAP32(pgp->cmdGetInx);
2880
2881 if (nextIdx == rp->fc_port_cmdidx) {
2882 /* Queue it for later */
2883 if (iocbq) {
2884 emlxs_tx_put(iocbq, 1);
2885 }
2886
2887 goto busy;
2888 }
2889 }
2890
2891 /*
2892 * We have a command ring slot available
2893 * Make sure we have an iocb to send
2894 */
2895 if (iocbq) {
2896 mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
2897
2898 /* Check if the ring already has iocb's waiting */
2899 if (cp->nodeq.q_first != NULL) {
2900 /* Put the current iocbq on the tx queue */
2901 emlxs_tx_put(iocbq, 0);
2902
2903 /*
2904 * Attempt to replace it with the next iocbq
2905 * in the tx queue
2906 */
2907 iocbq = emlxs_tx_get(cp, 0);
2908 }
2909
2910 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2911 } else {
2912 /* Try to get the next iocb on the tx queue */
2913 iocbq = emlxs_tx_get(cp, 1);
2914 }
2915
2916 sendit:
2917 count = 0;
2918
2919 /* Process each iocbq */
2920 while (iocbq) {
2921 sbp = iocbq->sbp;
2922
2923 #ifdef NODE_THROTTLE_SUPPORT
2924 if (sbp && sbp->node && sbp->node->io_throttle) {
2925 node_throttle = sbp->node->io_throttle -
2926 sbp->node->io_active;
2927 if (node_throttle <= 0) {
2928 /* Node is busy */
2929 /* Queue this iocb and get next iocb from */
2930 /* channel */
2931
2932 if (!marked_node) {
2933 marked_node = sbp->node;
2934 }
2935
2936 mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
2937 emlxs_tx_put(iocbq, 0);
2938
2939 if (cp->nodeq.q_first == marked_node) {
2940 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2941 goto busy;
2942 }
2943
2944 iocbq = emlxs_tx_get(cp, 0);
2945 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2946 continue;
2947 }
2948 }
2949 marked_node = NULL;
2950 #endif /* NODE_THROTTLE_SUPPORT */
2951
2952 if (sbp && (sbp->pkt_flags & PACKET_DELAY_REQUIRED)) {
2953 /*
2954 * Update adapter if needed, since we are about to
2955 * delay here
2956 */
2957 if (count) {
2958 count = 0;
2959
2960 /* Update the adapter's cmd put index */
2961 if (hba->bus_type == SBUS_FC) {
2962 slim2p->mbx.us.s2.host[channelno].
2963 cmdPutInx =
2964 BE_SWAP32(rp->fc_cmdidx);
2965
2966 /* DMA sync the index for the adapter */
2967 offset = (off_t)
2968 ((uint64_t)
2969 ((unsigned long)&(slim2p->mbx.us.
2970 s2.host[channelno].cmdPutInx)) -
2971 (uint64_t)((unsigned long)slim2p));
2972 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.
2973 dma_handle, offset, 4,
2974 DDI_DMA_SYNC_FORDEV);
2975 } else {
2976 ioa2 = (void *)
2977 ((char *)hba->sli.sli3.slim_addr +
2978 hba->sli.sli3.hgp_ring_offset +
2979 ((channelno * 2) *
2980 sizeof (uint32_t)));
2981 WRITE_SLIM_ADDR(hba,
2982 (volatile uint32_t *)ioa2,
2983 rp->fc_cmdidx);
2984 }
2985
2986 status = (CA_R0ATT << (channelno * 4));
2987 WRITE_CSR_REG(hba, FC_CA_REG(hba),
2988 (volatile uint32_t)status);
2989
2990 }
2991 /* Perform delay */
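/*
 * Non-FCP traffic on the ELS ring is paced more slowly (100ms)
 * than everything else (20ms).
 */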
2992 if ((channelno == FC_ELS_RING) &&
2993 !(iocbq->flag & IOCB_FCP_CMD)) {
2994 drv_usecwait(100000);
2995 } else {
2996 drv_usecwait(20000);
2997 }
2998 }
2999
3000 /*
3001 * At this point, we have a command ring slot available
3002 * and an iocb to send
3003 */
3004 flag = iocbq->flag;
3005
3006 /* Send the iocb */
3007 emlxs_sli3_issue_iocb(hba, rp, iocbq);
3008 /*
3009 * After this, the sbp / iocb should not be
3010 * accessed in the xmit path.
3011 */
3012
3013 count++;
3014 if (iocbq && (!(flag & IOCB_SPECIAL))) {
3015 /* Check if HBA is full */
3016 throttle = hba->io_throttle - hba->io_active;
3017 if (throttle <= 0) {
3018 goto busy;
3019 }
3020 }
3021
3022 /* Calculate the next put index */
3023 nextIdx =
3024 (rp->fc_cmdidx + 1 >=
3025 rp->fc_numCiocb) ? 0 : rp->fc_cmdidx + 1;
3026
3027 /* Check if ring is full */
3028 if (nextIdx == rp->fc_port_cmdidx) {
3029 /* Try one more time */
3030 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
3031 offset, 4, DDI_DMA_SYNC_FORKERNEL);
3032 rp->fc_port_cmdidx = BE_SWAP32(pgp->cmdGetInx);
3033
3034 if (nextIdx == rp->fc_port_cmdidx) {
3035 goto busy;
3036 }
3037 }
3038
3039 /* Get the next iocb from the tx queue if there is one */
3040 iocbq = emlxs_tx_get(cp, 1);
3041 }
3042
3043 if (count) {
3044 /* Update the adapter's cmd put index */
3045 if (hba->bus_type == SBUS_FC) {
3046 slim2p->mbx.us.s2.host[channelno].
3047 cmdPutInx = BE_SWAP32(rp->fc_cmdidx);
3048
3049 /* DMA sync the index for the adapter */
3050 offset = (off_t)
3051 ((uint64_t)((unsigned long)&(slim2p->mbx.us.s2.
3052 host[channelno].cmdPutInx)) -
3053 (uint64_t)((unsigned long)slim2p));
3054 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
3055 offset, 4, DDI_DMA_SYNC_FORDEV);
3056 } else {
3057 ioa2 =
3058 (void *)((char *)hba->sli.sli3.slim_addr +
3059 hba->sli.sli3.hgp_ring_offset +
3060 ((channelno * 2) * sizeof (uint32_t)));
3061 WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2,
3062 rp->fc_cmdidx);
3063 }
3064
3065 status = (CA_R0ATT << (channelno * 4));
3066 WRITE_CSR_REG(hba, FC_CA_REG(hba),
3067 (volatile uint32_t)status);
3068
3069 /* Check tx queue one more time before releasing */
3070 if ((iocbq = emlxs_tx_get(cp, 1))) {
3071 /*
3072 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ring_watchdog_msg,
3073 * "%s host=%d port=%d RACE CONDITION1
3074 * DETECTED.", emlxs_ring_xlate(channelno),
3075 * rp->fc_cmdidx, rp->fc_port_cmdidx);
3076 */
3077 goto sendit;
3078 }
3079 }
3080
3081 #ifdef FMA_SUPPORT
3082 /* Access handle validation */
3083 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
3084 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
3085 #endif /* FMA_SUPPORT */
3086
3087 mutex_exit(&EMLXS_CMD_RING_LOCK(channelno));
3088
3089 return;
3090
3091 busy:
3092
3093 /*
3094 * Set R0CE_REQ in the Chip Attention register.
3095 * The chip will tell us when an entry is freed.
3096 */
3097 if (count) {
3098 /* Update the adapter's cmd put index */
3099 if (hba->bus_type == SBUS_FC) {
3100 slim2p->mbx.us.s2.host[channelno].cmdPutInx =
3101 BE_SWAP32(rp->fc_cmdidx);
3102
3103 /* DMA sync the index for the adapter */
3104 offset = (off_t)
3105 ((uint64_t)((unsigned long)&(slim2p->mbx.us.s2.
3106 host[channelno].cmdPutInx)) -
3107 (uint64_t)((unsigned long)slim2p));
3108 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
3109 offset, 4, DDI_DMA_SYNC_FORDEV);
3110 } else {
3111 ioa2 =
3112 (void *)((char *)hba->sli.sli3.slim_addr +
3113 hba->sli.sli3.hgp_ring_offset +
3114 ((channelno * 2) * sizeof (uint32_t)));
3115 WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2,
3116 rp->fc_cmdidx);
3117 }
3118 }
3119
3120 status = ((CA_R0ATT | CA_R0CE_REQ) << (channelno * 4));
3121 WRITE_CSR_REG(hba, FC_CA_REG(hba), (volatile uint32_t)status);
3122
3123 if (throttle <= 0) {
3124 HBASTATS.IocbThrottled++;
3125 } else {
3126 HBASTATS.IocbRingFull[channelno]++;
3127 }
3128
3129 #ifdef FMA_SUPPORT
3130 /* Access handle validation */
3131 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
3132 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
3133 #endif /* FMA_SUPPORT */
3134
3135 mutex_exit(&EMLXS_CMD_RING_LOCK(channelno));
3136
3137 return;
3138
3139 } /* emlxs_sli3_issue_iocb_cmd() */
3140
3141
3142 /* MBX_NOWAIT - returns MBX_BUSY or MBX_SUCCESS or MBX_HARDWARE_ERROR */
3143 /* MBX_WAIT - returns MBX_TIMEOUT or mailbox_status */
3144 /* MBX_SLEEP - returns MBX_TIMEOUT or mailbox_status */
3145 /* MBX_POLL - returns MBX_TIMEOUT or mailbox_status */
3146
3147 static uint32_t
3148 emlxs_sli3_issue_mbox_cmd(emlxs_hba_t *hba, MAILBOXQ *mbq, int32_t flag,
3149 uint32_t tmo)
3150 {
3151 emlxs_port_t *port;
3152 SLIM2 *slim2p = (SLIM2 *)hba->sli.sli3.slim2.virt;
3153 MAILBOX *mbox;
3154 MAILBOX *mb;
3155 volatile uint32_t word0;
3156 volatile uint32_t ldata;
3157 off_t offset;
3158 MATCHMAP *mbox_bp;
3159 uint32_t tmo_local;
3160 MAILBOX *swpmb;
3161
3162 if (!mbq->port) {
3163 mbq->port = &PPORT;
3164 }
3165
3166 port = (emlxs_port_t *)mbq->port;
3167
3168 mb = (MAILBOX *)mbq;
3169 swpmb = (MAILBOX *)&word0;
3170
3171 mb->mbxStatus = MBX_SUCCESS;
3172
3173 /* Check for minimum timeouts */
3174 switch (mb->mbxCommand) {
3175 /* Mailbox commands that erase/write flash */
3176 case MBX_DOWN_LOAD:
3177 case MBX_UPDATE_CFG:
3178 case MBX_LOAD_AREA:
3179 case MBX_LOAD_EXP_ROM:
3180 case MBX_WRITE_NV:
3181 case MBX_FLASH_WR_ULA:
3182 case MBX_DEL_LD_ENTRY:
3183 case MBX_LOAD_SM:
3184 if (tmo < 300) {
3185 tmo = 300;
3186 }
3187 break;
3188
3189 default:
3190 if (tmo < 30) {
3191 tmo = 30;
3192 }
3193 break;
3194 }
3195
3196 /* Convert tmo seconds to 10 millisecond tics */
3197 tmo_local = tmo * 100;
3198
3199 /* Adjust wait flag */
3200 if (flag != MBX_NOWAIT) {
3201 /* If interrupt is enabled, use sleep, otherwise poll */
3202 if (hba->sli.sli3.hc_copy & HC_MBINT_ENA) {
3203 flag = MBX_SLEEP;
3204 } else {
3205 flag = MBX_POLL;
3206 }
3207 }
3208
3209 mutex_enter(&EMLXS_PORT_LOCK);
3210
3211 /* Check for hardware error */
3212 if (hba->flag & FC_HARDWARE_ERROR) {
3213 mb->mbxStatus = (hba->flag & FC_OVERTEMP_EVENT) ?
3214 MBX_OVERTEMP_ERROR : MBX_HARDWARE_ERROR;
3215
3216 mutex_exit(&EMLXS_PORT_LOCK);
3217
3218 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3219 "Hardware error reported. %s failed. status=%x mb=%p",
3220 emlxs_mb_cmd_xlate(mb->mbxCommand), mb->mbxStatus, mb);
3221
3222 return (MBX_HARDWARE_ERROR);
3223 }
3224
3225 if (hba->mbox_queue_flag) {
3226 /* If we are not polling, then queue it for later */
3227 if (flag == MBX_NOWAIT) {
3228 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3229 "Busy. %s: mb=%p NoWait.",
3230 emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
3231
3232 emlxs_mb_put(hba, mbq);
3233
3234 HBASTATS.MboxBusy++;
3235
3236 mutex_exit(&EMLXS_PORT_LOCK);
3237
3238 return (MBX_BUSY);
3239 }
3240
3241 while (hba->mbox_queue_flag) {
3242 mutex_exit(&EMLXS_PORT_LOCK);
3243
3244 if (tmo_local-- == 0) {
3245 EMLXS_MSGF(EMLXS_CONTEXT,
3246 &emlxs_mbox_event_msg,
3247 "Timeout. %s: mb=%p tmo=%d Waiting.",
3248 emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
3249 tmo);
3250
3251 /* Non-lethal status: mailbox timeout */
3252 /* Does not indicate a hardware error */
3253 mb->mbxStatus = MBX_TIMEOUT;
3254 return (MBX_TIMEOUT);
3255 }
3256
3257 BUSYWAIT_MS(10);
3258 mutex_enter(&EMLXS_PORT_LOCK);
3259
3260 /* Check for hardware error */
3261 if (hba->flag & FC_HARDWARE_ERROR) {
3262 mb->mbxStatus =
3263 (hba->flag & FC_OVERTEMP_EVENT) ?
3264 MBX_OVERTEMP_ERROR : MBX_HARDWARE_ERROR;
3265
3266 mutex_exit(&EMLXS_PORT_LOCK);
3267
3268 EMLXS_MSGF(EMLXS_CONTEXT,
3269 &emlxs_mbox_detail_msg,
3270 "Hardware error reported. %s failed. "
3271 "status=%x mb=%p",
3272 emlxs_mb_cmd_xlate(mb->mbxCommand),
3273 mb->mbxStatus, mb);
3274
3275 return (MBX_HARDWARE_ERROR);
3276 }
3277 }
3278 }
3279
3280 /* Initialize mailbox area */
3281 emlxs_mb_init(hba, mbq, flag, tmo);
3282
3283 switch (flag) {
3284 case MBX_NOWAIT:
3285
3286 if (mb->mbxCommand != MBX_HEARTBEAT) {
3287 if (mb->mbxCommand != MBX_DOWN_LOAD &&
3288 mb->mbxCommand != MBX_DUMP_MEMORY) {
3289 EMLXS_MSGF(EMLXS_CONTEXT,
3290 &emlxs_mbox_detail_msg,
3291 "Sending. %s: mb=%p NoWait.",
3292 emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
3293 }
3294 }
3295
3296 break;
3297
3298 case MBX_SLEEP:
3299 if (mb->mbxCommand != MBX_DOWN_LOAD &&
3300 mb->mbxCommand != MBX_DUMP_MEMORY) {
3301 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3302 "Sending. %s: mb=%p Sleep.",
3303 emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
3304 }
3305
3306 break;
3307
3308 case MBX_POLL:
3309 if (mb->mbxCommand != MBX_DOWN_LOAD &&
3310 mb->mbxCommand != MBX_DUMP_MEMORY) {
3311 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3312 "Sending. %s: mb=%p Polled.",
3313 emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
3314 }
3315 break;
3316 }
3317
3318 mb->mbxOwner = OWN_CHIP;
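/*
 * Ownership handshake: OWN_CHIP hands the mailbox to the adapter;
 * the adapter flips it back to host ownership when the command
 * completes (the MBX_POLL path below spins on that transition).
 */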
3319
3320 /* Clear the attention bit */
3321 WRITE_CSR_REG(hba, FC_HA_REG(hba), HA_MBATT);
3322
3323 if (hba->flag & FC_SLIM2_MODE) {
3324 /* First copy command data */
3325 mbox = FC_SLIM2_MAILBOX(hba);
3326 offset =
3327 (off_t)((uint64_t)((unsigned long)mbox)
3328 - (uint64_t)((unsigned long)slim2p));
3329
3330 #ifdef MBOX_EXT_SUPPORT
3331 if (mbq->extbuf) {
3332 uint32_t *mbox_ext =
3333 (uint32_t *)((uint8_t *)mbox +
3334 MBOX_EXTENSION_OFFSET);
3335 off_t offset_ext = offset + MBOX_EXTENSION_OFFSET;
3336
3337 BE_SWAP32_BCOPY((uint8_t *)mbq->extbuf,
3338 (uint8_t *)mbox_ext, mbq->extsize);
3339
3340 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
3341 offset_ext, mbq->extsize,
3342 DDI_DMA_SYNC_FORDEV);
3343 }
3344 #endif /* MBOX_EXT_SUPPORT */
3345
3346 BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)mbox,
3347 MAILBOX_CMD_BSIZE);
3348
3349 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
3350 MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORDEV);
3351 } else { /* SLIM 1 */
3352
3353 mbox = FC_SLIM1_MAILBOX(hba);
3354
3355 #ifdef MBOX_EXT_SUPPORT
3356 if (mbq->extbuf) {
3357 uint32_t *mbox_ext =
3358 (uint32_t *)((uint8_t *)mbox +
3359 MBOX_EXTENSION_OFFSET);
3360 WRITE_SLIM_COPY(hba, (uint32_t *)mbq->extbuf,
3361 mbox_ext, (mbq->extsize / 4));
3362 }
3363 #endif /* MBOX_EXT_SUPPORT */
3364
3365 /* First copy command data */
3366 WRITE_SLIM_COPY(hba, &mb->un.varWords, &mbox->un.varWords,
3367 (MAILBOX_CMD_WSIZE - 1));
3368
3369 /* copy over last word, with mbxOwner set */
3370 ldata = *((volatile uint32_t *)mb);
3371 WRITE_SLIM_ADDR(hba, ((volatile uint32_t *)mbox), ldata);
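/*
 * Word 0 (which carries mbxOwner) is written last so the
 * chip cannot observe OWN_CHIP before the rest of the
 * command is in SLIM.
 */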
3372 }
3373
3374 /* Interrupt board to do it right away */
3375 WRITE_CSR_REG(hba, FC_CA_REG(hba), CA_MBATT);
3376
3377 mutex_exit(&EMLXS_PORT_LOCK);
3378
3379 #ifdef FMA_SUPPORT
3380 /* Access handle validation */
3381 if ((emlxs_fm_check_acc_handle(hba, hba->sli.sli3.slim_acc_handle)
3382 != DDI_FM_OK) ||
3383 (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.csr_acc_handle)
3384 != DDI_FM_OK)) {
3385 EMLXS_MSGF(EMLXS_CONTEXT,
3386 &emlxs_invalid_access_handle_msg, NULL);
3387 return (MBX_HARDWARE_ERROR);
3388 }
3389 #endif /* FMA_SUPPORT */
3390
3391 switch (flag) {
3392 case MBX_NOWAIT:
3393 return (MBX_SUCCESS);
3394
3395 case MBX_SLEEP:
3396
3397 /* Wait for completion */
3398 /* The driver clock is timing the mailbox. */
3399 /* emlxs_mb_fini() will be called externally. */
3400
3401 mutex_enter(&EMLXS_MBOX_LOCK);
3402 while (!(mbq->flag & MBQ_COMPLETED)) {
3403 cv_wait(&EMLXS_MBOX_CV, &EMLXS_MBOX_LOCK);
3404 }
3405 mutex_exit(&EMLXS_MBOX_LOCK);
3406
3407 if (mb->mbxStatus == MBX_TIMEOUT) {
3408 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_event_msg,
3409 "Timeout. %s: mb=%p tmo=%d. Sleep.",
3410 emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo);
3411 } else {
3412 if (mb->mbxCommand != MBX_DOWN_LOAD &&
3413 mb->mbxCommand != MBX_DUMP_MEMORY) {
3414 EMLXS_MSGF(EMLXS_CONTEXT,
3415 &emlxs_mbox_detail_msg,
3416 "Completed. %s: mb=%p status=%x Sleep.",
3417 emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
3418 mb->mbxStatus);
3419 }
3420 }
3421
3422 break;
3423
3424 case MBX_POLL:
3425
3426 /* Convert tmo seconds to 500 usec tics */
3427 tmo_local = tmo * 2000;
3428
3429 /* Get first word of mailbox */
3430 if (hba->flag & FC_SLIM2_MODE) {
3431 mbox = FC_SLIM2_MAILBOX(hba);
3432 offset = (off_t)((uint64_t)((unsigned long)mbox) -
3433 (uint64_t)((unsigned long)slim2p));
3434
3435 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
3436 offset, sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
3437 word0 = *((volatile uint32_t *)mbox);
3438 word0 = BE_SWAP32(word0);
3439 } else {
3440 mbox = FC_SLIM1_MAILBOX(hba);
3441 word0 =
3442 READ_SLIM_ADDR(hba, ((volatile uint32_t *)mbox));
3443 }
3444
3445 /* Wait for command to complete */
3446 while ((swpmb->mbxOwner == OWN_CHIP) &&
3447 !(mbq->flag & MBQ_COMPLETED)) {
3448 if (!hba->timer_id && (tmo_local-- == 0)) {
3449 /* No driver timer; time the mailbox ourselves */
3450 EMLXS_MSGF(EMLXS_CONTEXT,
3451 &emlxs_mbox_timeout_msg,
3452 "%s: mb=%p tmo=%d Polled.",
3453 emlxs_mb_cmd_xlate(mb->mbxCommand),
3454 mb, tmo);
3455
3456 hba->flag |= FC_MBOX_TIMEOUT;
3457 EMLXS_STATE_CHANGE(hba, FC_ERROR);
3458 emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);
3459
3460 break;
3461 }
3462
3463 BUSYWAIT_US(500);
3464
3465 /* Get first word of mailbox */
3466 if (hba->flag & FC_SLIM2_MODE) {
3467 EMLXS_MPDATA_SYNC(
3468 hba->sli.sli3.slim2.dma_handle, offset,
3469 sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
3470 word0 = *((volatile uint32_t *)mbox);
3471 word0 = BE_SWAP32(word0);
3472 } else {
3473 word0 =
3474 READ_SLIM_ADDR(hba,
3475 ((volatile uint32_t *)mbox));
3476 }
3477
3478 } /* while */
3479
3480 if (mb->mbxStatus == MBX_TIMEOUT) {
3481 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_event_msg,
3482 "Timeout. %s: mb=%p tmo=%d. Polled.",
3483 emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo);
3484
3485 break;
3486 }
3487
3488 /* Check for config port command */
3489 if ((swpmb->mbxCommand == MBX_CONFIG_PORT) &&
3490 (swpmb->mbxStatus == MBX_SUCCESS)) {
3491 /* Setup host mbox for cmpl */
3492 mbox = FC_SLIM2_MAILBOX(hba);
3493 offset = (off_t)((uint64_t)((unsigned long)mbox)
3494 - (uint64_t)((unsigned long)slim2p));
3495
3496 hba->flag |= FC_SLIM2_MODE;
3497 }
3498
3499 /* copy results back to user */
3500 if (hba->flag & FC_SLIM2_MODE) {
3501 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
3502 offset, MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORKERNEL);
3503
3504 BE_SWAP32_BCOPY((uint8_t *)mbox, (uint8_t *)mb,
3505 MAILBOX_CMD_BSIZE);
3506 } else {
3507 READ_SLIM_COPY(hba, (uint32_t *)mb,
3508 (uint32_t *)mbox, MAILBOX_CMD_WSIZE);
3509 }
3510
3511 #ifdef MBOX_EXT_SUPPORT
3512 if (mbq->extbuf) {
3513 uint32_t *mbox_ext =
3514 (uint32_t *)((uint8_t *)mbox +
3515 MBOX_EXTENSION_OFFSET);
3516 off_t offset_ext = offset + MBOX_EXTENSION_OFFSET;
3517
3518 if (hba->flag & FC_SLIM2_MODE) {
3519 EMLXS_MPDATA_SYNC(
3520 hba->sli.sli3.slim2.dma_handle, offset_ext,
3521 mbq->extsize, DDI_DMA_SYNC_FORKERNEL);
3522
3523 BE_SWAP32_BCOPY((uint8_t *)mbox_ext,
3524 (uint8_t *)mbq->extbuf, mbq->extsize);
3525 } else {
3526 READ_SLIM_COPY(hba,
3527 (uint32_t *)mbq->extbuf, mbox_ext,
3528 (mbq->extsize / 4));
3529 }
3530 }
3531 #endif /* MBOX_EXT_SUPPORT */
3532
3533 /* Sync the memory buffer */
3534 if (mbq->bp) {
3535 mbox_bp = (MATCHMAP *)mbq->bp;
3536 EMLXS_MPDATA_SYNC(mbox_bp->dma_handle, 0,
3537 mbox_bp->size, DDI_DMA_SYNC_FORKERNEL);
3538 }
3539
3540 if (mb->mbxCommand != MBX_DOWN_LOAD &&
3541 mb->mbxCommand != MBX_DUMP_MEMORY) {
3542 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3543 "Completed. %s: mb=%p status=%x Polled.",
3544 emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
3545 mb->mbxStatus);
3546 }
3547
3548 /* Process the result */
3549 if (!(mbq->flag & MBQ_PASSTHRU)) {
3550 if (mbq->mbox_cmpl) {
3551 (void) (mbq->mbox_cmpl)(hba, mbq);
3552 }
3553 }
3554
3555 /* Clear the attention bit */
3556 WRITE_CSR_REG(hba, FC_HA_REG(hba), HA_MBATT);
3557
3558 /* Clean up the mailbox area */
3559 emlxs_mb_fini(hba, NULL, mb->mbxStatus);
3560
3561 break;
3562
3563 } /* switch (flag) */
3564
3565 return (mb->mbxStatus);
3566
3567 } /* emlxs_sli3_issue_mbox_cmd() */
3568
3569
3570 #ifdef SFCT_SUPPORT
3571 /*ARGSUSED*/
3572 static uint32_t
3573 emlxs_sli3_prep_fct_iocb(emlxs_port_t *port, emlxs_buf_t *cmd_sbp,
3574 int channel)
3575 {
3576 emlxs_hba_t *hba = HBA;
3577 emlxs_config_t *cfg = &CFG;
3578 fct_cmd_t *fct_cmd;
3579 stmf_data_buf_t *dbuf;
3580 scsi_task_t *fct_task;
3581 fc_packet_t *pkt;
3582 uint32_t did;
3583 IOCBQ *iocbq;
3584 IOCB *iocb;
3585 uint32_t timeout;
3586 uint32_t iotag;
3587 emlxs_node_t *ndlp;
3588 CHANNEL *cp;
3589 ddi_dma_cookie_t *cp_cmd;
3590
3591 pkt = PRIV2PKT(cmd_sbp);
3592
3593 cp = (CHANNEL *)cmd_sbp->channel;
3594
3595 iocbq = &cmd_sbp->iocbq;
3596 iocb = &iocbq->iocb;
3597
3598
3599 /* Get the iotag by registering the packet */
3600 iotag = emlxs_register_pkt(cp, cmd_sbp);
3601
3602 if (!iotag) {
3603 /* No more command slots available, retry later */
3604 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3605 "Adapter Busy. Unable to allocate iotag. did=0x%x",
3606 cmd_sbp->did);
3607
3608 return (IOERR_NO_RESOURCES);
3609 }
3610
3611
3612 /* Point of no return */
3613
3614 if (iocb->ULPCOMMAND == CMD_ABORT_XRI_CX) {
3615
3616 ndlp = cmd_sbp->node;
3617 cp->ulpSendCmd++;
3618
3619 /* Initialize iocbq */
3620 iocbq->port = (void *)port;
3621 iocbq->node = (void *)ndlp;
3622 iocbq->channel = (void *)cp;
3623
3624 /*
3625 * Don't give the abort priority; we want the IOCB
3626 * we are aborting to be processed first.
3627 */
3628 iocbq->flag |= IOCB_SPECIAL;
3629
3630 iocb->ULPCONTEXT = pkt->pkt_cmd_fhdr.rx_id;
3631 iocb->ULPIOTAG = (uint16_t)iotag;
3632 iocb->ULPLE = 1;
3633 iocb->ULPCLASS = cmd_sbp->class;
3634 iocb->ULPOWNER = OWN_CHIP;
3635
3636 if (hba->state >= FC_LINK_UP) {
3637 /* Create the abort IOCB */
3638 iocb->un.acxri.abortType = ABORT_TYPE_ABTS;
3639 iocb->ULPCOMMAND = CMD_ABORT_XRI_CX;
3640
3641 } else {
3642 /* Create the close IOCB */
3643 iocb->ULPCOMMAND = CMD_CLOSE_XRI_CX;
3644
3645 }
3646
3647 iocb->ULPRSVDBYTE =
3648 ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
3649 /* Set the pkt timer */
3650 cmd_sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
3651 ((pkt->pkt_timeout > 0xff) ? 0 : 10);
3652
3653 return (IOERR_SUCCESS);
3654
3655 } else if (iocb->ULPCOMMAND == CMD_FCP_TRSP64_CX) {
3656
3657 ndlp = cmd_sbp->node;
3658 cp->ulpSendCmd++;
3659
3660 /* Initialize iocbq */
3661 iocbq->port = (void *)port;
3662 iocbq->node = (void *)ndlp;
3663 iocbq->channel = (void *)cp;
3664
3665 #if (EMLXS_MODREV >= EMLXS_MODREV3)
3666 cp_cmd = pkt->pkt_cmd_cookie;
3667 #else
3668 cp_cmd = &pkt->pkt_cmd_cookie;
3669 #endif /* >= EMLXS_MODREV3 */
3670
3671 iocb->un.fcpt64.bdl.addrHigh = PADDR_HI(cp_cmd->dmac_laddress);
3672 iocb->un.fcpt64.bdl.addrLow = PADDR_LO(cp_cmd->dmac_laddress);
3673 iocb->un.fcpt64.bdl.bdeSize = pkt->pkt_cmdlen;
3674 iocb->un.fcpt64.bdl.bdeFlags = 0;
3675
3676 if (hba->sli_mode < 3) {
3677 iocb->ULPBDECOUNT = 1;
3678 iocb->ULPLE = 1;
3679 } else { /* SLI3 */
3680
3681 iocb->ULPBDECOUNT = 0;
3682 iocb->ULPLE = 0;
3683 iocb->unsli3.ext_iocb.ebde_count = 0;
3684 }
3685
3686 /* Initialize iocb */
3687 iocb->ULPCONTEXT = (uint16_t)pkt->pkt_cmd_fhdr.rx_id;
3688 iocb->ULPIOTAG = (uint16_t)iotag;
3689 iocb->ULPRSVDBYTE =
3690 ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
3691 iocb->ULPOWNER = OWN_CHIP;
3692 iocb->ULPCLASS = cmd_sbp->class;
3693 iocb->ULPCOMMAND = CMD_FCP_TRSP64_CX;
3694
3695 /* Set the pkt timer */
3696 cmd_sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
3697 ((pkt->pkt_timeout > 0xff) ? 0 : 10);
3698
3699 if (pkt->pkt_cmdlen) {
3700 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
3701 DDI_DMA_SYNC_FORDEV);
3702 }
3703
3704 return (IOERR_SUCCESS);
3705 }
3706
3707 dbuf = cmd_sbp->fct_buf;
3708 fct_cmd = cmd_sbp->fct_cmd;
3709 fct_task = (scsi_task_t *)fct_cmd->cmd_specific;
3710 ndlp = *(emlxs_node_t **)fct_cmd->cmd_rp->rp_fca_private;
3711 did = fct_cmd->cmd_rportid;
3712
3713 iocbq->channel = (void *)cmd_sbp->channel;
3714
3715 if (emlxs_fct_bde_setup(port, cmd_sbp)) {
3716 /* Unregister the packet */
3717 (void) emlxs_unregister_pkt(cmd_sbp->channel, iotag, 0);
3718
3719 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3720 "Adapter Busy. Unable to setup buffer list. did=%x", did);
3721
3722 return (IOERR_INTERNAL_ERROR);
3723 }
3724
3725 if (cfg[CFG_TIMEOUT_ENABLE].current) {
3726 timeout =
3727 ((2 * hba->fc_ratov) < 60) ? 60 : (2 * hba->fc_ratov);
3728 } else {
3729 timeout = 0x80000000;
3730 }
3731
3732 cmd_sbp->ticks =
3733 hba->timer_tics + timeout + ((timeout > 0xff) ? 0 : 10);
3734
3735 /* Initialize iocbq */
3736 iocbq->port = (void *)port;
3737 iocbq->node = (void *)ndlp;
3738
3739 /* Initialize iocb */
3740 iocb->ULPCONTEXT = (uint16_t)fct_cmd->cmd_rxid;
3741 iocb->ULPIOTAG = (uint16_t)iotag;
3742 iocb->ULPRSVDBYTE = ((timeout > 0xff) ? 0 : timeout);
3743 iocb->ULPOWNER = OWN_CHIP;
3744 iocb->ULPCLASS = cmd_sbp->class;
3745
3746 iocb->ULPPU = 1; /* Wd4 is relative offset */
3747 iocb->un.fcpt64.fcpt_Offset = dbuf->db_relative_offset;
3748
3749 if (fct_task->task_flags & TF_WRITE_DATA) {
3750 iocb->ULPCOMMAND = CMD_FCP_TRECEIVE64_CX;
3751 } else { /* TF_READ_DATA */
3752
3753 iocb->ULPCOMMAND = CMD_FCP_TSEND64_CX;
3754
3755 if ((hba->sli_mode == EMLXS_HBA_SLI3_MODE) &&
3756 (dbuf->db_data_size >=
3757 fct_task->task_expected_xfer_length)) {
3758 iocb->ULPCT = 0x1;
3759 /* enable auto-rsp AP feature */
3760 }
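/*
 * With ULPCT set, the adapter can return the FCP response
 * automatically after the final data sequence, avoiding a
 * separate FCP_TRSP64 IOCB round trip.
 */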
3761 }
3762
3763 return (IOERR_SUCCESS);
3764
3765 } /* emlxs_sli3_prep_fct_iocb() */
3766 #endif /* SFCT_SUPPORT */
3767
3768 /* ARGSUSED */
3769 static uint32_t
3770 emlxs_sli3_prep_fcp_iocb(emlxs_port_t *port, emlxs_buf_t *sbp, int channel)
3771 {
3772 emlxs_hba_t *hba = HBA;
3773 fc_packet_t *pkt;
3774 CHANNEL *cp;
3775 IOCBQ *iocbq;
3776 IOCB *iocb;
3777 NODELIST *ndlp;
3778 uint16_t iotag;
3779 uint32_t did;
3780
3781 pkt = PRIV2PKT(sbp);
3782 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
3783 cp = &hba->chan[FC_FCP_RING];
3784
3785 iocbq = &sbp->iocbq;
3786 iocb = &iocbq->iocb;
3787
3788 /* Find target node object */
3789 ndlp = (NODELIST *)iocbq->node;
3790
3791 /* Get the iotag by registering the packet */
3792 iotag = emlxs_register_pkt(cp, sbp);
3793
3794 if (!iotag) {
3795 /*
3796 * No more command slots available, retry later
3797 */
3798 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3799 "Adapter Busy. Unable to allocate iotag: did=0x%x", did);
3800
3801 return (FC_TRAN_BUSY);
3802 }
3803
3804 /* Initialize iocbq */
3805 iocbq->port = (void *) port;
3806 iocbq->channel = (void *) cp;
3807
3808 /* Indicate this is a FCP cmd */
3809 iocbq->flag |= IOCB_FCP_CMD;
3810
3811 if (emlxs_bde_setup(port, sbp)) {
3812 /* Unregister the packet */
3813 (void) emlxs_unregister_pkt(cp, iotag, 0);
3814
3815 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3816 "Adapter Busy. Unable to setup buffer list. did=%x", did);
3817
3818 return (FC_TRAN_BUSY);
3819 }
3820 /* Point of no return */
3821
3822 /* Initialize iocb */
3823 iocb->ULPCONTEXT = ndlp->nlp_Rpi;
3824 iocb->ULPIOTAG = iotag;
3825 iocb->ULPRSVDBYTE =
3826 ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
3827 iocb->ULPOWNER = OWN_CHIP;
3828
3829 switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
3830 case FC_TRAN_CLASS1:
3831 iocb->ULPCLASS = CLASS1;
3832 break;
3833 case FC_TRAN_CLASS2:
3834 iocb->ULPCLASS = CLASS2;
3835 /* iocb->ULPCLASS = CLASS3; */
3836 break;
3837 case FC_TRAN_CLASS3:
3838 default:
3839 iocb->ULPCLASS = CLASS3;
3840 break;
3841 }
3842
3843 /* If the device is an FCP-2 device, set the bit that */
3844 /* tells the adapter to run the FC-TAPE recovery protocol. */
3845 if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
3846 iocb->ULPFCP2RCVY = 1;
3847 }
3848
3849 if (pkt->pkt_datalen == 0) {
3850 iocb->ULPCOMMAND = CMD_FCP_ICMND64_CR;
3851 } else if (pkt->pkt_tran_type == FC_PKT_FCP_READ) {
3852 iocb->ULPCOMMAND = CMD_FCP_IREAD64_CR;
3853 iocb->ULPPU = PARM_XFER_CHECK;
3854 iocb->un.fcpi64.fcpi_parm = pkt->pkt_datalen;
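/*
 * For reads, fcpi_parm carries the expected transfer length and
 * PARM_XFER_CHECK has the adapter compare it against the bytes
 * actually moved, so underruns can be flagged at completion.
 */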
3855 } else {
3856 iocb->ULPCOMMAND = CMD_FCP_IWRITE64_CR;
3857 }
3858
3859 return (FC_SUCCESS);
3860
3861 } /* emlxs_sli3_prep_fcp_iocb() */
3862
3863
3864 static uint32_t
3865 emlxs_sli3_prep_ip_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
3866 {
3867 emlxs_hba_t *hba = HBA;
3868 fc_packet_t *pkt;
3869 IOCBQ *iocbq;
3870 IOCB *iocb;
3871 CHANNEL *cp;
3872 NODELIST *ndlp;
3873 uint16_t iotag;
3874 uint32_t did;
3875
3876 pkt = PRIV2PKT(sbp);
3877 cp = &hba->chan[FC_IP_RING];
3878 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
3879
3880 iocbq = &sbp->iocbq;
3881 iocb = &iocbq->iocb;
3882 ndlp = (NODELIST *)iocbq->node;
3883
3884 /* Get the iotag by registering the packet */
3885 iotag = emlxs_register_pkt(cp, sbp);
3886
3887 if (!iotag) {
3888 /*
3889 * No more command slots available, retry later
3890 */
3891 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3892 "Adapter Busy. Unable to allocate iotag: did=0x%x", did);
3893
3894 return (FC_TRAN_BUSY);
3895 }
3896
3897 /* Initialize iocbq */
3898 iocbq->port = (void *) port;
3899 iocbq->channel = (void *) cp;
3900
3901 if (emlxs_bde_setup(port, sbp)) {
3902 /* Unregister the packet */
3903 (void) emlxs_unregister_pkt(cp, iotag, 0);
3904
3905 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3906 "Adapter Busy. Unable to setup buffer list. did=%x", did);
3907
3908 return (FC_TRAN_BUSY);
3909 }
3910 /* Point of no return */
3911
3912 /* Initialize iocb */
3913 iocb->un.xseq64.w5.hcsw.Fctl = 0;
3914
3915 if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_FIRST_SEQ) {
3916 iocb->un.xseq64.w5.hcsw.Fctl |= FSEQ;
3917 }
3918 if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
3919 iocb->un.xseq64.w5.hcsw.Fctl |= SI;
3920 }
3921
3922 /* network headers */
3923 iocb->un.xseq64.w5.hcsw.Dfctl = pkt->pkt_cmd_fhdr.df_ctl;
3924 iocb->un.xseq64.w5.hcsw.Rctl = pkt->pkt_cmd_fhdr.r_ctl;
3925 iocb->un.xseq64.w5.hcsw.Type = pkt->pkt_cmd_fhdr.type;
3926
3927 iocb->ULPIOTAG = iotag;
3928 iocb->ULPRSVDBYTE =
3929 ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
3930 iocb->ULPOWNER = OWN_CHIP;
3931
3932 if (pkt->pkt_tran_type == FC_PKT_BROADCAST) {
3933 HBASTATS.IpBcastIssued++;
3934
3935 iocb->ULPCOMMAND = CMD_XMIT_BCAST64_CN;
3936 iocb->ULPCONTEXT = 0;
3937
3938 if (hba->sli_mode == EMLXS_HBA_SLI3_MODE) {
3939 if (hba->topology != TOPOLOGY_LOOP) {
3940 iocb->ULPCT = 0x1;
3941 }
3942 iocb->ULPCONTEXT = port->vpi;
3943 }
3944 } else {
3945 HBASTATS.IpSeqIssued++;
3946
3947 iocb->ULPCOMMAND = CMD_XMIT_SEQUENCE64_CX;
3948 iocb->ULPCONTEXT = ndlp->nlp_Xri;
3949 }
3950
3951 switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
3952 case FC_TRAN_CLASS1:
3953 iocb->ULPCLASS = CLASS1;
3954 break;
3955 case FC_TRAN_CLASS2:
3956 iocb->ULPCLASS = CLASS2;
3957 break;
3958 case FC_TRAN_CLASS3:
3959 default:
3960 iocb->ULPCLASS = CLASS3;
3961 break;
3962 }
3963
3964 return (FC_SUCCESS);
3965
3966 } /* emlxs_sli3_prep_ip_iocb() */
3967
3968
3969 static uint32_t
3970 emlxs_sli3_prep_els_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
3971 {
3972 emlxs_hba_t *hba = HBA;
3973 fc_packet_t *pkt;
3974 IOCBQ *iocbq;
3975 IOCB *iocb;
3976 CHANNEL *cp;
3977 uint16_t iotag;
3978 uint32_t did;
3979 uint32_t cmd;
3980
3981 pkt = PRIV2PKT(sbp);
3982 cp = &hba->chan[FC_ELS_RING];
3983 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
3984
3985 iocbq = &sbp->iocbq;
3986 iocb = &iocbq->iocb;
3987
3988
3989 /* Get the iotag by registering the packet */
3990 iotag = emlxs_register_pkt(cp, sbp);
3991
3992 if (!iotag) {
3993 /*
3994 * No more command slots available, retry later
3995 */
3996 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3997 "Adapter Busy. Unable to allocate iotag. did=0x%x", did);
3998
3999 return (FC_TRAN_BUSY);
4000 }
4001 /* Initialize iocbq */
4002 iocbq->port = (void *) port;
4003 iocbq->channel = (void *) cp;
4004
4005 if (emlxs_bde_setup(port, sbp)) {
4006 /* Unregister the packet */
4007 (void) emlxs_unregister_pkt(cp, iotag, 0);
4008
4009 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
4010 "Adapter Busy. Unable to setup buffer list. did=%x", did);
4011
4012 return (FC_TRAN_BUSY);
4013 }
4014 /* Point of no return */
4015
4016 /* Initialize iocb */
4017 if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
4018 /* ELS Response */
4019 iocb->ULPCONTEXT = (volatile uint16_t) pkt->pkt_cmd_fhdr.rx_id;
4020 iocb->ULPCOMMAND = CMD_XMIT_ELS_RSP64_CX;
4021 } else {
4022 /* ELS Request */
4023 iocb->un.elsreq64.remoteID = (did == BCAST_DID) ? 0 : did;
4024 iocb->ULPCONTEXT =
4025 (did == BCAST_DID) ? pkt->pkt_cmd_fhdr.seq_id : 0;
4026 iocb->ULPCOMMAND = CMD_ELS_REQUEST64_CR;
4027
4028 if (hba->sli_mode == EMLXS_HBA_SLI3_MODE) {
4029 if (hba->topology != TOPOLOGY_LOOP) {
4030 cmd = *((uint32_t *)pkt->pkt_cmd);
4031 cmd &= ELS_CMD_MASK;
4032
4033 if ((cmd == ELS_CMD_FLOGI) ||
4034 (cmd == ELS_CMD_FDISC)) {
4035 iocb->ULPCT = 0x2;
4036 } else {
4037 iocb->ULPCT = 0x1;
4038 }
4039 }
4040 iocb->ULPCONTEXT = port->vpi;
4041 }
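/*
 * On a non-loop (fabric) SLI3 port, ULPCT distinguishes fabric
 * logins (FLOGI/FDISC, CT=0x2) from other ELS requests (CT=0x1),
 * and ULPCONTEXT carries the VPI for NPIV-aware firmware.
 */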
4042 }
4043 iocb->ULPIOTAG = iotag;
4044 iocb->ULPRSVDBYTE =
4045 ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
4046 iocb->ULPOWNER = OWN_CHIP;
4047
4048 switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
4049 case FC_TRAN_CLASS1:
4050 iocb->ULPCLASS = CLASS1;
4051 break;
4052 case FC_TRAN_CLASS2:
4053 iocb->ULPCLASS = CLASS2;
4054 break;
4055 case FC_TRAN_CLASS3:
4056 default:
4057 iocb->ULPCLASS = CLASS3;
4058 break;
4059 }
4060 sbp->class = iocb->ULPCLASS;
4061
4062 return (FC_SUCCESS);
4063
4064 } /* emlxs_sli3_prep_els_iocb() */
4065
4066
4067 static uint32_t
4068 emlxs_sli3_prep_ct_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
4069 {
4070 emlxs_hba_t *hba = HBA;
4071 fc_packet_t *pkt;
4072 IOCBQ *iocbq;
4073 IOCB *iocb;
4074 CHANNEL *cp;
4075 NODELIST *ndlp;
4076 uint16_t iotag;
4077 uint32_t did;
4078
4079 pkt = PRIV2PKT(sbp);
4080 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
4081 cp = &hba->chan[FC_CT_RING];
4082
4083 iocbq = &sbp->iocbq;
4084 iocb = &iocbq->iocb;
4085 ndlp = (NODELIST *)iocbq->node;
4086
4087 /* Get the iotag by registering the packet */
4088 iotag = emlxs_register_pkt(cp, sbp);
4089
4090 if (!iotag) {
4091 /*
4092 * No more command slots available, retry later
4093 */
4094 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
4095 "Adapter Busy. Unable to allocate iotag. did=0x%x", did);
4096
4097 return (FC_TRAN_BUSY);
4098 }
4099
4100 if (emlxs_bde_setup(port, sbp)) {
4101 /* Unregister the packet */
4102 (void) emlxs_unregister_pkt(cp, iotag, 0);
4103
4104 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
4105 "Adapter Busy. Unable to setup buffer list. did=%x", did);
4106
4107 return (FC_TRAN_BUSY);
4108 }
4109
4110 /* Point of no return */
4111
4112 /* Initalize iocbq */
4113 iocbq->port = (void *) port;
4114 iocbq->channel = (void *) cp;
4115
4116 /* Fill in rest of iocb */
4117 iocb->un.genreq64.w5.hcsw.Fctl = LA;
4118
4119 if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_LAST_SEQ) {
4120 iocb->un.genreq64.w5.hcsw.Fctl |= LSEQ;
4121 }
4122 if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
4123 iocb->un.genreq64.w5.hcsw.Fctl |= SI;
4124 }
4125
4126 /* Initalize iocb */
4127 if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
4128 /* CT Response */
4129 iocb->ULPCOMMAND = CMD_XMIT_SEQUENCE64_CX;
4130 iocb->un.genreq64.w5.hcsw.Dfctl = pkt->pkt_cmd_fhdr.df_ctl;
4131 iocb->ULPCONTEXT = pkt->pkt_cmd_fhdr.rx_id;
4132 } else {
4133 /* CT Request */
4134 iocb->ULPCOMMAND = CMD_GEN_REQUEST64_CR;
4135 iocb->un.genreq64.w5.hcsw.Dfctl = 0;
4136 iocb->ULPCONTEXT = ndlp->nlp_Rpi;
4137 }
4138
4139 iocb->un.genreq64.w5.hcsw.Rctl = pkt->pkt_cmd_fhdr.r_ctl;
4140 iocb->un.genreq64.w5.hcsw.Type = pkt->pkt_cmd_fhdr.type;
4141
4142 iocb->ULPIOTAG = iotag;
4143 iocb->ULPRSVDBYTE =
4144 ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
4145 iocb->ULPOWNER = OWN_CHIP;
4146
4147 switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
4148 case FC_TRAN_CLASS1:
4149 iocb->ULPCLASS = CLASS1;
4150 break;
4151 case FC_TRAN_CLASS2:
4152 iocb->ULPCLASS = CLASS2;
4153 break;
4154 case FC_TRAN_CLASS3:
4155 default:
4156 iocb->ULPCLASS = CLASS3;
4157 break;
4158 }
4159
4160 return (FC_SUCCESS);
4161
4162 } /* emlxs_sli3_prep_ct_iocb() */


#ifdef SFCT_SUPPORT
static uint32_t
emlxs_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	uint32_t rval;

	if (sbp->fct_buf->db_sglist_length != 1) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_error_msg,
		    "fct_bde_setup: Only 1 sglist entry supported: %d",
		    sbp->fct_buf->db_sglist_length);
		return (1);
	}

	if (hba->sli_mode < EMLXS_HBA_SLI3_MODE) {
		rval = emlxs_sli2_fct_bde_setup(port, sbp);
	} else {
		rval = emlxs_sli3_fct_bde_setup(port, sbp);
	}

	return (rval);

} /* emlxs_fct_bde_setup() */
#endif /* SFCT_SUPPORT */


static uint32_t
emlxs_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	uint32_t rval;
	emlxs_hba_t *hba = HBA;

	if (hba->sli_mode < EMLXS_HBA_SLI3_MODE) {
		rval = emlxs_sli2_bde_setup(port, sbp);
	} else {
		rval = emlxs_sli3_bde_setup(port, sbp);
	}

	return (rval);

} /* emlxs_bde_setup() */


static void
emlxs_sli3_poll_intr(emlxs_hba_t *hba)
{
	uint32_t ha_copy;

	/* Check attention bits once and process if required */

	ha_copy = emlxs_check_attention(hba);

	if (ha_copy == 0) {
		return;
	}

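	/*
	 * The unlocked check above is only a cheap filter; re-read the
	 * attention bits under EMLXS_PORT_LOCK (which also acknowledges
	 * them) before processing.
	 */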
	mutex_enter(&EMLXS_PORT_LOCK);
	ha_copy = emlxs_get_attention(hba, -1);
	mutex_exit(&EMLXS_PORT_LOCK);

	emlxs_proc_attention(hba, ha_copy);

	return;

} /* emlxs_sli3_poll_intr() */


#ifdef MSI_SUPPORT
static uint32_t
emlxs_sli3_msi_intr(char *arg1, char *arg2)
{
	emlxs_hba_t *hba = (emlxs_hba_t *)arg1;
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif /* FMA_SUPPORT */
	uint16_t msgid;
	uint32_t hc_copy;
	uint32_t ha_copy;
	uint32_t restore = 0;

	/*
	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	 * "sli3_msi_intr: arg1=%p arg2=%p", arg1, arg2);
	 */

	/* Check for legacy interrupt handling */
	if (hba->intr_type == DDI_INTR_TYPE_FIXED) {
		mutex_enter(&EMLXS_PORT_LOCK);

		if (hba->flag & FC_OFFLINE_MODE) {
			mutex_exit(&EMLXS_PORT_LOCK);

			if (hba->bus_type == SBUS_FC) {
				return (DDI_INTR_CLAIMED);
			} else {
				return (DDI_INTR_UNCLAIMED);
			}
		}

		/* Get host attention bits */
		ha_copy = emlxs_get_attention(hba, -1);

		if (ha_copy == 0) {
			if (hba->intr_unclaimed) {
				mutex_exit(&EMLXS_PORT_LOCK);
				return (DDI_INTR_UNCLAIMED);
			}

			hba->intr_unclaimed = 1;
		} else {
			hba->intr_unclaimed = 0;
		}

		mutex_exit(&EMLXS_PORT_LOCK);

		/* Process the interrupt */
		emlxs_proc_attention(hba, ha_copy);

		return (DDI_INTR_CLAIMED);
	}

	/* DDI_INTR_TYPE_MSI  */
	/* DDI_INTR_TYPE_MSIX */

	/* Get MSI message id */
	msgid = (uint16_t)((unsigned long)arg2);

	/* Validate the message id */
	if (msgid >= hba->intr_count) {
		msgid = 0;
	}

	mutex_enter(&EMLXS_INTR_LOCK(msgid));

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Check if adapter is offline */
	if (hba->flag & FC_OFFLINE_MODE) {
		mutex_exit(&EMLXS_PORT_LOCK);
		mutex_exit(&EMLXS_INTR_LOCK(msgid));

		/* Always claim an MSI interrupt */
		return (DDI_INTR_CLAIMED);
	}

	/* Disable interrupts associated with this msgid */
	if (msgid == 0 && (hba->model_info.chip == EMLXS_ZEPHYR_CHIP)) {
		hc_copy = hba->sli.sli3.hc_copy & ~hba->intr_mask;
		WRITE_CSR_REG(hba, FC_HC_REG(hba), hc_copy);
		restore = 1;
	}
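	/*
	 * (Zephyr-specific: this vector's interrupt sources are masked
	 * in the HC register while the attention is serviced and are
	 * restored once processing completes; see "Restore interrupts"
	 * below.)
	 */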

	/* Get host attention bits */
	ha_copy = emlxs_get_attention(hba, msgid);

	mutex_exit(&EMLXS_PORT_LOCK);

	/* Process the interrupt */
	emlxs_proc_attention(hba, ha_copy);

	/* Restore interrupts */
	if (restore) {
		mutex_enter(&EMLXS_PORT_LOCK);
		WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
#ifdef FMA_SUPPORT
		/* Access handle validation */
		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif /* FMA_SUPPORT */
		mutex_exit(&EMLXS_PORT_LOCK);
	}

	mutex_exit(&EMLXS_INTR_LOCK(msgid));

	return (DDI_INTR_CLAIMED);

} /* emlxs_sli3_msi_intr() */
#endif /* MSI_SUPPORT */


static int
emlxs_sli3_intx_intr(char *arg)
{
	emlxs_hba_t *hba = (emlxs_hba_t *)arg;
	uint32_t ha_copy = 0;

	mutex_enter(&EMLXS_PORT_LOCK);

	if (hba->flag & FC_OFFLINE_MODE) {
		mutex_exit(&EMLXS_PORT_LOCK);

		if (hba->bus_type == SBUS_FC) {
			return (DDI_INTR_CLAIMED);
		} else {
			return (DDI_INTR_UNCLAIMED);
		}
	}

	/* Get host attention bits */
	ha_copy = emlxs_get_attention(hba, -1);

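	/*
	 * Claim the first interrupt that shows no attention bits but
	 * remember it; if the very next interrupt is also spurious,
	 * return it unclaimed so the framework can detect a stuck line.
	 */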
	if (ha_copy == 0) {
		if (hba->intr_unclaimed) {
			mutex_exit(&EMLXS_PORT_LOCK);
			return (DDI_INTR_UNCLAIMED);
		}

		hba->intr_unclaimed = 1;
	} else {
		hba->intr_unclaimed = 0;
	}

	mutex_exit(&EMLXS_PORT_LOCK);

	/* Process the interrupt */
	emlxs_proc_attention(hba, ha_copy);

	return (DDI_INTR_CLAIMED);

} /* emlxs_sli3_intx_intr() */


/* EMLXS_PORT_LOCK must be held when calling this routine */
static uint32_t
emlxs_get_attention(emlxs_hba_t *hba, int32_t msgid)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif /* FMA_SUPPORT */
	uint32_t ha_copy = 0;
	uint32_t ha_copy2;
	uint32_t mask = hba->sli.sli3.hc_copy;

#ifdef MSI_SUPPORT

read_ha_register:

	/* Check for default MSI interrupt */
	if (msgid == 0) {
		/* Read host attention register to determine interrupt source */
		ha_copy2 = READ_CSR_REG(hba, FC_HA_REG(hba));

		/* Filter out MSI non-default attention bits */
		ha_copy2 &= ~(hba->intr_cond);
	}

	/* Check for polled or fixed type interrupt */
	else if (msgid == -1) {
		/* Read host attention register to determine interrupt source */
		ha_copy2 = READ_CSR_REG(hba, FC_HA_REG(hba));
	}

	/* Otherwise, assume a mapped MSI interrupt */
	else {
		/* Convert MSI msgid to mapped attention bits */
		ha_copy2 = hba->intr_map[msgid];
	}

#else /* !MSI_SUPPORT */

	/* Read host attention register to determine interrupt source */
	ha_copy2 = READ_CSR_REG(hba, FC_HA_REG(hba));

#endif /* MSI_SUPPORT */

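	/*
	 * Mask off any attention bits whose corresponding interrupt
	 * enables are clear in the cached host control register, so a
	 * disabled source is never serviced.
	 */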
	/* Check if Hardware error interrupt is enabled */
	if ((ha_copy2 & HA_ERATT) && !(mask & HC_ERINT_ENA)) {
		ha_copy2 &= ~HA_ERATT;
	}

	/* Check if link interrupt is enabled */
	if ((ha_copy2 & HA_LATT) && !(mask & HC_LAINT_ENA)) {
		ha_copy2 &= ~HA_LATT;
	}

	/* Check if Mailbox interrupt is enabled */
	if ((ha_copy2 & HA_MBATT) && !(mask & HC_MBINT_ENA)) {
		ha_copy2 &= ~HA_MBATT;
	}

	/* Check if ring0 interrupt is enabled */
	if ((ha_copy2 & HA_R0ATT) && !(mask & HC_R0INT_ENA)) {
		ha_copy2 &= ~HA_R0ATT;
	}

	/* Check if ring1 interrupt is enabled */
	if ((ha_copy2 & HA_R1ATT) && !(mask & HC_R1INT_ENA)) {
		ha_copy2 &= ~HA_R1ATT;
	}

	/* Check if ring2 interrupt is enabled */
	if ((ha_copy2 & HA_R2ATT) && !(mask & HC_R2INT_ENA)) {
		ha_copy2 &= ~HA_R2ATT;
	}

	/* Check if ring3 interrupt is enabled */
	if ((ha_copy2 & HA_R3ATT) && !(mask & HC_R3INT_ENA)) {
		ha_copy2 &= ~HA_R3ATT;
	}

	/* Accumulate attention bits */
	ha_copy |= ha_copy2;

	/* Clear attentions except for error, link, and autoclear(MSIX) */
	ha_copy2 &= ~(HA_ERATT | HA_LATT);	/* | hba->intr_autoClear */

	if (ha_copy2) {
		WRITE_CSR_REG(hba, FC_HA_REG(hba), ha_copy2);
	}

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif /* FMA_SUPPORT */

	return (ha_copy);

} /* emlxs_get_attention() */


static void
emlxs_proc_attention(emlxs_hba_t *hba, uint32_t ha_copy)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif /* FMA_SUPPORT */

	/* ha_copy should be pre-filtered */

	/*
	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	 * "proc_attention: ha_copy=%x", ha_copy);
	 */

	if (hba->state < FC_WARM_START) {
		return;
	}

	if (!ha_copy) {
		return;
	}

	if (hba->bus_type == SBUS_FC) {
		(void) READ_SBUS_CSR_REG(hba, FC_SHS_REG(hba));
	}

	/* Adapter error */
	if (ha_copy & HA_ERATT) {
		HBASTATS.IntrEvent[6]++;
		emlxs_handle_ff_error(hba);
		return;
	}

	/* Mailbox interrupt */
	if (ha_copy & HA_MBATT) {
		HBASTATS.IntrEvent[5]++;
		(void) emlxs_handle_mb_event(hba);
	}

	/* Link Attention interrupt */
	if (ha_copy & HA_LATT) {
		HBASTATS.IntrEvent[4]++;
		emlxs_sli3_handle_link_event(hba);
	}

	/* event on ring 0 - FCP Ring */
	if (ha_copy & HA_R0ATT) {
		HBASTATS.IntrEvent[0]++;
		emlxs_sli3_handle_ring_event(hba, 0, ha_copy);
	}

	/* event on ring 1 - IP Ring */
	if (ha_copy & HA_R1ATT) {
		HBASTATS.IntrEvent[1]++;
		emlxs_sli3_handle_ring_event(hba, 1, ha_copy);
	}

	/* event on ring 2 - ELS Ring */
	if (ha_copy & HA_R2ATT) {
		HBASTATS.IntrEvent[2]++;
		emlxs_sli3_handle_ring_event(hba, 2, ha_copy);
	}

	/* event on ring 3 - CT Ring */
	if (ha_copy & HA_R3ATT) {
		HBASTATS.IntrEvent[3]++;
		emlxs_sli3_handle_ring_event(hba, 3, ha_copy);
	}

	if (hba->bus_type == SBUS_FC) {
		WRITE_SBUS_CSR_REG(hba, FC_SHS_REG(hba), SBUS_STAT_IP);
	}

	/* Set heartbeat flag to show activity */
	hba->heartbeat_flag = 1;

#ifdef FMA_SUPPORT
	if (hba->bus_type == SBUS_FC) {
		/* Access handle validation */
		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.sbus_csr_handle);
	}
#endif /* FMA_SUPPORT */

	return;

} /* emlxs_proc_attention() */


/*
 * emlxs_handle_ff_error()
 *
 * Description: Processes a FireFly error
 * Runs at Interrupt level
 */
static void
emlxs_handle_ff_error(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	uint32_t status;
	uint32_t status1;
	uint32_t status2;
	int i = 0;

	/* do what needs to be done, get error from STATUS REGISTER */
	status = READ_CSR_REG(hba, FC_HS_REG(hba));

	/* Clear Chip error bit */
	WRITE_CSR_REG(hba, FC_HA_REG(hba), HA_ERATT);

	/* If HS_FFER1 is set, then wait until the HS_FFER1 bit clears */
	if (status & HS_FFER1) {

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
		    "HS_FFER1 received");
		EMLXS_STATE_CHANGE(hba, FC_ERROR);
		(void) emlxs_offline(hba, 1);
		while ((status & HS_FFER1) && (i < 300)) {
			status =
			    READ_CSR_REG(hba, FC_HS_REG(hba));
			BUSYWAIT_MS(1000);
			i++;
		}
	}

	if (i == 300) {
		/* 5 minutes is up, shutdown HBA */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
		    "HS_FFER1 clear timeout");

		EMLXS_STATE_CHANGE(hba, FC_ERROR);
		emlxs_thread_spawn(hba, emlxs_shutdown_thread, NULL, NULL);

		goto done;
	}

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
	    "HS_FFER1 cleared");

	if (status & HS_OVERTEMP) {
		status1 =
		    READ_SLIM_ADDR(hba,
		    ((volatile uint8_t *)hba->sli.sli3.slim_addr + 0xb0));

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
		    "Maximum adapter temperature exceeded (%d °C).", status1);

		hba->temperature = status1;
		hba->flag |= FC_OVERTEMP_EVENT;

		EMLXS_STATE_CHANGE(hba, FC_ERROR);
		emlxs_thread_spawn(hba, emlxs_shutdown_thread,
		    NULL, NULL);

	} else {
		status1 =
		    READ_SLIM_ADDR(hba,
		    ((volatile uint8_t *)hba->sli.sli3.slim_addr + 0xa8));
		status2 =
		    READ_SLIM_ADDR(hba,
		    ((volatile uint8_t *)hba->sli.sli3.slim_addr + 0xac));

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
		    "Host Error Attention: "
		    "status=0x%x status1=0x%x status2=0x%x",
		    status, status1, status2);

		EMLXS_STATE_CHANGE(hba, FC_ERROR);

		if (status & HS_FFER6) {
			emlxs_thread_spawn(hba, emlxs_restart_thread,
			    NULL, NULL);
		} else {
			emlxs_thread_spawn(hba, emlxs_shutdown_thread,
			    NULL, NULL);
		}
	}

done:
#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif /* FMA_SUPPORT */

	return;

} /* emlxs_handle_ff_error() */


/*
 * emlxs_sli3_handle_link_event()
 *
 * Description: Process a Link Attention.
 */
static void
emlxs_sli3_handle_link_event(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	MAILBOXQ *mbq;
	int rc;

	HBASTATS.LinkEvent++;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_event_msg, "event=%x",
	    HBASTATS.LinkEvent);

	/* Make sure link is declared down */
	emlxs_linkdown(hba);

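	/*
	 * Issue a READ_LA mailbox command to fetch the link attention
	 * details; the actual link-state processing is expected to
	 * happen in that command's completion path. Here we only post
	 * the request and acknowledge the attention bit.
	 */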
	/* Get a buffer which will be used for mailbox commands */
	if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))) {
		/* Get link attention message */
		if (emlxs_mb_read_la(hba, mbq) == 0) {
			rc = emlxs_sli3_issue_mbox_cmd(hba, mbq,
			    MBX_NOWAIT, 0);
			if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
				emlxs_mem_put(hba, MEM_MBOX,
				    (void *)mbq);
			}

			mutex_enter(&EMLXS_PORT_LOCK);

			/*
			 * Clear Link Attention in HA REG
			 */
			WRITE_CSR_REG(hba, FC_HA_REG(hba), HA_LATT);

#ifdef FMA_SUPPORT
			/* Access handle validation */
			EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif /* FMA_SUPPORT */

			mutex_exit(&EMLXS_PORT_LOCK);
		} else {
			emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
		}
	}

} /* emlxs_sli3_handle_link_event() */


/*
 * emlxs_sli3_handle_ring_event()
 *
 * Description: Process a Ring Attention.
 */
static void
emlxs_sli3_handle_ring_event(emlxs_hba_t *hba, int32_t ring_no,
    uint32_t ha_copy)
{
	emlxs_port_t *port = &PPORT;
	SLIM2 *slim2p = (SLIM2 *)hba->sli.sli3.slim2.virt;
	CHANNEL *cp;
	RING *rp;
	IOCB *entry;
	IOCBQ *iocbq;
	IOCBQ local_iocbq;
	PGP *pgp;
	uint32_t count;
	volatile uint32_t chipatt;
	void *ioa2;
	uint32_t reg;
	uint32_t channel_no;
	off_t offset;
	IOCBQ *rsp_head = NULL;
	IOCBQ *rsp_tail = NULL;
	emlxs_buf_t *sbp = NULL;

	count = 0;
	rp = &hba->sli.sli3.ring[ring_no];
	cp = rp->channelp;
	channel_no = cp->channelno;

	/*
	 * Isolate this ring's host attention bits
	 * This makes all ring attention bits equal
	 * to Ring0 attention bits
	 */
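	/*
	 * (Each ring owns four consecutive bits in the HA register,
	 * Ring 0 at bits 3:0, so shifting right by ring_no * 4 lines
	 * any ring up with the Ring 0 bit definitions.)
	 */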
	reg = (ha_copy >> (ring_no * 4)) & 0x0f;

	/*
	 * Gather iocb entries off response ring.
	 * Ensure entry is owned by the host.
	 */
	pgp = (PGP *)&slim2p->mbx.us.s2.port[ring_no];
	offset =
	    (off_t)((uint64_t)((unsigned long)&(pgp->rspPutInx)) -
	    (uint64_t)((unsigned long)slim2p));
	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, 4,
	    DDI_DMA_SYNC_FORKERNEL);
	rp->fc_port_rspidx = BE_SWAP32(pgp->rspPutInx);

	/* While ring is not empty */
	while (rp->fc_rspidx != rp->fc_port_rspidx) {
		HBASTATS.IocbReceived[channel_no]++;

		/* Get the next response ring iocb */
		entry =
		    (IOCB *)(((char *)rp->fc_rspringaddr +
		    (rp->fc_rspidx * hba->sli.sli3.iocb_rsp_size)));

		/* DMA sync the response ring iocb for the adapter */
		offset = (off_t)((uint64_t)((unsigned long)entry)
		    - (uint64_t)((unsigned long)slim2p));
		EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
		    hba->sli.sli3.iocb_rsp_size, DDI_DMA_SYNC_FORKERNEL);

		count++;

		/* Copy word6 and word7 to local iocb for now */
		iocbq = &local_iocbq;

		BE_SWAP32_BCOPY((uint8_t *)entry + (sizeof (uint32_t) * 6),
		    (uint8_t *)iocbq + (sizeof (uint32_t) * 6),
		    (sizeof (uint32_t) * 2));
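		/*
		 * Words 6 and 7 carry ULPLE, ULPIOTAG, ULPCOMMAND and
		 * ULPSTATUS, which is all that is needed to triage the
		 * entry; the full IOCB is copied only once its
		 * destination is known.
		 */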

		/* when LE is not set, entire Command has not been received */
		if (!iocbq->iocb.ULPLE) {
			/* This should never happen */
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ring_error_msg,
			    "ulpLE is not set. "
			    "ring=%d iotag=%d cmd=%x status=%x",
			    channel_no, iocbq->iocb.ULPIOTAG,
			    iocbq->iocb.ULPCOMMAND, iocbq->iocb.ULPSTATUS);

			goto next;
		}

		sbp = NULL;
		switch (iocbq->iocb.ULPCOMMAND) {
#ifdef SFCT_SUPPORT
		case CMD_CLOSE_XRI_CX:
		case CMD_CLOSE_XRI_CN:
		case CMD_ABORT_XRI_CX:
			if (port->mode == MODE_TARGET) {
				sbp = emlxs_unregister_pkt(cp,
				    iocbq->iocb.ULPIOTAG, 0);
			}
			break;
#endif /* SFCT_SUPPORT */

		/* Ring 0 registered commands */
		case CMD_FCP_ICMND_CR:
		case CMD_FCP_ICMND_CX:
		case CMD_FCP_IREAD_CR:
		case CMD_FCP_IREAD_CX:
		case CMD_FCP_IWRITE_CR:
		case CMD_FCP_IWRITE_CX:
		case CMD_FCP_ICMND64_CR:
		case CMD_FCP_ICMND64_CX:
		case CMD_FCP_IREAD64_CR:
		case CMD_FCP_IREAD64_CX:
		case CMD_FCP_IWRITE64_CR:
		case CMD_FCP_IWRITE64_CX:
#ifdef SFCT_SUPPORT
		case CMD_FCP_TSEND_CX:
		case CMD_FCP_TSEND64_CX:
		case CMD_FCP_TRECEIVE_CX:
		case CMD_FCP_TRECEIVE64_CX:
		case CMD_FCP_TRSP_CX:
		case CMD_FCP_TRSP64_CX:
#endif /* SFCT_SUPPORT */

		/* Ring 1 registered commands */
		case CMD_XMIT_BCAST_CN:
		case CMD_XMIT_BCAST_CX:
		case CMD_XMIT_SEQUENCE_CX:
		case CMD_XMIT_SEQUENCE_CR:
		case CMD_XMIT_BCAST64_CN:
		case CMD_XMIT_BCAST64_CX:
		case CMD_XMIT_SEQUENCE64_CX:
		case CMD_XMIT_SEQUENCE64_CR:
		case CMD_CREATE_XRI_CR:
		case CMD_CREATE_XRI_CX:

		/* Ring 2 registered commands */
		case CMD_ELS_REQUEST_CR:
		case CMD_ELS_REQUEST_CX:
		case CMD_XMIT_ELS_RSP_CX:
		case CMD_ELS_REQUEST64_CR:
		case CMD_ELS_REQUEST64_CX:
		case CMD_XMIT_ELS_RSP64_CX:

		/* Ring 3 registered commands */
		case CMD_GEN_REQUEST64_CR:
		case CMD_GEN_REQUEST64_CX:

			sbp =
			    emlxs_unregister_pkt(cp, iocbq->iocb.ULPIOTAG, 0);
			break;
		}

		/* If packet is stale, then drop it. */
		if (sbp == STALE_PACKET) {
			cp->hbaCmplCmd_sbp++;
			/* Copy entry to the local iocbq */
			BE_SWAP32_BCOPY((uint8_t *)entry,
			    (uint8_t *)iocbq, hba->sli.sli3.iocb_rsp_size);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_iocb_stale_msg,
			    "channelno=%d iocb=%p cmd=%x status=%x "
			    "error=%x iotag=%d context=%x info=%x",
			    channel_no, iocbq, (uint8_t)iocbq->iocb.ULPCOMMAND,
			    iocbq->iocb.ULPSTATUS,
			    (uint8_t)iocbq->iocb.un.grsp.perr.statLocalError,
			    (uint16_t)iocbq->iocb.ULPIOTAG,
			    (uint16_t)iocbq->iocb.ULPCONTEXT,
			    (uint8_t)iocbq->iocb.ULPRSVDBYTE);

			goto next;
		}

		/*
		 * If a packet was found, then queue the packet's
		 * iocb for deferred processing
		 */
		else if (sbp) {
#ifdef SFCT_SUPPORT
			fct_cmd_t *fct_cmd;
			emlxs_buf_t *cmd_sbp;

			fct_cmd = sbp->fct_cmd;
			if (fct_cmd) {
				cmd_sbp =
				    (emlxs_buf_t *)fct_cmd->cmd_fca_private;
				mutex_enter(&cmd_sbp->fct_mtx);
				EMLXS_FCT_STATE_CHG(fct_cmd, cmd_sbp,
				    EMLXS_FCT_IOCB_COMPLETE);
				mutex_exit(&cmd_sbp->fct_mtx);
			}
#endif /* SFCT_SUPPORT */
			cp->hbaCmplCmd_sbp++;
			atomic_dec_32(&hba->io_active);
#ifdef NODE_THROTTLE_SUPPORT
			if (sbp->node) {
				atomic_dec_32(&sbp->node->io_active);
			}
#endif /* NODE_THROTTLE_SUPPORT */

			/* Copy entry to sbp's iocbq */
			iocbq = &sbp->iocbq;
			BE_SWAP32_BCOPY((uint8_t *)entry,
			    (uint8_t *)iocbq, hba->sli.sli3.iocb_rsp_size);

			iocbq->next = NULL;

			/*
			 * If this is NOT a polled command completion
			 * or a driver allocated pkt, then defer pkt
			 * completion.
			 */
			if (!(sbp->pkt_flags &
			    (PACKET_POLLED | PACKET_ALLOCATED))) {
				/* Add the IOCB to the local list */
				if (!rsp_head) {
					rsp_head = iocbq;
				} else {
					rsp_tail->next = iocbq;
				}

				rsp_tail = iocbq;

				goto next;
			}
		} else {
			cp->hbaCmplCmd++;
			/* Copy entry to the local iocbq */
			BE_SWAP32_BCOPY((uint8_t *)entry,
			    (uint8_t *)iocbq, hba->sli.sli3.iocb_rsp_size);

			iocbq->next = NULL;
			iocbq->bp = NULL;
			iocbq->port = &PPORT;
			iocbq->channel = cp;
			iocbq->node = NULL;
			iocbq->sbp = NULL;
			iocbq->flag = 0;
		}

		/* process the channel event now */
		emlxs_proc_channel_event(hba, cp, iocbq);

next:
		/* Increment the driver's local response get index */
		if (++rp->fc_rspidx >= rp->fc_numRiocb) {
			rp->fc_rspidx = 0;
		}

	} /* while (ring not empty) */

	if (rsp_head) {
		mutex_enter(&cp->rsp_lock);
		if (cp->rsp_head == NULL) {
			cp->rsp_head = rsp_head;
			cp->rsp_tail = rsp_tail;
		} else {
			cp->rsp_tail->next = rsp_head;
			cp->rsp_tail = rsp_tail;
		}
		mutex_exit(&cp->rsp_lock);

		emlxs_thread_trigger2(&cp->intr_thread, emlxs_proc_channel, cp);
	}

	/* Check if at least one response entry was processed */
	if (count) {
		/* Update response get index for the adapter */
		if (hba->bus_type == SBUS_FC) {
			slim2p->mbx.us.s2.host[channel_no].rspGetInx
			    = BE_SWAP32(rp->fc_rspidx);

			/* DMA sync the index for the adapter */
			offset = (off_t)
			    ((uint64_t)((unsigned long)&(slim2p->mbx.us.s2.
			    host[channel_no].rspGetInx))
			    - (uint64_t)((unsigned long)slim2p));
			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
			    offset, 4, DDI_DMA_SYNC_FORDEV);
		} else {
			ioa2 =
			    (void *)((char *)hba->sli.sli3.slim_addr +
			    hba->sli.sli3.hgp_ring_offset + (((channel_no * 2) +
			    1) * sizeof (uint32_t)));
			WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2,
			    rp->fc_rspidx);
#ifdef FMA_SUPPORT
			/* Access handle validation */
			EMLXS_CHK_ACC_HANDLE(hba,
			    hba->sli.sli3.slim_acc_handle);
#endif /* FMA_SUPPORT */
		}

		if (reg & HA_R0RE_REQ) {
			/* HBASTATS.chipRingFree++; */

			mutex_enter(&EMLXS_PORT_LOCK);

			/* Tell the adapter we serviced the ring */
			chipatt = ((CA_R0ATT | CA_R0RE_RSP) <<
			    (channel_no * 4));
			WRITE_CSR_REG(hba, FC_CA_REG(hba), chipatt);

#ifdef FMA_SUPPORT
			/* Access handle validation */
			EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif /* FMA_SUPPORT */

			mutex_exit(&EMLXS_PORT_LOCK);
		}
	}

	if ((reg & HA_R0CE_RSP) || hba->channel_tx_count) {
		/* HBASTATS.hostRingFree++; */

		/* Cmd ring may be available. Try sending more iocbs */
		emlxs_sli3_issue_iocb_cmd(hba, cp, 0);
	}

	/* HBASTATS.ringEvent++; */

	return;

} /* emlxs_sli3_handle_ring_event() */


extern int
emlxs_handle_rcv_seq(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
{
	emlxs_port_t *port = &PPORT;
	IOCB *iocb;
	RING *rp;
	MATCHMAP *mp = NULL;
	uint64_t bdeAddr;
	uint32_t vpi = 0;
	uint32_t channelno;
	uint32_t size = 0;
	uint32_t *RcvError;
	uint32_t *RcvDropped;
	uint32_t *UbPosted;
	emlxs_msg_t *dropped_msg;
	char error_str[64];
	uint32_t buf_type;
	uint32_t *word;

	channelno = cp->channelno;
	rp = &hba->sli.sli3.ring[channelno];

	iocb = &iocbq->iocb;
	word = (uint32_t *)iocb;

	switch (channelno) {
#ifdef SFCT_SUPPORT
	case FC_FCT_RING:
		HBASTATS.FctRingEvent++;
		RcvError = &HBASTATS.FctRingError;
		RcvDropped = &HBASTATS.FctRingDropped;
		UbPosted = &HBASTATS.FctUbPosted;
		dropped_msg = &emlxs_fct_detail_msg;
		buf_type = MEM_FCTBUF;
		break;
#endif /* SFCT_SUPPORT */

	case FC_IP_RING:
		HBASTATS.IpRcvEvent++;
		RcvError = &HBASTATS.IpDropped;
		RcvDropped = &HBASTATS.IpDropped;
		UbPosted = &HBASTATS.IpUbPosted;
		dropped_msg = &emlxs_unsol_ip_dropped_msg;
		buf_type = MEM_IPBUF;
		break;

	case FC_ELS_RING:
		HBASTATS.ElsRcvEvent++;
		RcvError = &HBASTATS.ElsRcvError;
		RcvDropped = &HBASTATS.ElsRcvDropped;
		UbPosted = &HBASTATS.ElsUbPosted;
		dropped_msg = &emlxs_unsol_els_dropped_msg;
		buf_type = MEM_ELSBUF;
		break;

	case FC_CT_RING:
		HBASTATS.CtRcvEvent++;
		RcvError = &HBASTATS.CtRcvError;
		RcvDropped = &HBASTATS.CtRcvDropped;
		UbPosted = &HBASTATS.CtUbPosted;
		dropped_msg = &emlxs_unsol_ct_dropped_msg;
		buf_type = MEM_CTBUF;
		break;

	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_iocb_invalid_msg,
		    "channel=%d cmd=%x %s %x %x %x %x",
		    channelno, iocb->ULPCOMMAND,
		    emlxs_state_xlate(iocb->ULPSTATUS), word[4], word[5],
		    word[6], word[7]);
		return (1);
	}

	if (iocb->ULPSTATUS) {
		if ((iocb->ULPSTATUS == IOSTAT_LOCAL_REJECT) &&
		    (iocb->un.grsp.perr.statLocalError ==
		    IOERR_RCV_BUFFER_TIMEOUT)) {
			(void) strlcpy(error_str, "Out of posted buffers:",
			    sizeof (error_str));
			iocb->ULPBDECOUNT = 0;
		} else if ((iocb->ULPSTATUS == IOSTAT_LOCAL_REJECT) &&
		    (iocb->un.grsp.perr.statLocalError ==
		    IOERR_RCV_BUFFER_WAITING)) {
			(void) strlcpy(error_str, "Buffer waiting:",
			    sizeof (error_str));
			iocb->ULPBDECOUNT = 0;
			goto done;
		} else if (iocb->ULPSTATUS == IOSTAT_NEED_BUFF_ENTRY) {
			(void) strlcpy(error_str, "Need Buffer Entry:",
			    sizeof (error_str));
			iocb->ULPBDECOUNT = 0;
			goto done;
		} else {
			(void) strlcpy(error_str, "General error:",
			    sizeof (error_str));
		}

		goto failed;
	}

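	/*
	 * Locate the receive buffer. With host buffer queues (HBQs)
	 * the adapter echoes the HBQ id and entry tag so the posted
	 * buffer can be looked up directly; otherwise the IOCB carries
	 * the buffer's physical address, which must be mapped back to
	 * its virtual address.
	 */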
	if (hba->flag & FC_HBQ_ENABLED) {
		HBQ_INIT_t *hbq;
		HBQE_t *hbqE;
		uint32_t hbqe_tag;
		uint32_t hbq_id;

		(*UbPosted)--;

		hbqE = (HBQE_t *)iocb;
		hbq_id = hbqE->unt.ext.HBQ_tag;
		hbqe_tag = hbqE->unt.ext.HBQE_tag;

		hbq = &hba->sli.sli3.hbq_table[hbq_id];

		if (hbqe_tag >= hbq->HBQ_numEntries) {
			(void) snprintf(error_str, sizeof (error_str),
			    "Invalid HBQE iotag=%d:", hbqe_tag);
			goto dropped;
		}

		mp = hba->sli.sli3.hbq_table[hbq_id].HBQ_PostBufs[hbqe_tag];

		size = iocb->unsli3.ext_rcv.seq_len;
	} else {
		bdeAddr =
		    PADDR(iocb->un.cont64[0].addrHigh,
		    iocb->un.cont64[0].addrLow);

		/* Check for invalid buffer */
		if (iocb->un.cont64[0].tus.f.bdeFlags & BUFF_TYPE_INVALID) {
			(void) strlcpy(error_str, "Invalid buffer:",
			    sizeof (error_str));
			goto dropped;
		}

		mp = emlxs_mem_get_vaddr(hba, rp, bdeAddr);

		size = iocb->un.rcvseq64.rcvBde.tus.f.bdeSize;
	}

	if (!mp) {
		(void) strlcpy(error_str, "Buffer not mapped:",
		    sizeof (error_str));
		goto dropped;
	}

#ifdef FMA_SUPPORT
	if (mp->dma_handle) {
		if (emlxs_fm_check_dma_handle(hba, mp->dma_handle)
		    != DDI_FM_OK) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_invalid_dma_handle_msg,
			    "handle_rcv_seq: hdl=%p",
			    mp->dma_handle);
			goto dropped;
		}
	}
#endif /* FMA_SUPPORT */

	if (!size) {
		(void) strlcpy(error_str, "Buffer empty:", sizeof (error_str));
		goto dropped;
	}

	/* Skip the VPI check on the IP ring so broadcast packets */
	/* are not dropped */
	if (channelno != FC_IP_RING) {
		/* Get virtual port */
		if (hba->flag & FC_NPIV_ENABLED) {
			vpi = iocb->unsli3.ext_rcv.vpi;
			if (vpi >= hba->vpi_max) {
				(void) snprintf(error_str, sizeof (error_str),
				    "Invalid VPI=%d:", vpi);
				goto dropped;
			}

			port = &VPORT(vpi);
		}
	}

	/* Process request */
	switch (channelno) {
	case FC_FCT_RING:
		if (port->mode == MODE_INITIATOR) {
			(void) strlcpy(error_str, "Target mode disabled:",
			    sizeof (error_str));
			goto dropped;
#ifdef SFCT_SUPPORT
		} else if (port->mode == MODE_TARGET) {
			(void) emlxs_fct_handle_unsol_req(port, cp, iocbq, mp,
			    size);
#endif /* SFCT_SUPPORT */
		} else {
			(void) snprintf(error_str, sizeof (error_str),
			    "Invalid mode=%x:", port->mode);
			goto dropped;
		}
		break;

	case FC_IP_RING:
		if (port->mode == MODE_INITIATOR) {
			(void) emlxs_ip_handle_unsol_req(port, cp, iocbq,
			    mp, size);
#ifdef SFCT_SUPPORT
		} else if (port->mode == MODE_TARGET) {
			(void) strlcpy(error_str, "Initiator mode disabled:",
			    sizeof (error_str));
			goto dropped;
#endif /* SFCT_SUPPORT */
		} else {
			(void) snprintf(error_str, sizeof (error_str),
			    "Invalid mode=%x:", port->mode);
			goto dropped;
		}
		break;

	case FC_ELS_RING:
		if (port->mode == MODE_INITIATOR) {
			(void) emlxs_els_handle_unsol_req(port, cp, iocbq, mp,
			    size);
#ifdef SFCT_SUPPORT
		} else if (port->mode == MODE_TARGET) {
			(void) emlxs_fct_handle_unsol_els(port, cp, iocbq, mp,
			    size);
#endif /* SFCT_SUPPORT */
		} else {
			(void) snprintf(error_str, sizeof (error_str),
			    "Invalid mode=%x:", port->mode);
			goto dropped;
		}
		break;

	case FC_CT_RING:
		(void) emlxs_ct_handle_unsol_req(port, cp, iocbq, mp, size);
		break;
	}

	goto done;

dropped:
	(*RcvDropped)++;

	EMLXS_MSGF(EMLXS_CONTEXT, dropped_msg,
	    "%s: cmd=%x %s %x %x %x %x",
	    error_str, iocb->ULPCOMMAND, emlxs_state_xlate(iocb->ULPSTATUS),
	    word[4], word[5], word[6], word[7]);

	if (channelno == FC_FCT_RING) {
		uint32_t sid;

		if (hba->sli_mode == EMLXS_HBA_SLI3_MODE) {
			emlxs_node_t *ndlp;
			ndlp = emlxs_node_find_rpi(port, iocb->ULPIOTAG);
			if (! ndlp) {
				goto done;
			}
			sid = ndlp->nlp_DID;
		} else {
			sid = iocb->un.ulpWord[4] & 0xFFFFFF;
		}

		emlxs_send_logo(port, sid);
	}

	goto done;

failed:
	(*RcvError)++;

	EMLXS_MSGF(EMLXS_CONTEXT, dropped_msg,
	    "%s: cmd=%x %s %x %x %x %x hba:%x %x",
	    error_str, iocb->ULPCOMMAND, emlxs_state_xlate(iocb->ULPSTATUS),
	    word[4], word[5], word[6], word[7], hba->state, hba->flag);

done:

	if (hba->flag & FC_HBQ_ENABLED) {
		if (iocb->ULPBDECOUNT) {
			HBQE_t *hbqE;
			uint32_t hbq_id;

			hbqE = (HBQE_t *)iocb;
			hbq_id = hbqE->unt.ext.HBQ_tag;

			emlxs_update_HBQ_index(hba, hbq_id);
		}
	} else {
		if (mp) {
			emlxs_mem_put(hba, buf_type, (void *)mp);
		}

		if (iocb->ULPBDECOUNT) {
			(void) emlxs_post_buffer(hba, rp, 1);
		}
	}

	return (0);

} /* emlxs_handle_rcv_seq() */


/* EMLXS_CMD_RING_LOCK must be held when calling this function */
static void
emlxs_sli3_issue_iocb(emlxs_hba_t *hba, RING *rp, IOCBQ *iocbq)
{
	emlxs_port_t *port;
	IOCB *icmd;
	IOCB *iocb;
	emlxs_buf_t *sbp;
	off_t offset;
	uint32_t ringno;

	ringno = rp->ringno;
	sbp = iocbq->sbp;
	icmd = &iocbq->iocb;
	port = iocbq->port;

	HBASTATS.IocbIssued[ringno]++;

	/* Check for ULP pkt request */
	if (sbp) {
		mutex_enter(&sbp->mtx);

		if (sbp->node == NULL) {
			/* Set node to base node by default */
			iocbq->node = (void *)&port->node_base;
			sbp->node = (void *)&port->node_base;
		}

		sbp->pkt_flags |= PACKET_IN_CHIPQ;
		mutex_exit(&sbp->mtx);

		atomic_inc_32(&hba->io_active);
#ifdef NODE_THROTTLE_SUPPORT
		if (sbp->node) {
			atomic_inc_32(&sbp->node->io_active);
		}
#endif /* NODE_THROTTLE_SUPPORT */

#ifdef SFCT_SUPPORT
#ifdef FCT_IO_TRACE
		if (sbp->fct_cmd) {
			emlxs_fct_io_trace(port, sbp->fct_cmd,
			    EMLXS_FCT_IOCB_ISSUED);
			emlxs_fct_io_trace(port, sbp->fct_cmd,
			    icmd->ULPCOMMAND);
		}
#endif /* FCT_IO_TRACE */
#endif /* SFCT_SUPPORT */

		rp->channelp->hbaSendCmd_sbp++;
		iocbq->channel = rp->channelp;
	} else {
		rp->channelp->hbaSendCmd++;
	}

	/* get the next available command ring iocb */
	iocb =
	    (IOCB *)(((char *)rp->fc_cmdringaddr +
	    (rp->fc_cmdidx * hba->sli.sli3.iocb_cmd_size)));

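	/*
	 * The command ring is kept in the adapter's big-endian format;
	 * BE_SWAP32_BCOPY byte-swaps each word on little-endian hosts
	 * and is effectively a straight copy on big-endian ones.
	 */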
	/* Copy the local iocb to the command ring iocb */
	BE_SWAP32_BCOPY((uint8_t *)icmd, (uint8_t *)iocb,
	    hba->sli.sli3.iocb_cmd_size);

	/* DMA sync the command ring iocb for the adapter */
	offset = (off_t)((uint64_t)((unsigned long)iocb)
	    - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
	    hba->sli.sli3.iocb_cmd_size, DDI_DMA_SYNC_FORDEV);

	/*
	 * After this, the sbp / iocb should not be
	 * accessed in the xmit path.
	 */

	/* Free the local iocb if there is no sbp tracking it */
	if (!sbp) {
		emlxs_mem_put(hba, MEM_IOCB, (void *)iocbq);
	}

	/* update local ring index to next available ring index */
	rp->fc_cmdidx =
	    (rp->fc_cmdidx + 1 >= rp->fc_numCiocb) ? 0 : rp->fc_cmdidx + 1;


	return;

} /* emlxs_sli3_issue_iocb() */


static void
emlxs_sli3_hba_kill(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	MAILBOX *swpmb;
	MAILBOX *mb2;
	MAILBOX *mb1;
	uint32_t word0;
	uint32_t j;
	uint32_t interlock_failed;
	uint32_t ha_copy;
	uint32_t value;
	off_t offset;
	uint32_t size;

	/* Perform adapter interlock to kill adapter */
	interlock_failed = 0;

	mutex_enter(&EMLXS_PORT_LOCK);
	if (hba->flag & FC_INTERLOCKED) {
		EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);

		mutex_exit(&EMLXS_PORT_LOCK);

		return;
	}

	j = 0;
	while (j++ < 10000) {
		if (hba->mbox_queue_flag == 0) {
			break;
		}

		mutex_exit(&EMLXS_PORT_LOCK);
		BUSYWAIT_US(100);
		mutex_enter(&EMLXS_PORT_LOCK);
	}

	if (hba->mbox_queue_flag != 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock failed. Mailbox busy.");
		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}

	hba->flag |= FC_INTERLOCKED;
	hba->mbox_queue_flag = 1;

	/* Disable all host interrupts */
	hba->sli.sli3.hc_copy = 0;
	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
	WRITE_CSR_REG(hba, FC_HA_REG(hba), 0xffffffff);

	mb2 = FC_SLIM2_MAILBOX(hba);
	mb1 = FC_SLIM1_MAILBOX(hba);
	swpmb = (MAILBOX *)&word0;

	if (!(hba->flag & FC_SLIM2_MODE)) {
		goto mode_B;
	}

mode_A:

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	    "Attempting SLIM2 Interlock...");

interlock_A:

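	/*
	 * Interlock handshake: 0x55555555 is written into mailbox
	 * word 1 along with a KILL_BOARD command word; the firmware
	 * acknowledges acceptance by overwriting the value with
	 * 0xAAAAAAAA and then clears mbxOwner once the kill has
	 * completed.
	 */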
	value = 0x55555555;
	word0 = 0;
	swpmb->mbxCommand = MBX_KILL_BOARD;
	swpmb->mbxOwner = OWN_CHIP;

	/* Write value to SLIM */
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1), value);
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1)), word0);

	/* Send Kill board request */
	mb2->un.varWords[0] = value;
	mb2->mbxCommand = MBX_KILL_BOARD;
	mb2->mbxOwner = OWN_CHIP;

	/* Sync the memory */
	offset = (off_t)((uint64_t)((unsigned long)mb2)
	    - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
	size = (sizeof (uint32_t) * 2);

	BE_SWAP32_BCOPY((uint8_t *)mb2, (uint8_t *)mb2, size);

	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, size,
	    DDI_DMA_SYNC_FORDEV);

	/* interrupt board to do it right away */
	WRITE_CSR_REG(hba, FC_CA_REG(hba), CA_MBATT);

	/* First wait for command acceptance */
	j = 0;
	while (j++ < 1000) {
		value = READ_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1));

		if (value == 0xAAAAAAAA) {
			break;
		}

		BUSYWAIT_US(50);
	}

	if (value == 0xAAAAAAAA) {
		/* Now wait for mailbox ownership to clear */
		while (j++ < 10000) {
			word0 =
			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mb1));

			if (swpmb->mbxOwner == 0) {
				break;
			}

			BUSYWAIT_US(50);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock succeeded.");

		goto done;
	}

	/* Interlock failed !!! */
	interlock_failed = 1;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg, "Interlock failed.");

mode_B:

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	    "Attempting SLIM1 Interlock...");

interlock_B:

	value = 0x55555555;
	word0 = 0;
	swpmb->mbxCommand = MBX_KILL_BOARD;
	swpmb->mbxOwner = OWN_CHIP;

	/* Write KILL BOARD to mailbox */
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1), value);
	WRITE_SLIM_ADDR(hba, ((volatile uint32_t *)mb1), word0);

	/* interrupt board to do it right away */
	WRITE_CSR_REG(hba, FC_CA_REG(hba), CA_MBATT);

	/* First wait for command acceptance */
	j = 0;
	while (j++ < 1000) {
		value = READ_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1));

		if (value == 0xAAAAAAAA) {
			break;
		}

		BUSYWAIT_US(50);
	}

	if (value == 0xAAAAAAAA) {
		/* Now wait for mailbox ownership to clear */
		while (j++ < 10000) {
			word0 =
			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mb1));

			if (swpmb->mbxOwner == 0) {
				break;
			}

			BUSYWAIT_US(50);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock succeeded.");

		goto done;
	}

	/* Interlock failed !!! */

	/* If this is the first time then try again */
	if (interlock_failed == 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock failed. Retrying...");

		/* Try again */
		interlock_failed = 1;
		goto interlock_B;
	}

	/*
	 * Now check for error attention to indicate the board has
	 * been killed
	 */
	j = 0;
	while (j++ < 10000) {
		ha_copy = READ_CSR_REG(hba, FC_HA_REG(hba));

		if (ha_copy & HA_ERATT) {
			break;
		}

		BUSYWAIT_US(50);
	}

	if (ha_copy & HA_ERATT) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock failed. Board killed.");
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock failed. Board not killed.");
	}

done:

	hba->mbox_queue_flag = 0;

	EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif /* FMA_SUPPORT */

	mutex_exit(&EMLXS_PORT_LOCK);

	return;

} /* emlxs_sli3_hba_kill() */


static void
emlxs_sli3_hba_kill4quiesce(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	MAILBOX *swpmb;
	MAILBOX *mb2;
	MAILBOX *mb1;
	uint32_t word0;
	off_t offset;
	uint32_t j;
	uint32_t value;
	uint32_t size;

	/* Disable all host interrupts */
	hba->sli.sli3.hc_copy = 0;
	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
	WRITE_CSR_REG(hba, FC_HA_REG(hba), 0xffffffff);

	mb2 = FC_SLIM2_MAILBOX(hba);
	mb1 = FC_SLIM1_MAILBOX(hba);
	swpmb = (MAILBOX *)&word0;

	value = 0x55555555;
	word0 = 0;
	swpmb->mbxCommand = MBX_KILL_BOARD;
	swpmb->mbxOwner = OWN_CHIP;

	/* Write value to SLIM */
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1), value);
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1)), word0);

	/* Send Kill board request */
	mb2->un.varWords[0] = value;
	mb2->mbxCommand = MBX_KILL_BOARD;
	mb2->mbxOwner = OWN_CHIP;

	/* Sync the memory */
	offset = (off_t)((uint64_t)((unsigned long)mb2)
	    - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
	size = (sizeof (uint32_t) * 2);

	BE_SWAP32_BCOPY((uint8_t *)mb2, (uint8_t *)mb2, size);

	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, size,
	    DDI_DMA_SYNC_FORDEV);

	/* interrupt board to do it right away */
	WRITE_CSR_REG(hba, FC_CA_REG(hba), CA_MBATT);

	/* First wait for command acceptance */
	j = 0;
	while (j++ < 1000) {
		value = READ_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1));

		if (value == 0xAAAAAAAA) {
			break;
		}
		BUSYWAIT_US(50);
	}
	if (value == 0xAAAAAAAA) {
		/* Now wait for mailbox ownership to clear */
		while (j++ < 10000) {
			word0 =
			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mb1));
			if (swpmb->mbxOwner == 0) {
				break;
			}
			BUSYWAIT_US(50);
		}
		goto done;
	}

done:
	EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif /* FMA_SUPPORT */
	return;

} /* emlxs_sli3_hba_kill4quiesce */




/*
 * emlxs_handle_mb_event
 *
 * Description: Process a Mailbox Attention.
 * Called from host_interrupt to process MBATT
 *
 * Returns:
 *
 */
static uint32_t
emlxs_handle_mb_event(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	MAILBOX *mb;
	MAILBOX *swpmb;
	MAILBOX *mbox;
	MAILBOXQ *mbq = NULL;
	volatile uint32_t word0;
	MATCHMAP *mbox_bp;
	off_t offset;
	uint32_t i;
	int rc;

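	/*
	 * Overlay the MAILBOX layout on a local copy of mailbox word 0
	 * so mbxCommand, mbxOwner and mbxStatus can be examined from a
	 * single SLIM read.
	 */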
	swpmb = (MAILBOX *)&word0;

	mutex_enter(&EMLXS_PORT_LOCK);
	switch (hba->mbox_queue_flag) {
	case 0:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_mbox_intr_msg,
		    "No mailbox active.");

		mutex_exit(&EMLXS_PORT_LOCK);
		return (0);

	case MBX_POLL:

		/* Mark mailbox complete, this should wake up any polling */
		/* threads. This can happen if interrupts are enabled while */
		/* a polled mailbox command is outstanding. If we don't set */
		/* MBQ_COMPLETED here, the polling thread may wait until */
		/* timeout error occurs */

		mutex_enter(&EMLXS_MBOX_LOCK);
		mbq = (MAILBOXQ *)hba->mbox_mbq;
		if (mbq) {
			port = (emlxs_port_t *)mbq->port;
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Mailbox event. Completing Polled command.");
			mbq->flag |= MBQ_COMPLETED;
		}
		mutex_exit(&EMLXS_MBOX_LOCK);

		mutex_exit(&EMLXS_PORT_LOCK);
		return (0);

	case MBX_SLEEP:
	case MBX_NOWAIT:
		/* Check mbox_timer, it acts as a service flag too */
		/* The first to service the mbox queue will clear the timer */
		if (hba->mbox_timer) {
			hba->mbox_timer = 0;

			mutex_enter(&EMLXS_MBOX_LOCK);
			mbq = (MAILBOXQ *)hba->mbox_mbq;
			mutex_exit(&EMLXS_MBOX_LOCK);
		}

		if (!mbq) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Mailbox event. No service required.");
			mutex_exit(&EMLXS_PORT_LOCK);
			return (0);
		}

		mb = (MAILBOX *)mbq;
		mutex_exit(&EMLXS_PORT_LOCK);
		break;

	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_completion_error_msg,
		    "Invalid Mailbox flag (%x).", hba->mbox_queue_flag);

		mutex_exit(&EMLXS_PORT_LOCK);
		return (0);
	}

	/* Set port context */
	port = (emlxs_port_t *)mbq->port;

	/* Get first word of mailbox */
	if (hba->flag & FC_SLIM2_MODE) {
		mbox = FC_SLIM2_MAILBOX(hba);
		offset = (off_t)((uint64_t)((unsigned long)mbox)
		    - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));

		EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
		    sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
		word0 = *((volatile uint32_t *)mbox);
		word0 = BE_SWAP32(word0);
	} else {
		mbox = FC_SLIM1_MAILBOX(hba);
		word0 = READ_SLIM_ADDR(hba, ((volatile uint32_t *)mbox));
	}

	i = 0;
	while (swpmb->mbxOwner == OWN_CHIP) {
		if (i++ > 10000) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_mbox_intr_msg,
			    "OWN_CHIP: %s: status=%x",
			    emlxs_mb_cmd_xlate(swpmb->mbxCommand),
			    swpmb->mbxStatus);

			return (1);
		}

		/* Get first word of mailbox */
		if (hba->flag & FC_SLIM2_MODE) {
			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
			    offset, sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
			word0 = *((volatile uint32_t *)mbox);
			word0 = BE_SWAP32(word0);
		} else {
			word0 =
			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mbox));
		}
	}

	/* Now that we are the owner, DMA Sync entire mailbox if needed */
	if (hba->flag & FC_SLIM2_MODE) {
		EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
		    MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORKERNEL);

		BE_SWAP32_BCOPY((uint8_t *)mbox, (uint8_t *)mb,
		    MAILBOX_CMD_BSIZE);
	} else {
		READ_SLIM_COPY(hba, (uint32_t *)mb, (uint32_t *)mbox,
		    MAILBOX_CMD_WSIZE);
	}

#ifdef MBOX_EXT_SUPPORT
	if (mbq->extbuf) {
		uint32_t *mbox_ext =
		    (uint32_t *)((uint8_t *)mbox + MBOX_EXTENSION_OFFSET);
		off_t offset_ext = offset + MBOX_EXTENSION_OFFSET;

		if (hba->flag & FC_SLIM2_MODE) {
			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
			    offset_ext, mbq->extsize,
			    DDI_DMA_SYNC_FORKERNEL);
			BE_SWAP32_BCOPY((uint8_t *)mbox_ext,
			    (uint8_t *)mbq->extbuf, mbq->extsize);
		} else {
			READ_SLIM_COPY(hba, (uint32_t *)mbq->extbuf,
			    mbox_ext, (mbq->extsize / 4));
		}
	}
#endif /* MBOX_EXT_SUPPORT */

#ifdef FMA_SUPPORT
	if (!(hba->flag & FC_SLIM2_MODE)) {
		/* Access handle validation */
		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
	}
#endif /* FMA_SUPPORT */

	/* Now sync the memory buffer if one was used */
	if (mbq->bp) {
		mbox_bp = (MATCHMAP *)mbq->bp;
		EMLXS_MPDATA_SYNC(mbox_bp->dma_handle, 0, mbox_bp->size,
		    DDI_DMA_SYNC_FORKERNEL);
	}

	/* Mailbox has been completely received at this point */

	if (mb->mbxCommand == MBX_HEARTBEAT) {
		hba->heartbeat_active = 0;
		goto done;
	}

	if (hba->mbox_queue_flag == MBX_SLEEP) {
		if (swpmb->mbxCommand != MBX_DOWN_LOAD &&
		    swpmb->mbxCommand != MBX_DUMP_MEMORY) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Received. %s: status=%x Sleep.",
			    emlxs_mb_cmd_xlate(swpmb->mbxCommand),
			    swpmb->mbxStatus);
		}
	} else {
		if (swpmb->mbxCommand != MBX_DOWN_LOAD &&
		    swpmb->mbxCommand != MBX_DUMP_MEMORY) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Completed. %s: status=%x",
			    emlxs_mb_cmd_xlate(swpmb->mbxCommand),
			    swpmb->mbxStatus);
		}
	}

	/* Filter out passthru mailbox */
	if (mbq->flag & MBQ_PASSTHRU) {
		goto done;
	}

	if (mb->mbxStatus) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
		    "%s: status=0x%x", emlxs_mb_cmd_xlate(mb->mbxCommand),
		    (uint32_t)mb->mbxStatus);
	}

	if (mbq->mbox_cmpl) {
		rc = (mbq->mbox_cmpl)(hba, mbq);
		/* If mbox was retried, return immediately */
		if (rc) {
			return (0);
		}
	}

done:

	/* Clean up the mailbox area */
	emlxs_mb_fini(hba, mb, mb->mbxStatus);

	mbq = (MAILBOXQ *)emlxs_mb_get(hba);
	if (mbq) {
		/* Attempt to send pending mailboxes */
		rc = emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
		if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
			emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
		}
	}
	return (0);

} /* emlxs_handle_mb_event() */


static void
emlxs_sli3_timer(emlxs_hba_t *hba)
{
	/* Perform SLI3 level timer checks */

	emlxs_sli3_timer_check_mbox(hba);

} /* emlxs_sli3_timer() */


static void
emlxs_sli3_timer_check_mbox(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;
	MAILBOX *mb = NULL;
	uint32_t word0;
	uint32_t offset;
	uint32_t ha_copy = 0;

	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		return;
	}

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Return if timer hasn't expired */
	if (!hba->mbox_timer || (hba->timer_tics < hba->mbox_timer)) {
		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}

	/* Mailbox timed out, first check for error attention */
	ha_copy = emlxs_check_attention(hba);

	if (ha_copy & HA_ERATT) {
		hba->mbox_timer = 0;
		mutex_exit(&EMLXS_PORT_LOCK);
		emlxs_handle_ff_error(hba);
		return;
	}

	if (hba->mbox_queue_flag) {
		/* Get first word of mailbox */
		if (hba->flag & FC_SLIM2_MODE) {
			mb = FC_SLIM2_MAILBOX(hba);
			offset =
			    (off_t)((uint64_t)((unsigned long)mb) - (uint64_t)
			    ((unsigned long)hba->sli.sli3.slim2.virt));

			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
			    offset, sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
			word0 = *((volatile uint32_t *)mb);
			word0 = BE_SWAP32(word0);
		} else {
			mb = FC_SLIM1_MAILBOX(hba);
			word0 =
			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mb));
#ifdef FMA_SUPPORT
			/* Access handle validation */
			EMLXS_CHK_ACC_HANDLE(hba,
			    hba->sli.sli3.slim_acc_handle);
#endif /* FMA_SUPPORT */
		}

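		/* Examine the fetched word 0 through the MAILBOX layout */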
		mb = (MAILBOX *)&word0;

		/* Check if mailbox has actually completed */
		if (mb->mbxOwner == OWN_HOST) {
			/* Read host attention register to determine */
			/* interrupt source */
			uint32_t ha_copy = emlxs_check_attention(hba);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Mailbox attention missed: %s. Forcing event. "
			    "hc=%x ha=%x", emlxs_mb_cmd_xlate(mb->mbxCommand),
			    hba->sli.sli3.hc_copy, ha_copy);

			mutex_exit(&EMLXS_PORT_LOCK);

			(void) emlxs_handle_mb_event(hba);

			return;
		}

		/* The first to service the mbox queue will clear the timer */
		/* We will service the mailbox here */
		hba->mbox_timer = 0;

		mutex_enter(&EMLXS_MBOX_LOCK);
		mb = (MAILBOX *)hba->mbox_mbq;
		mutex_exit(&EMLXS_MBOX_LOCK);
	}

	if (mb) {
		switch (hba->mbox_queue_flag) {
		case MBX_NOWAIT:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: Nowait.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand));
			break;

		case MBX_SLEEP:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: mb=%p Sleep.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb);
			break;

		case MBX_POLL:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: mb=%p Polled.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb);
			break;

		default:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: mb=%p (%d).",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb, hba->mbox_queue_flag);
			break;
		}
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg, NULL);
	}

	hba->flag |= FC_MBOX_TIMEOUT;
	EMLXS_STATE_CHANGE_LOCKED(hba, FC_ERROR);

	mutex_exit(&EMLXS_PORT_LOCK);

	/* Perform mailbox cleanup */
	/* This will wake any sleeping or polling threads */
	emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);

	/* Trigger adapter shutdown */
	emlxs_thread_spawn(hba, emlxs_shutdown_thread, NULL, NULL);

	return;

} /* emlxs_sli3_timer_check_mbox() */
6137
6138
6139 /*
6140 * emlxs_mb_config_port Issue a CONFIG_PORT mailbox command
6141 */
6142 static uint32_t
emlxs_mb_config_port(emlxs_hba_t * hba,MAILBOXQ * mbq,uint32_t sli_mode,uint32_t hbainit)6143 emlxs_mb_config_port(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t sli_mode,
6144 uint32_t hbainit)
6145 {
6146 MAILBOX *mb = (MAILBOX *)mbq;
6147 emlxs_vpd_t *vpd = &VPD;
6148 emlxs_port_t *port = &PPORT;
6149 emlxs_config_t *cfg;
6150 RING *rp;
6151 uint64_t pcb;
6152 uint64_t mbx;
6153 uint64_t hgp;
6154 uint64_t pgp;
6155 uint64_t rgp;
6156 MAILBOX *mbox;
6157 SLIM2 *slim;
6158 SLI2_RDSC *rdsc;
6159 uint64_t offset;
6160 uint32_t Laddr;
6161 uint32_t i;
6162
6163 cfg = &CFG;
6164 bzero((void *)mb, MAILBOX_CMD_BSIZE);
6165 mbox = NULL;
6166 slim = NULL;
6167
6168 mb->mbxCommand = MBX_CONFIG_PORT;
6169 mb->mbxOwner = OWN_HOST;
6170 mbq->mbox_cmpl = NULL;
6171
6172 mb->un.varCfgPort.pcbLen = sizeof (PCB);
6173 mb->un.varCfgPort.hbainit[0] = hbainit;
6174
6175 pcb = hba->sli.sli3.slim2.phys +
6176 (uint64_t)((unsigned long)&(slim->pcb));
6177 mb->un.varCfgPort.pcbLow = PADDR_LO(pcb);
6178 mb->un.varCfgPort.pcbHigh = PADDR_HI(pcb);
6179
6180 /* Set Host pointers in SLIM flag */
6181 mb->un.varCfgPort.hps = 1;
6182
6183 /* Initialize hba structure for assumed default SLI2 mode */
6184 /* If config port succeeds, then we will update it then */
6185 hba->sli_mode = sli_mode;
6186 hba->vpi_max = 0;
6187 hba->flag &= ~FC_NPIV_ENABLED;
6188
6189 if (sli_mode == EMLXS_HBA_SLI3_MODE) {
6190 mb->un.varCfgPort.sli_mode = EMLXS_HBA_SLI3_MODE;
6191 mb->un.varCfgPort.cerbm = 1;
6192 mb->un.varCfgPort.max_hbq = EMLXS_NUM_HBQ;
6193
6194 if (cfg[CFG_NPIV_ENABLE].current) {
6195 if (vpd->feaLevelHigh >= 0x09) {
6196 if (hba->model_info.chip >= EMLXS_SATURN_CHIP) {
6197 mb->un.varCfgPort.vpi_max =
6198 MAX_VPORTS - 1;
6199 } else {
6200 mb->un.varCfgPort.vpi_max =
6201 MAX_VPORTS_LIMITED - 1;
6202 }
6203
6204 mb->un.varCfgPort.cmv = 1;
6205 } else {
6206 EMLXS_MSGF(EMLXS_CONTEXT,
6207 &emlxs_init_debug_msg,
6208 "CFGPORT: Firmware does not support NPIV. "
6209 "level=%d", vpd->feaLevelHigh);
6210 }
6211
6212 }
6213 }
6214
6215 /*
6216 * Now setup pcb
6217 */
6218 ((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.type = TYPE_NATIVE_SLI2;
6219 ((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.feature = FEATURE_INITIAL_SLI2;
6220 ((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.maxRing =
6221 (hba->sli.sli3.ring_count - 1);
6222 ((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.mailBoxSize =
6223 sizeof (MAILBOX) + MBOX_EXTENSION_SIZE;
6224
6225 mbx = hba->sli.sli3.slim2.phys +
6226 (uint64_t)((unsigned long)&(slim->mbx));
6227 ((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.mbAddrHigh = PADDR_HI(mbx);
6228 ((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.mbAddrLow = PADDR_LO(mbx);
6229
6230
6231 /*
6232 * Set up HGP - Port Memory
6233 *
6234 * CR0Put - SLI2(no HBQs) = 0xc0, With HBQs = 0x80
6235 * RR0Get 0xc4 0x84
6236 * CR1Put 0xc8 0x88
6237 * RR1Get 0xcc 0x8c
6238 * CR2Put 0xd0 0x90
6239 * RR2Get 0xd4 0x94
6240 * CR3Put 0xd8 0x98
6241 * RR3Get 0xdc 0x9c
6242 *
6243 * Reserved 0xa0-0xbf
6244 *
6245 * If HBQs configured:
6246 * HBQ 0 Put ptr 0xc0
6247 * HBQ 1 Put ptr 0xc4
6248 * HBQ 2 Put ptr 0xc8
6249 * ...
6250 * HBQ(M-1)Put Pointer 0xc0+(M-1)*4
6251 */
6252
6253 if (sli_mode >= EMLXS_HBA_SLI3_MODE) {
6254 /* ERBM is enabled */
6255 hba->sli.sli3.hgp_ring_offset = 0x80;
6256 hba->sli.sli3.hgp_hbq_offset = 0xC0;
6257
6258 hba->sli.sli3.iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
6259 hba->sli.sli3.iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
6260
6261 } else { /* SLI2 */
6262 /* ERBM is disabled */
6263 hba->sli.sli3.hgp_ring_offset = 0xC0;
6264 hba->sli.sli3.hgp_hbq_offset = 0;
6265
6266 hba->sli.sli3.iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
6267 hba->sli.sli3.iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
6268 }
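
	/*
	 * Illustrative sketch (hypothetical helper, compiled out; not part
	 * of the driver): per the layout table above, each ring owns 8
	 * bytes of host group pointer space (CRxPut at +0, RRxGet at +4)
	 * starting at hgp_ring_offset, and each HBQ owns one uint32_t put
	 * pointer starting at hgp_hbq_offset.  The SLIM address of ring
	 * N's command put pointer would therefore be derived as:
	 */
#if 0
static volatile uint32_t *
emlxs_sketch_cmd_put_addr(emlxs_hba_t *hba, uint32_t ringno)
{
	/* CR0Put=+0x00, RR0Get=+0x04, CR1Put=+0x08, ... (8 bytes/ring) */
	return ((volatile uint32_t *)((char *)hba->sli.sli3.slim_addr +
	    hba->sli.sli3.hgp_ring_offset + (ringno * 8)));
}
#endif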
6269
6270 /* The Sbus card uses Host Memory. The PCI card uses SLIM POINTER */
6271 if (hba->bus_type == SBUS_FC) {
6272 hgp = hba->sli.sli3.slim2.phys +
6273 (uint64_t)((unsigned long)&(mbox->us.s2.host));
6274 ((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.hgpAddrHigh =
6275 PADDR_HI(hgp);
6276 ((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.hgpAddrLow =
6277 PADDR_LO(hgp);
6278 } else {
6279 ((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.hgpAddrHigh =
6280 (uint32_t)ddi_get32(hba->pci_acc_handle,
6281 (uint32_t *)(hba->pci_addr + PCI_BAR_1_REGISTER));
6282
6283 Laddr =
6284 ddi_get32(hba->pci_acc_handle,
6285 (uint32_t *)(hba->pci_addr + PCI_BAR_0_REGISTER));
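		/* Clear bit 2 of BAR0 (the 64-bit memory-space type flag) */
		/* so only the base bus address remains */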
6286 Laddr &= ~0x4;
6287 ((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.hgpAddrLow =
6288 (uint32_t)(Laddr + hba->sli.sli3.hgp_ring_offset);
6289
6290 #ifdef FMA_SUPPORT
6291 /* Access handle validation */
6292 EMLXS_CHK_ACC_HANDLE(hba, hba->pci_acc_handle);
6293 #endif /* FMA_SUPPORT */
6294
6295 }
6296
6297 pgp = hba->sli.sli3.slim2.phys +
6298 (uint64_t)((unsigned long)&(mbox->us.s2.port));
6299 ((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.pgpAddrHigh =
6300 PADDR_HI(pgp);
6301 ((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.pgpAddrLow =
6302 PADDR_LO(pgp);
6303
6304 offset = 0;
6305 for (i = 0; i < 4; i++) {
6306 rp = &hba->sli.sli3.ring[i];
6307 rdsc = &((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.rdsc[i];
6308
6309 /* Setup command ring */
6310 rgp = hba->sli.sli3.slim2.phys +
6311 (uint64_t)((unsigned long)&(slim->IOCBs[offset]));
6312 rdsc->cmdAddrHigh = PADDR_HI(rgp);
6313 rdsc->cmdAddrLow = PADDR_LO(rgp);
6314 rdsc->cmdEntries = rp->fc_numCiocb;
6315
6316 rp->fc_cmdringaddr =
6317 (void *)&((SLIM2 *)hba->sli.sli3.slim2.virt)->IOCBs[offset];
6318 offset += rdsc->cmdEntries * hba->sli.sli3.iocb_cmd_size;
6319
6320 /* Setup response ring */
6321 rgp = hba->sli.sli3.slim2.phys +
6322 (uint64_t)((unsigned long)&(slim->IOCBs[offset]));
6323 rdsc->rspAddrHigh = PADDR_HI(rgp);
6324 rdsc->rspAddrLow = PADDR_LO(rgp);
6325 rdsc->rspEntries = rp->fc_numRiocb;
6326
6327 rp->fc_rspringaddr =
6328 (void *)&((SLIM2 *)hba->sli.sli3.slim2.virt)->IOCBs[offset];
6329 offset += rdsc->rspEntries * hba->sli.sli3.iocb_rsp_size;
6330 }
6331
6332 BE_SWAP32_BCOPY((uint8_t *)
6333 (&((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb),
6334 (uint8_t *)(&((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb),
6335 sizeof (PCB));
6336
6337 offset = ((uint64_t)((unsigned long)
6338 &(((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb)) -
6339 (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
6340 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, (off_t)offset,
6341 sizeof (PCB), DDI_DMA_SYNC_FORDEV);
6342
6343 return (0);
6344
6345 } /* emlxs_mb_config_port() */
6346
6347
6348 static uint32_t
6349 emlxs_hbq_setup(emlxs_hba_t *hba, uint32_t hbq_id)
6350 {
6351 emlxs_port_t *port = &PPORT;
6352 HBQ_INIT_t *hbq;
6353 MATCHMAP *mp;
6354 HBQE_t *hbqE;
6355 MAILBOX *mb;
6356 MAILBOXQ *mbq;
6357 void *ioa2;
6358 uint32_t j;
6359 uint32_t count;
6360 uint32_t size;
6361 uint32_t ringno;
6362 uint32_t seg;
6363
6364 switch (hbq_id) {
6365 case EMLXS_ELS_HBQ_ID:
6366 count = MEM_ELSBUF_COUNT;
6367 size = MEM_ELSBUF_SIZE;
6368 ringno = FC_ELS_RING;
6369 seg = MEM_ELSBUF;
6370 HBASTATS.ElsUbPosted = count;
6371 break;
6372
6373 case EMLXS_IP_HBQ_ID:
6374 count = MEM_IPBUF_COUNT;
6375 size = MEM_IPBUF_SIZE;
6376 ringno = FC_IP_RING;
6377 seg = MEM_IPBUF;
6378 HBASTATS.IpUbPosted = count;
6379 break;
6380
6381 case EMLXS_CT_HBQ_ID:
6382 count = MEM_CTBUF_COUNT;
6383 size = MEM_CTBUF_SIZE;
6384 ringno = FC_CT_RING;
6385 seg = MEM_CTBUF;
6386 HBASTATS.CtUbPosted = count;
6387 break;
6388
6389 #ifdef SFCT_SUPPORT
6390 case EMLXS_FCT_HBQ_ID:
6391 count = MEM_FCTBUF_COUNT;
6392 size = MEM_FCTBUF_SIZE;
6393 ringno = FC_FCT_RING;
6394 seg = MEM_FCTBUF;
6395 HBASTATS.FctUbPosted = count;
6396 break;
6397 #endif /* SFCT_SUPPORT */
6398
6399 default:
6400 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
6401 "hbq_setup: Invalid HBQ id. (%x)", hbq_id);
6402 return (1);
6403 }
6404
6405 /* Configure HBQ */
6406 hbq = &hba->sli.sli3.hbq_table[hbq_id];
6407 hbq->HBQ_numEntries = count;
6408
6409 /* Get a Mailbox buffer to set up mailbox commands for CONFIG_HBQ */
6410 if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX)) == 0) {
6411 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
6412 "hbq_setup: Unable to get mailbox.");
6413 return (1);
6414 }
6415 mb = (MAILBOX *)mbq;
6416
6417 /* Allocate HBQ Host buffer and Initialize the HBQEs */
6418 if (emlxs_hbq_alloc(hba, hbq_id)) {
6419 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
6420 "hbq_setup: Unable to allocate HBQ.");
6421 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
6422 return (1);
6423 }
6424
6425 hbq->HBQ_recvNotify = 1;
6426 hbq->HBQ_num_mask = 0; /* Bind to ring */
6427 hbq->HBQ_profile = 0; /* Selection profile */
6428 /* 0=all, 7=logentry */
6429 hbq->HBQ_ringMask = 1 << ringno; /* 1 << ringno binds the */
6430 /* HBQ to a ring: */
6431 /* Ring0=b0001, Ring1=b0010, */
6432 /* Ring2=b0100 */
6433 hbq->HBQ_headerLen = 0; /* 0 if not profile 4 or 5 */
6434 hbq->HBQ_logEntry = 0; /* Set to 1 if this HBQ will */
6435 /* be used for log entries */
6436 hbq->HBQ_id = hbq_id;
6437 hbq->HBQ_PutIdx_next = 0;
6438 hbq->HBQ_PutIdx = hbq->HBQ_numEntries - 1;
6439 hbq->HBQ_GetIdx = 0;
6440 hbq->HBQ_PostBufCnt = hbq->HBQ_numEntries;
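	/*
	 * Note: GetIdx starts at 0 and PutIdx starts at numEntries - 1,
	 * i.e. one slot behind GetIdx (mod numEntries).  Under the
	 * convention in emlxs_update_HBQ_index(), where advancing PutIdx
	 * onto GetIdx means the ring is full, the HBQ starts out fully
	 * posted.
	 */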
6441 bzero(hbq->HBQ_PostBufs, sizeof (hbq->HBQ_PostBufs));
6442
6443 /* Fill in POST BUFFERs in HBQE */
6444 hbqE = (HBQE_t *)hbq->HBQ_host_buf.virt;
6445 for (j = 0; j < hbq->HBQ_numEntries; j++, hbqE++) {
6446 /* Allocate buffer to post */
6447 if ((mp = (MATCHMAP *)emlxs_mem_get(hba,
6448 seg)) == 0) {
6449 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
6450 "hbq_setup: Unable to allocate HBQ buffer. "
6451 "cnt=%d", j);
6452 emlxs_hbq_free_all(hba, hbq_id);
6453 return (1);
6454 }
6455
6456 hbq->HBQ_PostBufs[j] = mp;
6457
6458 hbqE->unt.ext.HBQ_tag = hbq_id;
6459 hbqE->unt.ext.HBQE_tag = j;
6460 hbqE->bde.tus.f.bdeSize = size;
6461 hbqE->bde.tus.f.bdeFlags = 0;
6462 hbqE->unt.w = BE_SWAP32(hbqE->unt.w);
6463 hbqE->bde.tus.w = BE_SWAP32(hbqE->bde.tus.w);
6464 hbqE->bde.addrLow =
6465 BE_SWAP32(PADDR_LO(mp->phys));
6466 hbqE->bde.addrHigh =
6467 BE_SWAP32(PADDR_HI(mp->phys));
6468 }
6469
6470 /* Issue CONFIG_HBQ */
6471 emlxs_mb_config_hbq(hba, mbq, hbq_id);
6472 if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
6473 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
6474 "hbq_setup: Unable to config HBQ. cmd=%x status=%x",
6475 mb->mbxCommand, mb->mbxStatus);
6476
6477 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
6478 emlxs_hbq_free_all(hba, hbq_id);
6479 return (1);
6480 }
6481
6482 /* Setup HBQ Get/Put indexes */
6483 ioa2 = (void *)((char *)hba->sli.sli3.slim_addr +
6484 (hba->sli.sli3.hgp_hbq_offset + (hbq_id * sizeof (uint32_t))));
6485 WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2, hbq->HBQ_PutIdx);
6486
6487 hba->sli.sli3.hbq_count++;
6488
6489 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
6490
6491 #ifdef FMA_SUPPORT
6492 /* Access handle validation */
6493 if (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.slim_acc_handle)
6494 != DDI_FM_OK) {
6495 EMLXS_MSGF(EMLXS_CONTEXT,
6496 &emlxs_invalid_access_handle_msg, NULL);
6497 emlxs_hbq_free_all(hba, hbq_id);
6498 return (1);
6499 }
6500 #endif /* FMA_SUPPORT */
6501
6502 return (0);
6503
6504 } /* emlxs_hbq_setup() */
6505
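/*
 * Illustrative usage (hypothetical init fragment, compiled out; not
 * part of the driver): each supported HBQ id is configured once at
 * start-of-day.  A failed emlxs_hbq_setup() tears down its own HBQ via
 * emlxs_hbq_free_all(), so the caller only unwinds the ids that
 * succeeded.
 */
#if 0
static uint32_t
emlxs_sketch_setup_all_hbqs(emlxs_hba_t *hba)
{
	if (emlxs_hbq_setup(hba, EMLXS_ELS_HBQ_ID) ||
	    emlxs_hbq_setup(hba, EMLXS_IP_HBQ_ID) ||
	    emlxs_hbq_setup(hba, EMLXS_CT_HBQ_ID)) {
		return (1);
	}
	return (0);
}
#endif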
6506
6507 extern void
6508 emlxs_hbq_free_all(emlxs_hba_t *hba, uint32_t hbq_id)
6509 {
6510 HBQ_INIT_t *hbq;
6511 MBUF_INFO *buf_info;
6512 MBUF_INFO bufinfo;
6513 uint32_t seg;
6514 uint32_t j;
6515
6516 switch (hbq_id) {
6517 case EMLXS_ELS_HBQ_ID:
6518 seg = MEM_ELSBUF;
6519 HBASTATS.ElsUbPosted = 0;
6520 break;
6521
6522 case EMLXS_IP_HBQ_ID:
6523 seg = MEM_IPBUF;
6524 HBASTATS.IpUbPosted = 0;
6525 break;
6526
6527 case EMLXS_CT_HBQ_ID:
6528 seg = MEM_CTBUF;
6529 HBASTATS.CtUbPosted = 0;
6530 break;
6531
6532 #ifdef SFCT_SUPPORT
6533 case EMLXS_FCT_HBQ_ID:
6534 seg = MEM_FCTBUF;
6535 HBASTATS.FctUbPosted = 0;
6536 break;
6537 #endif /* SFCT_SUPPORT */
6538
6539 default:
6540 return;
6541 }
6542
6543
6544 hbq = &hba->sli.sli3.hbq_table[hbq_id];
6545
6546 if (hbq->HBQ_host_buf.virt != 0) {
6547 for (j = 0; j < hbq->HBQ_PostBufCnt; j++) {
6548 emlxs_mem_put(hba, seg,
6549 (void *)hbq->HBQ_PostBufs[j]);
6550 hbq->HBQ_PostBufs[j] = NULL;
6551 }
6552 hbq->HBQ_PostBufCnt = 0;
6553
6554 buf_info = &bufinfo;
6555 bzero(buf_info, sizeof (MBUF_INFO));
6556
6557 buf_info->size = hbq->HBQ_host_buf.size;
6558 buf_info->virt = hbq->HBQ_host_buf.virt;
6559 buf_info->phys = hbq->HBQ_host_buf.phys;
6560 buf_info->dma_handle = hbq->HBQ_host_buf.dma_handle;
6561 buf_info->data_handle = hbq->HBQ_host_buf.data_handle;
6562 buf_info->flags = FC_MBUF_DMA;
6563
6564 emlxs_mem_free(hba, buf_info);
6565
6566 hbq->HBQ_host_buf.virt = NULL;
6567 }
6568
6569 return;
6570
6571 } /* emlxs_hbq_free_all() */
6572
6573
6574 extern void
6575 emlxs_update_HBQ_index(emlxs_hba_t *hba, uint32_t hbq_id)
6576 {
6577 #ifdef FMA_SUPPORT
6578 emlxs_port_t *port = &PPORT;
6579 #endif /* FMA_SUPPORT */
6580 void *ioa2;
6581 uint32_t status;
6582 uint32_t HBQ_PortGetIdx;
6583 HBQ_INIT_t *hbq;
6584
6585 switch (hbq_id) {
6586 case EMLXS_ELS_HBQ_ID:
6587 HBASTATS.ElsUbPosted++;
6588 break;
6589
6590 case EMLXS_IP_HBQ_ID:
6591 HBASTATS.IpUbPosted++;
6592 break;
6593
6594 case EMLXS_CT_HBQ_ID:
6595 HBASTATS.CtUbPosted++;
6596 break;
6597
6598 #ifdef SFCT_SUPPORT
6599 case EMLXS_FCT_HBQ_ID:
6600 HBASTATS.FctUbPosted++;
6601 break;
6602 #endif /* SFCT_SUPPORT */
6603
6604 default:
6605 return;
6606 }
6607
6608 hbq = &hba->sli.sli3.hbq_table[hbq_id];
6609
6610 hbq->HBQ_PutIdx =
6611 (hbq->HBQ_PutIdx + 1 >=
6612 hbq->HBQ_numEntries) ? 0 : hbq->HBQ_PutIdx + 1;
6613
6614 if (hbq->HBQ_PutIdx == hbq->HBQ_GetIdx) {
6615 HBQ_PortGetIdx =
6616 BE_SWAP32(((SLIM2 *)hba->sli.sli3.slim2.virt)->mbx.us.s2.
6617 HBQ_PortGetIdx[hbq_id]);
6618
6619 hbq->HBQ_GetIdx = HBQ_PortGetIdx;
6620
6621 if (hbq->HBQ_PutIdx == hbq->HBQ_GetIdx) {
6622 return;
6623 }
6624 }
6625
6626 ioa2 = (void *)((char *)hba->sli.sli3.slim_addr +
6627 (hba->sli.sli3.hgp_hbq_offset + (hbq_id * sizeof (uint32_t))));
6628 status = hbq->HBQ_PutIdx;
6629 WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2, status);
6630
6631 #ifdef FMA_SUPPORT
6632 /* Access handle validation */
6633 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
6634 #endif /* FMA_SUPPORT */
6635
6636 return;
6637
6638 } /* emlxs_update_HBQ_index() */
6639
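/*
 * Illustrative sketch (compiled out): the index update above is a
 * standard circular-queue advance.  The queue is treated as full when
 * the advanced put index would land on the get index; before concluding
 * that, the driver refreshes its get index from the adapter's copy in
 * SLIM (HBQ_PortGetIdx), since the adapter consumes entries
 * asynchronously.
 */
#if 0
static uint32_t
emlxs_sketch_ring_advance(uint32_t idx, uint32_t num_entries)
{
	return ((idx + 1 >= num_entries) ? 0 : idx + 1);
}
#endif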
6640
6641 static void
6642 emlxs_sli3_enable_intr(emlxs_hba_t *hba)
6643 {
6644 #ifdef FMA_SUPPORT
6645 emlxs_port_t *port = &PPORT;
6646 #endif /* FMA_SUPPORT */
6647 uint32_t status;
6648
6649 /* Enable mailbox, error attention interrupts */
6650 status = (uint32_t)(HC_MBINT_ENA);
6651
6652 /* Enable ring interrupts */
6653 if (hba->sli.sli3.ring_count >= 4) {
6654 status |=
6655 (HC_R3INT_ENA | HC_R2INT_ENA | HC_R1INT_ENA |
6656 HC_R0INT_ENA);
6657 } else if (hba->sli.sli3.ring_count == 3) {
6658 status |= (HC_R2INT_ENA | HC_R1INT_ENA | HC_R0INT_ENA);
6659 } else if (hba->sli.sli3.ring_count == 2) {
6660 status |= (HC_R1INT_ENA | HC_R0INT_ENA);
6661 } else if (hba->sli.sli3.ring_count == 1) {
6662 status |= (HC_R0INT_ENA);
6663 }
6664
6665 hba->sli.sli3.hc_copy = status;
6666 WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
6667
6668 #ifdef FMA_SUPPORT
6669 /* Access handle validation */
6670 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
6671 #endif /* FMA_SUPPORT */
6672
6673 } /* emlxs_sli3_enable_intr() */
6674
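/*
 * Illustrative sketch (compiled out): the if/else ladder above simply
 * accumulates one ring-interrupt enable bit per configured ring.
 * Assuming HC_R0INT_ENA..HC_R3INT_ENA are adjacent bits (as they are
 * on SLI3 parts), an equivalent loop form is:
 */
#if 0
static uint32_t
emlxs_sketch_intr_mask(uint32_t ring_count)
{
	uint32_t status = (uint32_t)(HC_MBINT_ENA);
	uint32_t i;

	for (i = 0; i < ring_count && i < 4; i++) {
		status |= (HC_R0INT_ENA << i);
	}
	return (status);
}
#endif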
6675
6676 static void
6677 emlxs_enable_latt(emlxs_hba_t *hba)
6678 {
6679 #ifdef FMA_SUPPORT
6680 emlxs_port_t *port = &PPORT;
6681 #endif /* FMA_SUPPORT */
6682
6683 mutex_enter(&EMLXS_PORT_LOCK);
6684 hba->sli.sli3.hc_copy |= HC_LAINT_ENA;
6685 WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
6686 #ifdef FMA_SUPPORT
6687 /* Access handle validation */
6688 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
6689 #endif /* FMA_SUPPORT */
6690 mutex_exit(&EMLXS_PORT_LOCK);
6691
6692 } /* emlxs_enable_latt() */
6693
6694
6695 static void
6696 emlxs_sli3_disable_intr(emlxs_hba_t *hba, uint32_t att)
6697 {
6698 #ifdef FMA_SUPPORT
6699 emlxs_port_t *port = &PPORT;
6700 #endif /* FMA_SUPPORT */
6701
6702 /* Disable all adapter interrupts */
6703 hba->sli.sli3.hc_copy = att;
6704 WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
6705 #ifdef FMA_SUPPORT
6706 /* Access handle validation */
6707 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
6708 #endif /* FMA_SUPPORT */
6709
6710 } /* emlxs_sli3_disable_intr() */
6711
6712
6713 static uint32_t
6714 emlxs_check_attention(emlxs_hba_t *hba)
6715 {
6716 #ifdef FMA_SUPPORT
6717 emlxs_port_t *port = &PPORT;
6718 #endif /* FMA_SUPPORT */
6719 uint32_t ha_copy;
6720
6721 ha_copy = READ_CSR_REG(hba, FC_HA_REG(hba));
6722 #ifdef FMA_SUPPORT
6723 /* Access handle validation */
6724 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
6725 #endif /* FMA_SUPPORT */
6726 return (ha_copy);
6727
6728 } /* emlxs_check_attention() */
6729
6730
6731 static void
6732 emlxs_sli3_poll_erratt(emlxs_hba_t *hba)
6733 {
6734 uint32_t ha_copy;
6735
6736 ha_copy = emlxs_check_attention(hba);
6737
6738 /* Adapter error */
6739 if (ha_copy & HA_ERATT) {
6740 HBASTATS.IntrEvent[6]++;
6741 emlxs_handle_ff_error(hba);
6742 }
6743
6744 } /* emlxs_sli3_poll_erratt() */
6745
6746
6747 static uint32_t
6748 emlxs_sli3_reg_did_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq)
6749 {
6750 emlxs_port_t *port = (emlxs_port_t *)mbq->port;
6751 MAILBOXQ *mboxq;
6752 MAILBOX *mb;
6753 MATCHMAP *mp;
6754 NODELIST *ndlp;
6755 emlxs_port_t *vport;
6756 SERV_PARM *sp;
6757 int32_t i;
6758 uint32_t control;
6759 uint32_t ldata;
6760 uint32_t ldid;
6761 uint16_t lrpi;
6762 uint16_t lvpi;
6763 uint32_t rval;
6764
6765 mb = (MAILBOX *)mbq;
6766
6767 if (mb->mbxStatus) {
6768 if (mb->mbxStatus == MBXERR_NO_RESOURCES) {
6769 control = mb->un.varRegLogin.un.sp.bdeSize;
6770 if (control == 0) {
6771 /* Special handle for vport PLOGI */
6772 if (mbq->iocbq == (uint8_t *)1) {
6773 mbq->iocbq = NULL;
6774 }
6775 return (0);
6776 }
6777 emlxs_mb_retry(hba, mbq);
6778 return (1);
6779 }
6780 if (mb->mbxStatus == MBXERR_RPI_FULL) {
6781 EMLXS_MSGF(EMLXS_CONTEXT,
6782 &emlxs_node_create_failed_msg,
6783 "Limit reached. count=%d", port->node_count);
6784 }
6785
6786 /* Special handle for vport PLOGI */
6787 if (mbq->iocbq == (uint8_t *)1) {
6788 mbq->iocbq = NULL;
6789 }
6790
6791 return (0);
6792 }
6793
6794 mp = (MATCHMAP *)mbq->bp;
6795 if (!mp) {
6796 return (0);
6797 }
6798
6799 ldata = mb->un.varWords[5];
6800 lvpi = (ldata & 0xffff);
6801 port = &VPORT(lvpi);
6802
6803 /* First copy command data */
6804 ldata = mb->un.varWords[0]; /* get rpi */
6805 lrpi = ldata & 0xffff;
6806
6807 ldata = mb->un.varWords[1]; /* get did */
6808 ldid = ldata & MASK_DID;
6809
6810 sp = (SERV_PARM *)mp->virt;
6811
6812 /* Create or update the node */
6813 ndlp = emlxs_node_create(port, ldid, lrpi, sp);
6814
6815 if (ndlp == NULL) {
6816 emlxs_ub_priv_t *ub_priv;
6817
6818 /*
6819 * Fake a mailbox error, so the mbox_fini
6820 * can take appropriate action
6821 */
6822 mb->mbxStatus = MBXERR_RPI_FULL;
6823 if (mbq->ubp) {
6824 ub_priv = ((fc_unsol_buf_t *)mbq->ubp)->ub_fca_private;
6825 ub_priv->flags |= EMLXS_UB_REPLY;
6826 }
6827
6828 /* This must be (0xFFFFFE), which was registered by the vport */
6829 if (lrpi == 0) {
6830 return (0);
6831 }
6832
6833 if (!(mboxq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))) {
6834 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6835 "reg_did_mbcmpl:failed. Unable to allocate mbox");
6836 return (0);
6837 }
6838
6839 mb = (MAILBOX *)mboxq->mbox;
6840 mb->un.varUnregLogin.rpi = lrpi;
6841 mb->un.varUnregLogin.vpi = lvpi;
6842
6843 mb->mbxCommand = MBX_UNREG_LOGIN;
6844 mb->mbxOwner = OWN_HOST;
6845 mboxq->sbp = NULL;
6846 mboxq->ubp = NULL;
6847 mboxq->iocbq = NULL;
6848 mboxq->mbox_cmpl = NULL;
6849 mboxq->context = NULL;
6850 mboxq->port = (void *)port;
6851
6852 rval = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mboxq, MBX_NOWAIT, 0);
6853 if ((rval != MBX_BUSY) && (rval != MBX_SUCCESS)) {
6854 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6855 "reg_did_mbcmpl:failed. Unable to send request.");
6856
6857 emlxs_mem_put(hba, MEM_MBOX, (void *)mboxq);
6858 return (0);
6859 }
6860
6861 return (0);
6862 }
6863
6864 if (ndlp->nlp_DID == FABRIC_DID) {
6865 /* FLOGI/FDISC successfully completed on this port */
6866 mutex_enter(&EMLXS_PORT_LOCK);
6867 port->flag |= EMLXS_PORT_FLOGI_CMPL;
6868 mutex_exit(&EMLXS_PORT_LOCK);
6869
6870 /* If CLEAR_LA has been sent, then attempt to */
6871 /* register the vpi now */
6872 if (hba->state == FC_READY) {
6873 (void) emlxs_mb_reg_vpi(port, NULL);
6874 }
6875
6876 /*
6877 * If NPIV Fabric support has just been established on
6878 * the physical port, then notify the vports of the
6879 * link up
6880 */
6881 if ((lvpi == 0) &&
6882 (hba->flag & FC_NPIV_ENABLED) &&
6883 (hba->flag & FC_NPIV_SUPPORTED)) {
6884 /* Skip the physical port */
6885 for (i = 1; i < MAX_VPORTS; i++) {
6886 vport = &VPORT(i);
6887
6888 if (!(vport->flag & EMLXS_PORT_BOUND) ||
6889 !(vport->flag &
6890 EMLXS_PORT_ENABLED)) {
6891 continue;
6892 }
6893
6894 emlxs_port_online(vport);
6895 }
6896 }
6897 }
6898
6899 /* Check for special restricted login flag */
6900 if (mbq->iocbq == (uint8_t *)1) {
6901 mbq->iocbq = NULL;
6902 (void) EMLXS_SLI_UNREG_NODE(port, ndlp, NULL, NULL, NULL);
6903 return (0);
6904 }
6905
6906 /* Needed for FCT trigger in emlxs_mb_deferred_cmpl */
6907 if (mbq->sbp) {
6908 ((emlxs_buf_t *)mbq->sbp)->node = ndlp;
6909 }
6910
6911 #ifdef DHCHAP_SUPPORT
6912 if (mbq->sbp || mbq->ubp) {
6913 if (emlxs_dhc_auth_start(port, ndlp, mbq->sbp,
6914 mbq->ubp) == 0) {
6915 /* Auth started - auth completion will */
6916 /* handle sbp and ubp now */
6917 mbq->sbp = NULL;
6918 mbq->ubp = NULL;
6919 }
6920 }
6921 #endif /* DHCHAP_SUPPORT */
6922
6923 return (0);
6924
6925 } /* emlxs_sli3_reg_did_mbcmpl() */
6926
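/*
 * Illustrative sketch (compiled out): the completion handler above
 * unpacks REG_LOGIN64 results from the low halves of the mailbox's
 * variable words; word0 carries the RPI, word1 the D_ID, and word5
 * the VPI.
 */
#if 0
static void
emlxs_sketch_unpack_reg_login(MAILBOX *mb, uint16_t *rpi, uint16_t *vpi,
    uint32_t *did)
{
	*rpi = (uint16_t)(mb->un.varWords[0] & 0xffff);
	*did = (mb->un.varWords[1] & MASK_DID);
	*vpi = (uint16_t)(mb->un.varWords[5] & 0xffff);
}
#endif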
6927
6928 static uint32_t
6929 emlxs_sli3_reg_did(emlxs_port_t *port, uint32_t did, SERV_PARM *param,
6930 emlxs_buf_t *sbp, fc_unsol_buf_t *ubp, IOCBQ *iocbq)
6931 {
6932 emlxs_hba_t *hba = HBA;
6933 MATCHMAP *mp;
6934 MAILBOXQ *mbq;
6935 MAILBOX *mb;
6936 uint32_t rval;
6937
6938 /* Check for invalid node ids to register */
6939 if ((did == 0) && (!(hba->flag & FC_LOOPBACK_MODE))) {
6940 return (1);
6941 }
6942
6943 if (did & 0xff000000) {
6944 return (1);
6945 }
6946
6947 if ((rval = emlxs_mb_check_sparm(hba, param))) {
6948 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_create_failed_msg,
6949 "Invalid service parameters. did=%06x rval=%d", did,
6950 rval);
6951
6952 return (1);
6953 }
6954
6955 /* Check if the node limit has been reached */
6956 if (port->node_count >= hba->max_nodes) {
6957 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_create_failed_msg,
6958 "Limit reached. did=%06x count=%d", did,
6959 port->node_count);
6960
6961 return (1);
6962 }
6963
6964 if (!(mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))) {
6965 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_create_failed_msg,
6966 "Unable to allocate mailbox. did=%x", did);
6967
6968 return (1);
6969 }
6970 mb = (MAILBOX *)mbq->mbox;
6971 bzero((void *)mb, MAILBOX_CMD_BSIZE);
6972
6973 /* Build login request */
6974 if ((mp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF)) == 0) {
6975 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
6976
6977 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_create_failed_msg,
6978 "Unable to allocate buffer. did=%x", did);
6979 return (1);
6980 }
6981 bcopy((void *)param, (void *)mp->virt, sizeof (SERV_PARM));
6982
6983 mb->un.varRegLogin.un.sp64.tus.f.bdeSize = sizeof (SERV_PARM);
6984 mb->un.varRegLogin.un.sp64.addrHigh = PADDR_HI(mp->phys);
6985 mb->un.varRegLogin.un.sp64.addrLow = PADDR_LO(mp->phys);
6986 mb->un.varRegLogin.did = did;
6987 mb->un.varWords[30] = 0; /* flags */
6988 mb->mbxCommand = MBX_REG_LOGIN64;
6989 mb->mbxOwner = OWN_HOST;
6990 mb->un.varRegLogin.vpi = port->vpi;
6991 mb->un.varRegLogin.rpi = 0;
6992
6993 mbq->sbp = (void *)sbp;
6994 mbq->ubp = (void *)ubp;
6995 mbq->iocbq = (void *)iocbq;
6996 mbq->bp = (void *)mp;
6997 mbq->mbox_cmpl = emlxs_sli3_reg_did_mbcmpl;
6998 mbq->context = NULL;
6999 mbq->port = (void *)port;
7000
7001 rval = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_NOWAIT, 0);
7002 if ((rval != MBX_BUSY) && (rval != MBX_SUCCESS)) {
7003 emlxs_mem_put(hba, MEM_BUF, (void *)mp);
7004 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
7005
7006 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_create_failed_msg,
7007 "Unable to send mbox. did=%x", did);
7008 return (1);
7009 }
7010
7011 return (0);
7012
7013 } /* emlxs_sli3_reg_did() */
7014
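/*
 * Illustrative usage (hypothetical caller, compiled out; not part of
 * the driver): registering a remote port's service parameters after a
 * successful PLOGI, with no deferred packet, unsolicited buffer, or
 * IOCB to complete.  A nonzero return means the request was never
 * queued and the caller must recover immediately.
 */
#if 0
static void
emlxs_sketch_register_plogi_peer(emlxs_port_t *port, uint32_t did,
    SERV_PARM *sp)
{
	if (emlxs_sli3_reg_did(port, did, sp, NULL, NULL, NULL)) {
		/* Validation or allocation failed; nothing was queued. */
	}
}
#endif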
7015
7016 /*ARGSUSED*/
7017 static uint32_t
7018 emlxs_sli3_unreg_node_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq)
7019 {
7020 emlxs_port_t *port = (emlxs_port_t *)mbq->port;
7021 MAILBOX *mb;
7022 NODELIST *node;
7023 uint16_t rpi;
7024
7025 node = (NODELIST *)mbq->context;
7026 mb = (MAILBOX *)mbq;
7027 rpi = (node)? node->nlp_Rpi:0xffff;
7028
7029 if (mb->mbxStatus) {
7030 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
7031 "unreg_node_mbcmpl:failed. node=%p rpi=%d status=%x",
7032 node, rpi, mb->mbxStatus);
7033
7034 return (0);
7035 }
7036
7037 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
7038 "unreg_node_mbcmpl: node=%p rpi=%d",
7039 node, rpi);
7040
7041 if (node) {
7042 emlxs_node_rm(port, node);
7043
7044 } else { /* All nodes */
7045 emlxs_node_destroy_all(port);
7046 }
7047
7048 return (0);
7049
7050 } /* emlxs_sli3_unreg_node_mbcmpl() */
7051
7052
7053 static uint32_t
7054 emlxs_sli3_unreg_node(emlxs_port_t *port, NODELIST *node, emlxs_buf_t *sbp,
7055 fc_unsol_buf_t *ubp, IOCBQ *iocbq)
7056 {
7057 emlxs_hba_t *hba = HBA;
7058 MAILBOXQ *mbq;
7059 MAILBOX *mb;
7060 uint16_t rpi;
7061 uint32_t rval;
7062
7063 if (node) {
7064 /* Check for base node */
7065 if (node == &port->node_base) {
7066 /* just flush base node */
7067 (void) emlxs_tx_node_flush(port, &port->node_base,
7068 0, 0, 0);
7069 (void) emlxs_chipq_node_flush(port, 0,
7070 &port->node_base, 0);
7071
7072 port->did = 0;
7073
7074 /* Return now */
7075 return (1);
7076 }
7077
7078 rpi = (uint16_t)node->nlp_Rpi;
7079
7080 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
7081 "unreg_node:%p rpi=%d", node, rpi);
7082
7083 /* This node must be (0xFFFFFE), which was registered by the vport */
7084 if (rpi == 0) {
7085 emlxs_node_rm(port, node);
7086 return (0);
7087 }
7088
7089 } else { /* Unreg all nodes */
7090 rpi = 0xffff;
7091
7092 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
7093 "unreg_node: All");
7094 }
7095
7096 if (!(mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))) {
7097 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
7098 "unreg_node:failed. Unable to allocate mbox");
7099 return (1);
7100 }
7101
7102 mb = (MAILBOX *)mbq->mbox;
7103 mb->un.varUnregLogin.rpi = rpi;
7104 mb->un.varUnregLogin.vpi = port->vpip->VPI;
7105
7106 mb->mbxCommand = MBX_UNREG_LOGIN;
7107 mb->mbxOwner = OWN_HOST;
7108 mbq->sbp = (void *)sbp;
7109 mbq->ubp = (void *)ubp;
7110 mbq->iocbq = (void *)iocbq;
7111 mbq->mbox_cmpl = emlxs_sli3_unreg_node_mbcmpl;
7112 mbq->context = (void *)node;
7113 mbq->port = (void *)port;
7114
7115 rval = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_NOWAIT, 0);
7116 if ((rval != MBX_BUSY) && (rval != MBX_SUCCESS)) {
7117 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
7118 "unreg_node:failed. Unable to send request.");
7119
7120 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
7121 return (1);
7122 }
7123
7124 return (0);
7125
7126 } /* emlxs_sli3_unreg_node() */
7127