1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at
9 * http://www.opensource.org/licenses/cddl1.txt.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2004-2012 Emulex. All rights reserved.
24 * Use is subject to license terms.
25 * Copyright 2020 RackTop Systems, Inc.
26 */
27
28 #include <emlxs.h>
29
30 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
31 EMLXS_MSG_DEF(EMLXS_MBOX_C);
32
33 #define SLI_PAGE_SIZE 4096
34
35 emlxs_table_t emlxs_mb_status_table[] = {
36 {MBX_SUCCESS, "SUCCESS"},
37 {MBX_FAILURE, "FAILURE"},
38 {MBXERR_NUM_IOCBS, "NUM_IOCBS"},
39 {MBXERR_IOCBS_EXCEEDED, "IOCBS_EXCEEDED"},
40 {MBXERR_BAD_RING_NUMBER, "BAD_RING_NUMBER"},
41 {MBXERR_MASK_ENTRIES_RANGE, "MASK_ENTRIES_RANGE"},
42 {MBXERR_MASKS_EXCEEDED, "MASKS_EXCEEDED"},
43 {MBXERR_BAD_PROFILE, "BAD_PROFILE"},
44 {MBXERR_BAD_DEF_CLASS, "BAD_DEF_CLASS"},
45 {MBXERR_BAD_MAX_RESPONDER, "BAD_MAX_RESPONDER"},
46 {MBXERR_BAD_MAX_ORIGINATOR, "BAD_MAX_ORIGINATOR"},
47 {MBXERR_RPI_REGISTERED, "RPI_REGISTERED"},
48 {MBXERR_RPI_FULL, "RPI_FULL"},
49 {MBXERR_NO_RESOURCES, "NO_RESOURCES"},
50 {MBXERR_BAD_RCV_LENGTH, "BAD_RCV_LENGTH"},
51 {MBXERR_DMA_ERROR, "DMA_ERROR"},
52 {MBXERR_NOT_SUPPORTED, "NOT_SUPPORTED"},
53 {MBXERR_UNSUPPORTED_FEATURE, "UNSUPPORTED_FEATURE"},
54 {MBXERR_UNKNOWN_COMMAND, "UNKNOWN_COMMAND"},
55 {MBXERR_BAD_IP_BIT, "BAD_IP_BIT"},
56 {MBXERR_BAD_PCB_ALIGN, "BAD_PCB_ALIGN"},
57 {MBXERR_BAD_HBQ_ID, "BAD_HBQ_ID"},
58 {MBXERR_BAD_HBQ_STATE, "BAD_HBQ_STATE"},
59 {MBXERR_BAD_HBQ_MASK_NUM, "BAD_HBQ_MASK_NUM"},
60 {MBXERR_BAD_HBQ_MASK_SUBSET, "BAD_HBQ_MASK_SUBSET"},
61 {MBXERR_HBQ_CREATE_FAIL, "HBQ_CREATE_FAIL"},
62 {MBXERR_HBQ_EXISTING, "HBQ_EXISTING"},
63 {MBXERR_HBQ_RSPRING_FULL, "HBQ_RSPRING_FULL"},
64 {MBXERR_HBQ_DUP_MASK, "HBQ_DUP_MASK"},
65 {MBXERR_HBQ_INVAL_GET_PTR, "HBQ_INVAL_GET_PTR"},
66 {MBXERR_BAD_HBQ_SIZE, "BAD_HBQ_SIZE"},
67 {MBXERR_BAD_HBQ_ORDER, "BAD_HBQ_ORDER"},
68 {MBXERR_INVALID_ID, "INVALID_ID"},
69 {MBXERR_INVALID_VFI, "INVALID_VFI"},
70 {MBXERR_FLASH_WRITE_FAILED, "FLASH_WRITE_FAILED"},
71 {MBXERR_INVALID_LINKSPEED, "INVALID_LINKSPEED"},
72 {MBXERR_BAD_REDIRECT, "BAD_REDIRECT"},
73 {MBXERR_RING_ALREADY_CONFIG, "RING_ALREADY_CONFIG"},
74 {MBXERR_RING_INACTIVE, "RING_INACTIVE"},
75 {MBXERR_RPI_INACTIVE, "RPI_INACTIVE"},
76 {MBXERR_NO_ACTIVE_XRI, "NO_ACTIVE_XRI"},
77 {MBXERR_XRI_NOT_ACTIVE, "XRI_NOT_ACTIVE"},
78 {MBXERR_RPI_INUSE, "RPI_INUSE"},
79 {MBXERR_NO_LINK_ATTENTION, "NO_LINK_ATTENTION"},
80 {MBXERR_INVALID_SLI_MODE, "INVALID_SLI_MODE"},
81 {MBXERR_INVALID_HOST_PTR, "INVALID_HOST_PTR"},
82 {MBXERR_CANT_CFG_SLI_MODE, "CANT_CFG_SLI_MODE"},
83 {MBXERR_BAD_OVERLAY, "BAD_OVERLAY"},
84 {MBXERR_INVALID_FEAT_REQ, "INVALID_FEAT_REQ"},
85 {MBXERR_CONFIG_CANT_COMPLETE, "CONFIG_CANT_COMPLETE"},
86 {MBXERR_DID_ALREADY_REGISTERED, "DID_ALREADY_REGISTERED"},
87 {MBXERR_DID_INCONSISTENT, "DID_INCONSISTENT"},
88 {MBXERR_VPI_TOO_LARGE, "VPI_TOO_LARGE"},
89 {MBXERR_STILL_ASSOCIATED, "STILL_ASSOCIATED"},
90 {MBXERR_INVALID_VF_STATE, "INVALID_VF_STATE"},
91 {MBXERR_VFI_ALREADY_REGISTERED, "VFI_ALREADY_REGISTERED"},
92 {MBXERR_VFI_TOO_LARGE, "VFI_TOO_LARGE"},
93 {MBXERR_LOAD_FW_FAILED, "LOAD_FW_FAILED"},
94 {MBXERR_FIND_FW_FAILED, "FIND_FW_FAILED"},
95 };
96
97 emlxs_table_t emlxs_mb_cmd_table[] = {
98 {MBX_SHUTDOWN, "SHUTDOWN"},
99 {MBX_LOAD_SM, "LOAD_SM"},
100 {MBX_READ_NV, "READ_NV"},
101 {MBX_WRITE_NV, "WRITE_NV"},
102 {MBX_RUN_BIU_DIAG, "RUN_BIU_DIAG"},
103 {MBX_INIT_LINK, "INIT_LINK"},
104 {MBX_DOWN_LINK, "DOWN_LINK"},
105 {MBX_CONFIG_LINK, "CONFIG_LINK"},
106 {MBX_PART_SLIM, "PART_SLIM"},
107 {MBX_CONFIG_RING, "CONFIG_RING"},
108 {MBX_RESET_RING, "RESET_RING"},
109 {MBX_READ_CONFIG, "READ_CONFIG"},
110 {MBX_READ_RCONFIG, "READ_RCONFIG"},
111 {MBX_READ_SPARM, "READ_SPARM"},
112 {MBX_READ_STATUS, "READ_STATUS"},
113 {MBX_READ_RPI, "READ_RPI"},
114 {MBX_READ_XRI, "READ_XRI"},
115 {MBX_READ_REV, "READ_REV"},
116 {MBX_READ_LNK_STAT, "READ_LNK_STAT"},
117 {MBX_REG_LOGIN, "REG_LOGIN"},
118 {MBX_UNREG_LOGIN, "UNREG_RPI"},
119 {MBX_READ_LA, "READ_LA"},
120 {MBX_CLEAR_LA, "CLEAR_LA"},
121 {MBX_DUMP_MEMORY, "DUMP_MEMORY"},
122 {MBX_DUMP_CONTEXT, "DUMP_CONTEXT"},
123 {MBX_RUN_DIAGS, "RUN_DIAGS"},
124 {MBX_RESTART, "RESTART"},
125 {MBX_UPDATE_CFG, "UPDATE_CFG"},
126 {MBX_DOWN_LOAD, "DOWN_LOAD"},
127 {MBX_DEL_LD_ENTRY, "DEL_LD_ENTRY"},
128 {MBX_RUN_PROGRAM, "RUN_PROGRAM"},
129 {MBX_SET_MASK, "SET_MASK"},
130 {MBX_SET_VARIABLE, "SET_VARIABLE"},
131 {MBX_UNREG_D_ID, "UNREG_D_ID"},
132 {MBX_KILL_BOARD, "KILL_BOARD"},
133 {MBX_CONFIG_FARP, "CONFIG_FARP"},
134 {MBX_LOAD_AREA, "LOAD_AREA"},
135 {MBX_RUN_BIU_DIAG64, "RUN_BIU_DIAG64"},
136 {MBX_CONFIG_PORT, "CONFIG_PORT"},
137 {MBX_READ_SPARM64, "READ_SPARM64"},
138 {MBX_READ_RPI64, "READ_RPI64"},
139 {MBX_CONFIG_MSI, "CONFIG_MSI"},
140 {MBX_CONFIG_MSIX, "CONFIG_MSIX"},
141 {MBX_REG_LOGIN64, "REG_RPI"},
142 {MBX_READ_LA64, "READ_LA64"},
143 {MBX_FLASH_WR_ULA, "FLASH_WR_ULA"},
144 {MBX_SET_DEBUG, "SET_DEBUG"},
145 {MBX_GET_DEBUG, "GET_DEBUG"},
146 {MBX_LOAD_EXP_ROM, "LOAD_EXP_ROM"},
147 {MBX_BEACON, "BEACON"},
148 {MBX_CONFIG_HBQ, "CONFIG_HBQ"}, /* SLI3 */
149 {MBX_REG_VPI, "REG_VPI"}, /* NPIV */
150 {MBX_UNREG_VPI, "UNREG_VPI"}, /* NPIV */
151 {MBX_ASYNC_EVENT, "ASYNC_EVENT"},
152 {MBX_HEARTBEAT, "HEARTBEAT"},
153 {MBX_READ_EVENT_LOG_STATUS, "READ_EVENT_LOG_STATUS"},
154 {MBX_READ_EVENT_LOG, "READ_EVENT_LOG"},
155 {MBX_WRITE_EVENT_LOG, "WRITE_EVENT_LOG"},
156 {MBX_NV_LOG, "NV_LOG"},
157 {MBX_PORT_CAPABILITIES, "PORT_CAPABILITIES"},
158 {MBX_IOV_CONTROL, "IOV_CONTROL"},
159 {MBX_IOV_MBX, "IOV_MBX"},
160 {MBX_SLI_CONFIG, "SLI_CONFIG"},
161 {MBX_REQUEST_FEATURES, "REQUEST_FEATURES"},
162 {MBX_RESUME_RPI, "RESUME_RPI"},
163 {MBX_REG_VFI, "REG_VFI"},
164 {MBX_REG_FCFI, "REG_FCFI"},
165 {MBX_UNREG_VFI, "UNREG_VFI"},
166 {MBX_UNREG_FCFI, "UNREG_FCFI"},
167 {MBX_INIT_VFI, "INIT_VFI"},
168 {MBX_INIT_VPI, "INIT_VPI"},
169 {MBX_WRITE_VPARMS, "WRITE_VPARMS"},
170 {MBX_ACCESS_VDATA, "ACCESS_VDATA"}
171 }; /* emlxs_mb_cmd_table */
172
173
174 emlxs_table_t emlxs_request_feature_table[] = {
175 {SLI4_FEATURE_INHIBIT_AUTO_ABTS, "IAA "}, /* Bit 0 */
176 {SLI4_FEATURE_NPIV, "NPIV "}, /* Bit 1 */
177 {SLI4_FEATURE_DIF, "DIF "}, /* Bit 2 */
178 {SLI4_FEATURE_VIRTUAL_FABRICS, "VF "}, /* Bit 3 */
179 {SLI4_FEATURE_FCP_INITIATOR, "FCPI "}, /* Bit 4 */
180 {SLI4_FEATURE_FCP_TARGET, "FCPT "}, /* Bit 5 */
181 {SLI4_FEATURE_FCP_COMBO, "FCPC "}, /* Bit 6 */
182 {SLI4_FEATURE_RSVD1, "RSVD1 "}, /* Bit 7 */
183 {SLI4_FEATURE_RQD, "RQD "}, /* Bit 8 */
184 {SLI4_FEATURE_INHIBIT_AUTO_ABTS_R, "IAAR "}, /* Bit 9 */
185 {SLI4_FEATURE_HIGH_LOGIN_MODE, "HLM "}, /* Bit 10 */
186 {SLI4_FEATURE_PERF_HINT, "PERFH "} /* Bit 11 */
187 }; /* emlxs_request_feature_table */
188
189
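/*
 * emlxs_mb_xlate_status  Translate a mailbox status code to a string
 *
 * Returns the matching entry from emlxs_mb_status_table; unknown codes
 * are formatted as "status=%x" into a static buffer, so the fallback
 * string is overwritten by the next unknown status.
 *
 * Illustrative only (a sketch, not a call site from this file):
 *	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
 *	    "mbox failed: %s", emlxs_mb_xlate_status(mb->mbxStatus));
 */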
190 extern char *
191 emlxs_mb_xlate_status(uint32_t status)
192 {
193 static char buffer[32];
194 uint32_t i;
195 uint32_t count;
196
197 count = sizeof (emlxs_mb_status_table) / sizeof (emlxs_table_t);
198 for (i = 0; i < count; i++) {
199 if (status == emlxs_mb_status_table[i].code) {
200 return (emlxs_mb_status_table[i].string);
201 }
202 }
203
204 (void) snprintf(buffer, sizeof (buffer), "status=%x", status);
205 return (buffer);
206
207 } /* emlxs_mb_xlate_status() */
208
209
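/*
 * emlxs_mb_resetport  Build a SLI4 port reset mailbox
 *
 * Prepares an embedded MBX_SLI_CONFIG command carrying the
 * COMMON_OPCODE_RESET IOCTL (zero-length request, no completion
 * handler).  The caller is responsible for issuing the mailbox.
 */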
210 /* SLI4 */
211 /*ARGSUSED*/
212 extern void
213 emlxs_mb_resetport(emlxs_hba_t *hba, MAILBOXQ *mbq)
214 {
215 MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
216
217 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
218 mbq->nonembed = NULL;
219 mbq->mbox_cmpl = NULL; /* no cmpl needed */
220 mbq->port = (void *)&PPORT;
221
222 /*
223 * Signifies an embedded command
224 */
225 mb4->un.varSLIConfig.be.embedded = 1;
226
227 mb4->mbxCommand = MBX_SLI_CONFIG;
228 mb4->mbxOwner = OWN_HOST;
229 mb4->un.varSLIConfig.be.payload_length = IOCTL_HEADER_SZ;
230 mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
231 IOCTL_SUBSYSTEM_COMMON;
232 mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode = COMMON_OPCODE_RESET;
233 mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
234 mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length = 0;
235
236 return;
237
238 } /* emlxs_mb_resetport() */
239
240
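/*
 * emlxs_mb_request_features  Build a REQUEST_FEATURES mailbox
 *
 * Clears FC_NPIV_ENABLED and the EMLXS_SLI4_PHON/PHWQ flags before
 * requesting the feature bits in 'mask' (see
 * emlxs_request_feature_table above for the bit definitions).
 */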
241 /* SLI4 */
242 /*ARGSUSED*/
243 extern void
244 emlxs_mb_request_features(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t mask)
245 {
246 MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
247
248 hba->flag &= ~FC_NPIV_ENABLED;
249 hba->sli.sli4.flag &= ~(EMLXS_SLI4_PHON | EMLXS_SLI4_PHWQ);
250
251 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
252 mbq->nonembed = NULL;
253 mbq->mbox_cmpl = NULL; /* no cmpl needed */
254 mbq->port = (void *)&PPORT;
255
256 mb4->mbxCommand = MBX_REQUEST_FEATURES;
257 mb4->mbxOwner = OWN_HOST;
258
259 mb4->un.varReqFeatures.featuresRequested = mask;
260 return;
261
262 } /* emlxs_mb_request_features() */
263
264
265 /* SLI4 */
266 /*ARGSUSED*/
267 extern void
268 emlxs_mb_noop(emlxs_hba_t *hba, MAILBOXQ *mbq)
269 {
270 MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
271 IOCTL_COMMON_NOP *nop;
272
273 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
274 mbq->nonembed = NULL;
275 mbq->mbox_cmpl = NULL; /* no cmpl needed */
276 mbq->port = (void *)&PPORT;
277
278 /*
279 * Signifies an embedded command
280 */
281 mb4->un.varSLIConfig.be.embedded = 1;
282
283 mb4->mbxCommand = MBX_SLI_CONFIG;
284 mb4->mbxOwner = OWN_HOST;
285 mb4->un.varSLIConfig.be.payload_length = sizeof (IOCTL_COMMON_NOP) +
286 IOCTL_HEADER_SZ;
287 mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
288 IOCTL_SUBSYSTEM_COMMON;
289 mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode = COMMON_OPCODE_NOP;
290 mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
291 mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
292 sizeof (IOCTL_COMMON_NOP);
293 nop = (IOCTL_COMMON_NOP *)&mb4->un.varSLIConfig.payload;
294 nop->params.request.context = -1;
295
296 return;
297
298 } /* emlxs_mb_noop() */
299
300
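/*
 * emlxs_mbext_noop  Build a non-embedded COMMON_OPCODE_NOP mailbox
 *
 * Unlike emlxs_mb_noop(), the request header and NOP payload are
 * placed in an external MEM_BUF buffer that is attached through
 * mbq->nonembed.  Returns 1 if the buffer cannot be allocated,
 * otherwise 0.
 */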
301 /* SLI4 */
302 /*ARGSUSED*/
303 extern int
304 emlxs_mbext_noop(emlxs_hba_t *hba, MAILBOXQ *mbq)
305 {
306 MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
307 IOCTL_COMMON_NOP *nop;
308 MATCHMAP *mp;
309 mbox_req_hdr_t *hdr_req;
310
311 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
312
313 if ((mp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF)) == 0) {
314 return (1);
315 }
316 /*
317 * Save address for completion
318 * Signifies a non-embedded command
319 */
320 mb4->un.varSLIConfig.be.embedded = 0;
321 mbq->nonembed = (void *)mp;
322 mbq->mbox_cmpl = NULL; /* no cmpl needed */
323 mbq->port = (void *)&PPORT;
324
325 mb4->mbxCommand = MBX_SLI_CONFIG;
326 mb4->mbxOwner = OWN_HOST;
327
328 hdr_req = (mbox_req_hdr_t *)mp->virt;
329 hdr_req->subsystem = IOCTL_SUBSYSTEM_COMMON;
330 hdr_req->opcode = COMMON_OPCODE_NOP;
331 hdr_req->timeout = 0;
332 hdr_req->req_length = sizeof (IOCTL_COMMON_NOP);
333 nop = (IOCTL_COMMON_NOP *)(hdr_req + 1);
334 nop->params.request.context = -1;
335
336 return (0);
337
338 } /* emlxs_mbext_noop() */
339
340
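/*
 * emlxs_mb_eq_create  Build an Event Queue create mailbox
 *
 * Maps the EQ memory as 4K pages and selects the element count from
 * the page count (1, 2 or 4 pages).  IF_TYPE_6 ports use the version 2
 * request header and enable AutoValid.
 */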
341 /* SLI4 */
342 /*ARGSUSED*/
343 extern void
344 emlxs_mb_eq_create(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t num)
345 {
346 MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
347 IOCTL_COMMON_EQ_CREATE *qp;
348 uint64_t addr;
349 emlxs_port_t *port = &PPORT;
350
351 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
352 mbq->nonembed = NULL;
353 mbq->mbox_cmpl = NULL; /* no cmpl needed */
354 mbq->port = (void *)&PPORT;
355
356 /*
357 * Signifies an embedded command
358 */
359 mb4->un.varSLIConfig.be.embedded = 1;
360
361 mb4->mbxCommand = MBX_SLI_CONFIG;
362 mb4->mbxOwner = OWN_HOST;
363 mb4->un.varSLIConfig.be.payload_length =
364 sizeof (IOCTL_COMMON_EQ_CREATE) + IOCTL_HEADER_SZ;
365 mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
366 IOCTL_SUBSYSTEM_COMMON;
367 mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode = COMMON_OPCODE_EQ_CREATE;
368 mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
369 mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
370 sizeof (IOCTL_COMMON_EQ_CREATE);
371
372 qp = (IOCTL_COMMON_EQ_CREATE *)&mb4->un.varSLIConfig.payload;
373
374 qp->params.request.EQContext.Valid = 1;
375 qp->params.request.EQContext.DelayMult = EQ_DELAY_MULT;
376
377 if ((hba->sli_intf & SLI_INTF_IF_TYPE_MASK) != SLI_INTF_IF_TYPE_6) {
378 mb4->un.varSLIConfig.be.un_hdr.hdr_req.version = 0;
379 } else {
380 mb4->un.varSLIConfig.be.un_hdr.hdr_req.version = 2;
381 qp->params.request.EQContext.AutoValid = 1;
382 }
383 qp->params.request.NumPages =
384 hba->sli.sli4.eq[num].addr.size / SLI_PAGE_SIZE;
385 /* qp->params.request.EQContext.Size = EQ_ELEMENT_SIZE_4; */
386 switch (qp->params.request.NumPages) {
387 case 1:
388 qp->params.request.EQContext.Count = EQ_ELEMENT_COUNT_1024;
389 break;
390 case 2:
391 qp->params.request.EQContext.Count = EQ_ELEMENT_COUNT_2048;
392 break;
393 case 4:
394 qp->params.request.EQContext.Count = EQ_ELEMENT_COUNT_4096;
395 break;
396 default:
397 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
398 "num_pages %d not valid\n", qp->params.request.NumPages);
399 qp->params.request.NumPages = 1;
400 }
401
402 addr = hba->sli.sli4.eq[num].addr.phys;
403 for (int i = 0; i < qp->params.request.NumPages; i++) {
404 qp->params.request.Pages[i].addrLow = PADDR_LO(addr);
405 qp->params.request.Pages[i].addrHigh = PADDR_HI(addr);
406 addr += SLI_PAGE_SIZE;
407 }
408
409 return;
410
411 } /* emlxs_mb_eq_create() */
412
413
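/*
 * emlxs_mb_cq_create  Build a Completion Queue create mailbox
 *
 * The IOCTL version is chosen from hba->sli.sli4.param.CQV: version 0
 * uses a single 4K page of 16-byte entries, version 2 uses
 * EMLXS_NUM_CQ_PAGES_V2 pages and enables AutoValid on IF_TYPE_6.
 */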
414 /* SLI4 */
415 /*ARGSUSED*/
416 extern void
417 emlxs_mb_cq_create(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t num)
418 {
419 MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
420 IOCTL_COMMON_CQ_CREATE *qp;
421 IOCTL_COMMON_CQ_CREATE_V2 *qp2;
422 uint64_t addr;
423 uint32_t i;
424
425 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
426 mbq->nonembed = NULL;
427 mbq->mbox_cmpl = NULL; /* no cmpl needed */
428 mbq->port = (void *)&PPORT;
429
430 /*
431 * Signifies an embedded command
432 */
433 mb4->un.varSLIConfig.be.embedded = 1;
434
435 mb4->mbxCommand = MBX_SLI_CONFIG;
436 mb4->mbxOwner = OWN_HOST;
437
438 switch (hba->sli.sli4.param.CQV) {
439 case 0:
440 mb4->un.varSLIConfig.be.payload_length =
441 sizeof (IOCTL_COMMON_CQ_CREATE) + IOCTL_HEADER_SZ;
442 mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
443 IOCTL_SUBSYSTEM_COMMON;
444 mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
445 COMMON_OPCODE_CQ_CREATE;
446 mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
447 mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
448 sizeof (IOCTL_COMMON_CQ_CREATE);
449 mb4->un.varSLIConfig.be.un_hdr.hdr_req.version = 0;
450
451 qp = (IOCTL_COMMON_CQ_CREATE *)
452 &mb4->un.varSLIConfig.payload;
453
454 /* 256 * 16 bytes = 4K */
455 qp->params.request.CQContext.Count = CQ_ELEMENT_COUNT_256;
456 qp->params.request.CQContext.EQId =
457 (uint8_t)hba->sli.sli4.cq[num].eqid;
458 qp->params.request.CQContext.Valid = 1;
459 qp->params.request.CQContext.Eventable = 1;
460 qp->params.request.CQContext.NoDelay = 0;
461 qp->params.request.CQContext.CoalesceWM = 0;
462
463 addr = hba->sli.sli4.cq[num].addr.phys;
464 qp->params.request.NumPages = 1;
465 qp->params.request.Pages[0].addrLow = PADDR_LO(addr);
466 qp->params.request.Pages[0].addrHigh = PADDR_HI(addr);
467
468 break;
469
470 case 2:
471 default:
472 mb4->un.varSLIConfig.be.payload_length =
473 sizeof (IOCTL_COMMON_CQ_CREATE_V2) + IOCTL_HEADER_SZ;
474 mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
475 IOCTL_SUBSYSTEM_COMMON;
476 mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
477 COMMON_OPCODE_CQ_CREATE;
478 mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
479 mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
480 sizeof (IOCTL_COMMON_CQ_CREATE_V2);
481 mb4->un.varSLIConfig.be.un_hdr.hdr_req.version = 2;
482
483 qp2 = (IOCTL_COMMON_CQ_CREATE_V2 *)
484 &mb4->un.varSLIConfig.payload;
485
486 qp2->params.request.CQContext.CqeCnt = CQ_ELEMENT_COUNT_1024;
487 qp2->params.request.CQContext.CqeSize = CQE_SIZE_16_BYTES;
488 qp2->params.request.CQContext.EQId = hba->sli.sli4.cq[num].eqid;
489 qp2->params.request.CQContext.Valid = 1;
490 if ((hba->sli_intf & SLI_INTF_IF_TYPE_MASK) ==
491 SLI_INTF_IF_TYPE_6) {
492 qp2->params.request.CQContext.AutoValid = 1;
493 } else {
494 qp2->params.request.CQContext.AutoValid = 0;
495 }
496 qp2->params.request.CQContext.Eventable = 1;
497 qp2->params.request.CQContext.NoDelay = 0;
498 qp2->params.request.CQContext.Count1 = 0;
499 qp2->params.request.CQContext.CoalesceWM = 0;
500
501 addr = hba->sli.sli4.cq[num].addr.phys;
502 qp2->params.request.PageSize = CQ_PAGE_SIZE_4K;
503 qp2->params.request.NumPages = EMLXS_NUM_CQ_PAGES_V2;
504
505 for (i = 0; i < EMLXS_NUM_CQ_PAGES_V2; i++) {
506 qp2->params.request.Pages[i].addrLow = PADDR_LO(addr);
507 qp2->params.request.Pages[i].addrHigh = PADDR_HI(addr);
508 addr += 4096;
509 }
510
511 break;
512 }
513 return;
514
515 } /* emlxs_mb_cq_create() */
516
517
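/*
 * emlxs_mb_get_port_name  Build a GET_PORT_NAME mailbox
 *
 * BE-based chips use the version 0 request; other chips use version 1
 * with the port type set to PORT_TYPE_FC.
 */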
518 /* SLI4 */
519 /*ARGSUSED*/
520 extern void
521 emlxs_mb_get_port_name(emlxs_hba_t *hba, MAILBOXQ *mbq)
522 {
523 MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
524
525 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
526 mbq->nonembed = NULL;
527 mbq->mbox_cmpl = NULL; /* no cmpl needed */
528 mbq->port = (void *)&PPORT;
529
530 mb4->un.varSLIConfig.be.embedded = 1;
531 mb4->mbxCommand = MBX_SLI_CONFIG;
532 mb4->mbxOwner = OWN_HOST;
533
534 mb4->un.varSLIConfig.be.payload_length = IOCTL_HEADER_SZ;
535 mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
536 IOCTL_SUBSYSTEM_COMMON;
537 mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
538 COMMON_OPCODE_GET_PORT_NAME;
539 mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
540 mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length = 0;
541
542 if (hba->model_info.chip & EMLXS_BE_CHIPS) {
543 mb4->un.varSLIConfig.be.un_hdr.hdr_req.version = 0; /* V0 */
544 } else {
545 IOCTL_COMMON_GET_PORT_NAME_V1 *pn;
546
547 mb4->un.varSLIConfig.be.un_hdr.hdr_req.version = 1; /* V1 */
548
549 pn = (IOCTL_COMMON_GET_PORT_NAME_V1 *)
550 &mb4->un.varSLIConfig.payload;
551 pn->params.request.pt = PORT_TYPE_FC;
552 }
553
554 return;
555
556 } /* emlxs_mb_get_port_name() */
557
558
559 /* SLI4 */
560 /*ARGSUSED*/
561 extern void
562 emlxs_mb_get_sli4_params(emlxs_hba_t *hba, MAILBOXQ *mbq)
563 {
564 MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
565
566 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
567 mbq->nonembed = NULL;
568 mbq->mbox_cmpl = NULL; /* no cmpl needed */
569 mbq->port = (void *)&PPORT;
570
571 mb4->un.varSLIConfig.be.embedded = 1;
572 mb4->mbxCommand = MBX_SLI_CONFIG;
573 mb4->mbxOwner = OWN_HOST;
574
575 mb4->un.varSLIConfig.be.payload_length = IOCTL_HEADER_SZ;
576 mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
577 IOCTL_SUBSYSTEM_COMMON;
578 mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
579 COMMON_OPCODE_GET_SLI4_PARAMS;
580 mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
581 mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length = 0;
582 mb4->un.varSLIConfig.be.un_hdr.hdr_req.version = 0; /* V0 */
583
584 return;
585
586 } /* emlxs_mb_get_sli4_params() */
587
588
589 /* SLI4 */
590 /*ARGSUSED*/
591 extern void
592 emlxs_mb_get_extents_info(emlxs_hba_t *hba, MAILBOXQ *mbq, uint16_t type)
593 {
594 MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
595 IOCTL_COMMON_EXTENTS *ep;
596
597 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
598 mbq->nonembed = NULL;
599 mbq->mbox_cmpl = NULL; /* no cmpl needed */
600 mbq->port = (void *)&PPORT;
601
602 mb4->un.varSLIConfig.be.embedded = 1;
603 mb4->mbxCommand = MBX_SLI_CONFIG;
604 mb4->mbxOwner = OWN_HOST;
605
606 mb4->un.varSLIConfig.be.payload_length = IOCTL_HEADER_SZ;
607 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.subsystem =
608 IOCTL_SUBSYSTEM_COMMON;
609 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.opcode =
610 COMMON_OPCODE_GET_EXTENTS_INFO;
611 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.timeout = 0;
612 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.req_length =
613 sizeof (IOCTL_COMMON_EXTENTS);
614 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.vf_number = 0;
615 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.vh_number = 0;
616 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.pf_number = 0;
617
618 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.version = 0; /* V0 */
619
620 ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
621 ep->params.request.RscType = type;
622
623 return;
624
625 } /* emlxs_mb_get_extents_info() */
626
627
628 /* SLI4 */
629 /*ARGSUSED*/
630 extern void
631 emlxs_mb_get_extents(emlxs_hba_t *hba, MAILBOXQ *mbq, uint16_t type)
632 {
633 MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
634 IOCTL_COMMON_EXTENTS *ep;
635
636 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
637 mbq->nonembed = NULL;
638 mbq->mbox_cmpl = NULL; /* no cmpl needed */
639 mbq->port = (void *)&PPORT;
640
641 mb4->un.varSLIConfig.be.embedded = 1;
642 mb4->mbxCommand = MBX_SLI_CONFIG;
643 mb4->mbxOwner = OWN_HOST;
644
645 mb4->un.varSLIConfig.be.payload_length = IOCTL_HEADER_SZ;
646 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.subsystem =
647 IOCTL_SUBSYSTEM_COMMON;
648 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.opcode =
649 COMMON_OPCODE_GET_EXTENTS;
650 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.timeout = 0;
651 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.req_length =
652 sizeof (IOCTL_COMMON_EXTENTS);
653 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.vf_number = 0;
654 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.vh_number = 0;
655 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.pf_number = 0;
656
657 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.version = 0; /* V0 */
658
659 ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
660 ep->params.request.RscType = type;
661
662 return;
663
664 } /* emlxs_mb_get_extents() */
665
666
667 /* SLI4 */
668 /*ARGSUSED*/
669 extern void
670 emlxs_mb_alloc_extents(emlxs_hba_t *hba, MAILBOXQ *mbq, uint16_t type,
671 uint16_t count)
672 {
673 MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
674 IOCTL_COMMON_EXTENTS *ep;
675
676 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
677 mbq->nonembed = NULL;
678 mbq->mbox_cmpl = NULL; /* no cmpl needed */
679 mbq->port = (void *)&PPORT;
680
681 mb4->un.varSLIConfig.be.embedded = 1;
682 mb4->mbxCommand = MBX_SLI_CONFIG;
683 mb4->mbxOwner = OWN_HOST;
684
685 mb4->un.varSLIConfig.be.payload_length = IOCTL_HEADER_SZ;
686 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.subsystem =
687 IOCTL_SUBSYSTEM_COMMON;
688 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.opcode =
689 COMMON_OPCODE_ALLOC_EXTENTS;
690 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.timeout = 0;
691 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.req_length =
692 sizeof (IOCTL_COMMON_EXTENTS);
693 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.vf_number = 0;
694 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.vh_number = 0;
695 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.pf_number = 0;
696
697 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.version = 0; /* V0 */
698
699 ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
700 ep->params.request.RscType = type;
701
702 count = min(count, MAX_EXTENTS);
703 ep->params.request.RscCnt = count;
704
705 return;
706
707 } /* emlxs_mb_alloc_extents() */
708
709
710 /* SLI4 */
711 /*ARGSUSED*/
712 extern void
713 emlxs_mb_dealloc_extents(emlxs_hba_t *hba, MAILBOXQ *mbq, uint16_t type)
714 {
715 MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
716 IOCTL_COMMON_EXTENTS *ep;
717
718 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
719 mbq->nonembed = NULL;
720 mbq->mbox_cmpl = NULL; /* no cmpl needed */
721 mbq->port = (void *)&PPORT;
722
723 mb4->un.varSLIConfig.be.embedded = 1;
724 mb4->mbxCommand = MBX_SLI_CONFIG;
725 mb4->mbxOwner = OWN_HOST;
726
727 mb4->un.varSLIConfig.be.payload_length = IOCTL_HEADER_SZ;
728 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.subsystem =
729 IOCTL_SUBSYSTEM_COMMON;
730 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.opcode =
731 COMMON_OPCODE_DEALLOC_EXTENTS;
732 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.timeout = 0;
733 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.req_length =
734 sizeof (IOCTL_COMMON_EXTENTS);
735 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.vf_number = 0;
736 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.vh_number = 0;
737 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.pf_number = 0;
738
739 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.version = 0; /* V0 */
740
741 ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
742 ep->params.request.RscType = type;
743
744 return;
745
746 } /* emlxs_mb_dealloc_extents() */
747
748
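/*
 * emlxs_mb_wq_create  Build a Work Queue create mailbox
 *
 * The IOCTL version is chosen from hba->sli.sli4.param.WQV; both
 * versions bind the WQ to its completion queue and map
 * EMLXS_NUM_WQ_PAGES 4K pages, while version 1 also sets the WQE
 * count, WQE size and page size explicitly.
 */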
749 /* SLI4 */
750 /*ARGSUSED*/
751 extern void
752 emlxs_mb_wq_create(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t num)
753 {
754 MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
755 IOCTL_FCOE_WQ_CREATE *qp;
756 IOCTL_FCOE_WQ_CREATE_V1 *qp1;
757 uint64_t addr;
758 int i;
759
760 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
761 mbq->nonembed = NULL;
762 mbq->mbox_cmpl = NULL; /* no cmpl needed */
763 mbq->port = (void *)&PPORT;
764
765 /*
766 * Signifies an embedded command
767 */
768 mb4->un.varSLIConfig.be.embedded = 1;
769
770 mb4->mbxCommand = MBX_SLI_CONFIG;
771 mb4->mbxOwner = OWN_HOST;
772
773 switch (hba->sli.sli4.param.WQV) {
774 case 0:
775 mb4->un.varSLIConfig.be.payload_length =
776 sizeof (IOCTL_FCOE_WQ_CREATE) + IOCTL_HEADER_SZ;
777 mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
778 IOCTL_SUBSYSTEM_FCOE;
779 mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
780 FCOE_OPCODE_WQ_CREATE;
781 mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
782 mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
783 sizeof (IOCTL_FCOE_WQ_CREATE);
784 mb4->un.varSLIConfig.be.un_hdr.hdr_req.version = 0;
785
786 addr = hba->sli.sli4.wq[num].addr.phys;
787 qp = (IOCTL_FCOE_WQ_CREATE *)&mb4->un.varSLIConfig.payload;
788
789 qp->params.request.CQId = hba->sli.sli4.wq[num].cqid;
790
791 qp->params.request.NumPages = EMLXS_NUM_WQ_PAGES;
792 for (i = 0; i < EMLXS_NUM_WQ_PAGES; i++) {
793 qp->params.request.Pages[i].addrLow = PADDR_LO(addr);
794 qp->params.request.Pages[i].addrHigh = PADDR_HI(addr);
795 addr += 4096;
796 }
797
798 break;
799
800 case 1:
801 default:
802 mb4->un.varSLIConfig.be.payload_length =
803 sizeof (IOCTL_FCOE_WQ_CREATE_V1) + IOCTL_HEADER_SZ;
804 mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
805 IOCTL_SUBSYSTEM_FCOE;
806 mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
807 FCOE_OPCODE_WQ_CREATE;
808 mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
809 mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
810 sizeof (IOCTL_FCOE_WQ_CREATE_V1);
811 mb4->un.varSLIConfig.be.un_hdr.hdr_req.version = 1;
812
813 addr = hba->sli.sli4.wq[num].addr.phys;
814 qp1 = (IOCTL_FCOE_WQ_CREATE_V1 *)&mb4->un.varSLIConfig.payload;
815
816 qp1->params.request.CQId = hba->sli.sli4.wq[num].cqid;
817 qp1->params.request.NumPages = EMLXS_NUM_WQ_PAGES;
818
819 qp1->params.request.WqeCnt = WQ_DEPTH;
820 qp1->params.request.WqeSize = WQE_SIZE_64_BYTES;
821 qp1->params.request.PageSize = WQ_PAGE_SIZE_4K;
822
823 for (i = 0; i < EMLXS_NUM_WQ_PAGES; i++) {
824 qp1->params.request.Pages[i].addrLow = PADDR_LO(addr);
825 qp1->params.request.Pages[i].addrHigh = PADDR_HI(addr);
826 addr += 4096;
827 }
828
829 break;
830 }
831
832 return;
833
834 } /* emlxs_mb_wq_create() */
835
836
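/*
 * emlxs_mb_rq_create  Build a Receive Queue create mailbox
 *
 * The IOCTL version is chosen from hba->sli.sli4.param.RQV; both
 * versions use a single page and bind the RQ to its completion queue,
 * while version 1 also sets the RQE size and page size.
 */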
837 /* SLI4 */
838 /*ARGSUSED*/
839 extern void
840 emlxs_mb_rq_create(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t num)
841 {
842 MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
843 IOCTL_FCOE_RQ_CREATE *qp;
844 IOCTL_FCOE_RQ_CREATE_V1 *qp1;
845 uint64_t addr;
846
847 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
848 mbq->nonembed = NULL;
849 mbq->mbox_cmpl = NULL; /* no cmpl needed */
850 mbq->port = (void *)&PPORT;
851
852 /*
853 * Signifies an embedded command
854 */
855 mb4->un.varSLIConfig.be.embedded = 1;
856
857 mb4->mbxCommand = MBX_SLI_CONFIG;
858 mb4->mbxOwner = OWN_HOST;
859
860 switch (hba->sli.sli4.param.RQV) {
861 case 0:
862 mb4->un.varSLIConfig.be.payload_length =
863 sizeof (IOCTL_FCOE_RQ_CREATE) + IOCTL_HEADER_SZ;
864 mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
865 IOCTL_SUBSYSTEM_FCOE;
866 mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
867 FCOE_OPCODE_RQ_CREATE;
868 mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
869 mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
870 sizeof (IOCTL_FCOE_RQ_CREATE);
871 mb4->un.varSLIConfig.be.un_hdr.hdr_req.version = 0;
872
873 addr = hba->sli.sli4.rq[num].addr.phys;
874
875 qp = (IOCTL_FCOE_RQ_CREATE *)&mb4->un.varSLIConfig.payload;
876
877 qp->params.request.RQContext.RqeCnt = RQ_DEPTH_EXPONENT;
878 qp->params.request.RQContext.BufferSize = RQB_DATA_SIZE;
879 qp->params.request.RQContext.CQId =
880 hba->sli.sli4.rq[num].cqid;
881
882 qp->params.request.NumPages = 1;
883 qp->params.request.Pages[0].addrLow = PADDR_LO(addr);
884 qp->params.request.Pages[0].addrHigh = PADDR_HI(addr);
885
886 break;
887
888 case 1:
889 default:
890 mb4->un.varSLIConfig.be.payload_length =
891 sizeof (IOCTL_FCOE_RQ_CREATE_V1) + IOCTL_HEADER_SZ;
892 mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
893 IOCTL_SUBSYSTEM_FCOE;
894 mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
895 FCOE_OPCODE_RQ_CREATE;
896 mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
897 mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
898 sizeof (IOCTL_FCOE_RQ_CREATE_V1);
899 mb4->un.varSLIConfig.be.un_hdr.hdr_req.version = 1;
900
901 addr = hba->sli.sli4.rq[num].addr.phys;
902
903 qp1 = (IOCTL_FCOE_RQ_CREATE_V1 *)&mb4->un.varSLIConfig.payload;
904
905 qp1->params.request.RQContext.RqeCnt = RQ_DEPTH;
906 qp1->params.request.RQContext.RqeSize = RQE_SIZE_8_BYTES;
907 qp1->params.request.RQContext.PageSize = RQ_PAGE_SIZE_4K;
908
909 qp1->params.request.RQContext.BufferSize = RQB_DATA_SIZE;
910 qp1->params.request.RQContext.CQId =
911 hba->sli.sli4.rq[num].cqid;
912
913 qp1->params.request.NumPages = 1;
914 qp1->params.request.Pages[0].addrLow = PADDR_LO(addr);
915 qp1->params.request.Pages[0].addrHigh = PADDR_HI(addr);
916
917 break;
918 }
919
920 return;
921
922 } /* emlxs_mb_rq_create() */
923
924
925 /* SLI4 */
926 /*ARGSUSED*/
927 extern void
928 emlxs_mb_mq_create(emlxs_hba_t *hba, MAILBOXQ *mbq)
929 {
930 MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
931 IOCTL_COMMON_MQ_CREATE *qp;
932 uint64_t addr;
933
934 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
935 mbq->nonembed = NULL;
936 mbq->mbox_cmpl = NULL; /* no cmpl needed */
937 mbq->port = (void *)&PPORT;
938
939 /*
940 * Signifies an embedded command
941 */
942 mb4->un.varSLIConfig.be.embedded = 1;
943
944 mb4->mbxCommand = MBX_SLI_CONFIG;
945 mb4->mbxOwner = OWN_HOST;
946 mb4->un.varSLIConfig.be.payload_length =
947 sizeof (IOCTL_COMMON_MQ_CREATE) + IOCTL_HEADER_SZ;
948 mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
949 IOCTL_SUBSYSTEM_COMMON;
950 mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode = COMMON_OPCODE_MQ_CREATE;
951 mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
952 mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
953 sizeof (IOCTL_COMMON_MQ_CREATE);
954
955 addr = hba->sli.sli4.mq.addr.phys;
956 qp = (IOCTL_COMMON_MQ_CREATE *)&mb4->un.varSLIConfig.payload;
957
958 qp->params.request.MQContext.Size = MQ_ELEMENT_COUNT_16;
959 qp->params.request.MQContext.Valid = 1;
960 qp->params.request.MQContext.CQId = hba->sli.sli4.mq.cqid;
961
962 qp->params.request.NumPages = 1;
963 qp->params.request.Pages[0].addrLow = PADDR_LO(addr);
964 qp->params.request.Pages[0].addrHigh = PADDR_HI(addr);
965
966 return;
967
968 } /* emlxs_mb_mq_create() */
969
970
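/*
 * emlxs_mb_mq_create_ext  Build an extended Mailbox Queue create
 *
 * Selects the IOCTL version from hba->sli.sli4.param.MQV and
 * subscribes the MQ to asynchronous events; version 1 additionally
 * requests the FC and port event classes.
 */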
971 /* SLI4 */
972 /*ARGSUSED*/
973 extern void
974 emlxs_mb_mq_create_ext(emlxs_hba_t *hba, MAILBOXQ *mbq)
975 {
976 MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
977 IOCTL_COMMON_MQ_CREATE_EXT *qp;
978 IOCTL_COMMON_MQ_CREATE_EXT_V1 *qp1;
979 uint64_t addr;
980
981 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
982 mbq->nonembed = NULL;
983 mbq->mbox_cmpl = NULL; /* no cmpl needed */
984 mbq->port = (void *)&PPORT;
985
986 /*
987 * Signifies an embedded command
988 */
989 mb4->un.varSLIConfig.be.embedded = 1;
990
991 mb4->mbxCommand = MBX_SLI_CONFIG;
992 mb4->mbxOwner = OWN_HOST;
993
994 switch (hba->sli.sli4.param.MQV) {
995 case 0:
996 mb4->un.varSLIConfig.be.payload_length =
997 sizeof (IOCTL_COMMON_MQ_CREATE_EXT) + IOCTL_HEADER_SZ;
998 mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
999 IOCTL_SUBSYSTEM_COMMON;
1000 mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
1001 COMMON_OPCODE_MQ_CREATE_EXT;
1002 mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
1003 mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
1004 sizeof (IOCTL_COMMON_MQ_CREATE_EXT);
1005 mb4->un.varSLIConfig.be.un_hdr.hdr_req.version = 0;
1006
1007 addr = hba->sli.sli4.mq.addr.phys;
1008 qp = (IOCTL_COMMON_MQ_CREATE_EXT *)
1009 &mb4->un.varSLIConfig.payload;
1010
1011 qp->params.request.num_pages = 1;
1012 qp->params.request.async_event_bitmap =
1013 ASYNC_LINK_EVENT | ASYNC_FCF_EVENT | ASYNC_GROUP5_EVENT;
1014 qp->params.request.context.Size = MQ_ELEMENT_COUNT_16;
1015 qp->params.request.context.Valid = 1;
1016 qp->params.request.context.CQId = hba->sli.sli4.mq.cqid;
1017
1018 qp->params.request.pages[0].addrLow = PADDR_LO(addr);
1019 qp->params.request.pages[0].addrHigh = PADDR_HI(addr);
1020
1021 break;
1022
1023 case 1:
1024 default:
1025 mb4->un.varSLIConfig.be.payload_length =
1026 sizeof (IOCTL_COMMON_MQ_CREATE_EXT_V1) + IOCTL_HEADER_SZ;
1027 mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
1028 IOCTL_SUBSYSTEM_COMMON;
1029 mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
1030 COMMON_OPCODE_MQ_CREATE_EXT;
1031 mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
1032 mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
1033 sizeof (IOCTL_COMMON_MQ_CREATE_EXT_V1);
1034 mb4->un.varSLIConfig.be.un_hdr.hdr_req.version = 1;
1035
1036 addr = hba->sli.sli4.mq.addr.phys;
1037 qp1 = (IOCTL_COMMON_MQ_CREATE_EXT_V1 *)
1038 &mb4->un.varSLIConfig.payload;
1039
1040 qp1->params.request.num_pages = 1;
1041 qp1->params.request.async_event_bitmap =
1042 ASYNC_LINK_EVENT | ASYNC_FCF_EVENT | ASYNC_GROUP5_EVENT |
1043 ASYNC_FC_EVENT | ASYNC_PORT_EVENT;
1044 qp1->params.request.context.Size = MQ_ELEMENT_COUNT_16;
1045 qp1->params.request.context.Valid = 1;
1046 qp1->params.request.CQId = hba->sli.sli4.mq.cqid;
1047
1048 qp1->params.request.pages[0].addrLow = PADDR_LO(addr);
1049 qp1->params.request.pages[0].addrHigh = PADDR_HI(addr);
1050
1051 break;
1052 }
1053
1054 return;
1055
1056 } /* emlxs_mb_mq_create_ext() */
1057
1058
1059 /*ARGSUSED*/
1060 extern void
1061 emlxs_mb_async_event(emlxs_hba_t *hba, MAILBOXQ *mbq)
1062 {
1063 MAILBOX *mb = (MAILBOX *)mbq;
1064
1065 bzero((void *) mb, MAILBOX_CMD_BSIZE);
1066
1067 mb->mbxCommand = MBX_ASYNC_EVENT;
1068 mb->mbxOwner = OWN_HOST;
1069 mb->un.varWords[0] = hba->channel_els;
1070 mbq->mbox_cmpl = NULL; /* no cmpl needed */
1071 mbq->port = (void *)&PPORT;
1072
1073 return;
1074
1075 } /* emlxs_mb_async_event() */
1076
1077
1078 /*ARGSUSED*/
1079 extern void
1080 emlxs_mb_heartbeat(emlxs_hba_t *hba, MAILBOXQ *mbq)
1081 {
1082 MAILBOX *mb = (MAILBOX *)mbq;
1083
1084 bzero((void *) mb, MAILBOX_CMD_BSIZE);
1085
1086 mb->mbxCommand = MBX_HEARTBEAT;
1087 mb->mbxOwner = OWN_HOST;
1088 mbq->mbox_cmpl = NULL; /* no cmpl needed for hbeat */
1089 mbq->port = (void *)&PPORT;
1090
1091 return;
1092
1093 } /* emlxs_mb_heartbeat() */
1094
1095
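/*
 * emlxs_mb_gpio_write  Build a LOWLEVEL GPIO write mailbox
 *
 * Builds an embedded MBX_SLI_CONFIG command with the
 * LOWLEVEL_OPCODE_GPIO_RDWR IOCTL to drive 'val' onto logical GPIO
 * pin 'pin'.  Note that the entire MAILBOXQ is zeroed here, not just
 * the mailbox area.
 */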
1096 /*ARGSUSED*/
1097 extern void
1098 emlxs_mb_gpio_write(emlxs_hba_t *hba, MAILBOXQ *mbq, uint8_t pin, uint8_t val)
1099 {
1100 emlxs_port_t *port = &PPORT;
1101 MAILBOX4 *mb4;
1102 be_req_hdr_t *be_req;
1103 mbox_req_hdr_t *hdr_req;
1104 IOCTL_LOWLEVEL_GPIO_RDWR *gpio;
1105
1106 bzero((void *) mbq, sizeof (MAILBOXQ));
1107
1108 mbq->port = port;
1109
1110 mb4 = (MAILBOX4 *)mbq->mbox;
1111 mb4->mbxCommand = MBX_SLI_CONFIG;
1112 mb4->mbxOwner = OWN_HOST;
1113
1114 be_req = (be_req_hdr_t *)&mb4->un.varSLIConfig.be;
1115 be_req->embedded = 1;
1116 be_req->payload_length = sizeof (mbox_req_hdr_t) +
1117 sizeof (IOCTL_LOWLEVEL_GPIO_RDWR);
1118
1119 hdr_req = &be_req->un_hdr.hdr_req;
1120 hdr_req->subsystem = IOCTL_SUBSYSTEM_LOWLEVEL;
1121 hdr_req->opcode = LOWLEVEL_OPCODE_GPIO_RDWR;
1122 hdr_req->timeout = 0;
1123 hdr_req->req_length = sizeof (IOCTL_LOWLEVEL_GPIO_RDWR);
1124
1125 gpio = (IOCTL_LOWLEVEL_GPIO_RDWR *)&mb4->un.varSLIConfig.payload;
1126 gpio->params.request.GpioAction = LOWLEVEL_GPIO_ACT_WRITE;
1127 gpio->params.request.LogicalPin = pin;
1128 gpio->params.request.PinValue = val;
1129 } /* emlxs_mb_gpio_write */
1130
1131 #ifdef MSI_SUPPORT
1132
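/*
 * emlxs_mb_config_msi  Build a CONFIG_MSI mailbox
 *
 * Message 0 is the default; each remaining interrupt vector is mapped
 * to the host-attention conditions in intr_map[].  The index into
 * messageNumberByHA[] differs between big- and little-endian hosts
 * because of the half-word swap noted below.
 */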
1133 /*ARGSUSED*/
1134 extern void
1135 emlxs_mb_config_msi(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t *intr_map,
1136 uint32_t intr_count)
1137 {
1138 MAILBOX *mb = (MAILBOX *)mbq;
1139 uint16_t i;
1140 uint32_t mask;
1141
1142 bzero((void *)mb, MAILBOX_CMD_BSIZE);
1143
1144 mb->mbxCommand = MBX_CONFIG_MSI;
1145
1146 /* Set the default message id to zero */
1147 mb->un.varCfgMSI.defaultPresent = 1;
1148 mb->un.varCfgMSI.defaultMessageNumber = 0;
1149
1150 for (i = 1; i < intr_count; i++) {
1151 mask = intr_map[i];
1152
1153 mb->un.varCfgMSI.attConditions |= mask;
1154
1155 #ifdef EMLXS_BIG_ENDIAN
1156 if (mask & HA_R0ATT) {
1157 mb->un.varCfgMSI.messageNumberByHA[3] = i;
1158 }
1159 if (mask & HA_R1ATT) {
1160 mb->un.varCfgMSI.messageNumberByHA[7] = i;
1161 }
1162 if (mask & HA_R2ATT) {
1163 mb->un.varCfgMSI.messageNumberByHA[11] = i;
1164 }
1165 if (mask & HA_R3ATT) {
1166 mb->un.varCfgMSI.messageNumberByHA[15] = i;
1167 }
1168 if (mask & HA_LATT) {
1169 mb->un.varCfgMSI.messageNumberByHA[29] = i;
1170 }
1171 if (mask & HA_MBATT) {
1172 mb->un.varCfgMSI.messageNumberByHA[30] = i;
1173 }
1174 if (mask & HA_ERATT) {
1175 mb->un.varCfgMSI.messageNumberByHA[31] = i;
1176 }
1177 #endif /* EMLXS_BIG_ENDIAN */
1178
1179 #ifdef EMLXS_LITTLE_ENDIAN
1180 /* Accounts for half word swap of LE architecture */
1181 if (mask & HA_R0ATT) {
1182 mb->un.varCfgMSI.messageNumberByHA[2] = i;
1183 }
1184 if (mask & HA_R1ATT) {
1185 mb->un.varCfgMSI.messageNumberByHA[6] = i;
1186 }
1187 if (mask & HA_R2ATT) {
1188 mb->un.varCfgMSI.messageNumberByHA[10] = i;
1189 }
1190 if (mask & HA_R3ATT) {
1191 mb->un.varCfgMSI.messageNumberByHA[14] = i;
1192 }
1193 if (mask & HA_LATT) {
1194 mb->un.varCfgMSI.messageNumberByHA[28] = i;
1195 }
1196 if (mask & HA_MBATT) {
1197 mb->un.varCfgMSI.messageNumberByHA[31] = i;
1198 }
1199 if (mask & HA_ERATT) {
1200 mb->un.varCfgMSI.messageNumberByHA[30] = i;
1201 }
1202 #endif /* EMLXS_LITTLE_ENDIAN */
1203 }
1204
1205 mb->mbxOwner = OWN_HOST;
1206 mbq->mbox_cmpl = NULL; /* no cmpl needed */
1207 mbq->port = (void *)&PPORT;
1208
1209 return;
1210
1211 } /* emlxs_mb_config_msi() */
1212
1213
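/*
 * emlxs_mb_config_msix  Build a CONFIG_MSIX mailbox
 *
 * Same idea as emlxs_mb_config_msi(), but for MSI-X: message 0 is the
 * default and the remaining vectors are mapped through
 * messageNumberByHA[] with a full word swap on little-endian hosts.
 */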
1214 /*ARGSUSED*/
1215 extern void
1216 emlxs_mb_config_msix(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t *intr_map,
1217 uint32_t intr_count)
1218 {
1219 MAILBOX *mb = (MAILBOX *)mbq;
1220 uint8_t i;
1221 uint32_t mask;
1222
1223 bzero((void *)mb, MAILBOX_CMD_BSIZE);
1224
1225 mb->mbxCommand = MBX_CONFIG_MSIX;
1226
1227 /* Set the default message id to zero */
1228 mb->un.varCfgMSIX.defaultPresent = 1;
1229 mb->un.varCfgMSIX.defaultMessageNumber = 0;
1230
1231 for (i = 1; i < intr_count; i++) {
1232 mask = intr_map[i];
1233
1234 mb->un.varCfgMSIX.attConditions1 |= mask;
1235
1236 #ifdef EMLXS_BIG_ENDIAN
1237 if (mask & HA_R0ATT) {
1238 mb->un.varCfgMSIX.messageNumberByHA[3] = i;
1239 }
1240 if (mask & HA_R1ATT) {
1241 mb->un.varCfgMSIX.messageNumberByHA[7] = i;
1242 }
1243 if (mask & HA_R2ATT) {
1244 mb->un.varCfgMSIX.messageNumberByHA[11] = i;
1245 }
1246 if (mask & HA_R3ATT) {
1247 mb->un.varCfgMSIX.messageNumberByHA[15] = i;
1248 }
1249 if (mask & HA_LATT) {
1250 mb->un.varCfgMSIX.messageNumberByHA[29] = i;
1251 }
1252 if (mask & HA_MBATT) {
1253 mb->un.varCfgMSIX.messageNumberByHA[30] = i;
1254 }
1255 if (mask & HA_ERATT) {
1256 mb->un.varCfgMSIX.messageNumberByHA[31] = i;
1257 }
1258 #endif /* EMLXS_BIG_ENDIAN */
1259
1260 #ifdef EMLXS_LITTLE_ENDIAN
1261 /* Accounts for word swap of LE architecture */
1262 if (mask & HA_R0ATT) {
1263 mb->un.varCfgMSIX.messageNumberByHA[0] = i;
1264 }
1265 if (mask & HA_R1ATT) {
1266 mb->un.varCfgMSIX.messageNumberByHA[4] = i;
1267 }
1268 if (mask & HA_R2ATT) {
1269 mb->un.varCfgMSIX.messageNumberByHA[8] = i;
1270 }
1271 if (mask & HA_R3ATT) {
1272 mb->un.varCfgMSIX.messageNumberByHA[12] = i;
1273 }
1274 if (mask & HA_LATT) {
1275 mb->un.varCfgMSIX.messageNumberByHA[30] = i;
1276 }
1277 if (mask & HA_MBATT) {
1278 mb->un.varCfgMSIX.messageNumberByHA[29] = i;
1279 }
1280 if (mask & HA_ERATT) {
1281 mb->un.varCfgMSIX.messageNumberByHA[28] = i;
1282 }
1283 #endif /* EMLXS_LITTLE_ENDIAN */
1284 }
1285
1286 mb->mbxOwner = OWN_HOST;
1287 mbq->mbox_cmpl = NULL; /* no cmpl needed */
1288 mbq->port = (void *)&PPORT;
1289
1290 return;
1291
1292 } /* emlxs_mb_config_msix() */
1293
1294
1295 #endif /* MSI_SUPPORT */
1296
1297
1298 /*ARGSUSED*/
1299 extern void
1300 emlxs_mb_reset_ring(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t ringno)
1301 {
1302 MAILBOX *mb = (MAILBOX *)mbq;
1303
1304 bzero((void *)mb, MAILBOX_CMD_BSIZE);
1305
1306 mb->mbxCommand = MBX_RESET_RING;
1307 mb->un.varRstRing.ring_no = ringno;
1308 mb->mbxOwner = OWN_HOST;
1309 mbq->mbox_cmpl = NULL; /* no cmpl needed */
1310 mbq->port = (void *)&PPORT;
1311
1312 return;
1313
1314 } /* emlxs_mb_reset_ring() */
1315
1316
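/*
 * emlxs_mb_dump_vpd  Build a DUMP_MEMORY mailbox for the VPD region
 *
 * SLI4 ports dump the VPD region into the preallocated dump_region
 * DMA buffer; SLI3 ports return up to DMP_VPD_DUMP_WCOUNT words
 * directly in the mailbox.
 */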
1317 /*ARGSUSED*/
1318 extern void
1319 emlxs_mb_dump_vpd(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t offset)
1320 {
1321
1322 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
1323 MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
1324
1325 /* Clear the local dump_region */
1326 bzero(hba->sli.sli4.dump_region.virt,
1327 hba->sli.sli4.dump_region.size);
1328
1329 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
1330
1331 mb4->mbxCommand = MBX_DUMP_MEMORY;
1332 mb4->un.varDmp4.type = DMP_NV_PARAMS;
1333 mb4->un.varDmp4.entry_index = offset;
1334 mb4->un.varDmp4.region_id = DMP_VPD_REGION;
1335
1336 mb4->un.varDmp4.available_cnt = hba->sli.sli4.dump_region.size;
1337 mb4->un.varDmp4.addrHigh =
1338 PADDR_HI(hba->sli.sli4.dump_region.phys);
1339 mb4->un.varDmp4.addrLow =
1340 PADDR_LO(hba->sli.sli4.dump_region.phys);
1341 mb4->un.varDmp4.rsp_cnt = 0;
1342
1343 mb4->mbxOwner = OWN_HOST;
1344
1345 } else {
1346 MAILBOX *mb = (MAILBOX *)mbq;
1347
1348 bzero((void *)mb, MAILBOX_CMD_BSIZE);
1349
1350 mb->mbxCommand = MBX_DUMP_MEMORY;
1351 mb->un.varDmp.cv = 1;
1352 mb->un.varDmp.type = DMP_NV_PARAMS;
1353 mb->un.varDmp.entry_index = offset;
1354 mb->un.varDmp.region_id = DMP_VPD_REGION;
1355
1356 /* limited by mailbox size */
1357 mb->un.varDmp.word_cnt = DMP_VPD_DUMP_WCOUNT;
1358
1359 mb->un.varDmp.co = 0;
1360 mb->un.varDmp.resp_offset = 0;
1361 mb->mbxOwner = OWN_HOST;
1362 }
1363
1364 mbq->mbox_cmpl = NULL; /* no cmpl needed */
1365 mbq->port = (void *)&PPORT;
1366
1367 } /* emlxs_mb_dump_vpd() */
1368
1369
1370 /* SLI4 */
1371 /*ARGSUSED*/
1372 extern void
1373 emlxs_mb_dump_fcoe(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t offset)
1374 {
1375 MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
1376
1377 if (hba->sli_mode < EMLXS_HBA_SLI4_MODE) {
1378 return;
1379 }
1380
1381 /* Clear the local dump_region */
1382 bzero(hba->sli.sli4.dump_region.virt,
1383 hba->sli.sli4.dump_region.size);
1384
1385 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
1386
1387 mb4->mbxCommand = MBX_DUMP_MEMORY;
1388 mb4->un.varDmp4.type = DMP_NV_PARAMS;
1389 mb4->un.varDmp4.entry_index = offset;
1390 mb4->un.varDmp4.region_id = DMP_FCOE_REGION;
1391
1392 mb4->un.varDmp4.available_cnt = hba->sli.sli4.dump_region.size;
1393 mb4->un.varDmp4.addrHigh =
1394 PADDR_HI(hba->sli.sli4.dump_region.phys);
1395 mb4->un.varDmp4.addrLow =
1396 PADDR_LO(hba->sli.sli4.dump_region.phys);
1397 mb4->un.varDmp4.rsp_cnt = 0;
1398
1399 mb4->mbxOwner = OWN_HOST;
1400
1401 mbq->mbox_cmpl = NULL; /* no cmpl needed */
1402 mbq->port = (void *)&PPORT;
1403
1404 } /* emlxs_mb_dump_fcoe() */
1405
1406
1407 /*ARGSUSED*/
1408 extern void
1409 emlxs_mb_dump(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t offset, uint32_t words)
1410 {
1411
1412 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
1413 MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
1414
1415 /* Clear the local dump_region */
1416 bzero(hba->sli.sli4.dump_region.virt,
1417 hba->sli.sli4.dump_region.size);
1418
1419 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
1420
1421 mb4->mbxCommand = MBX_DUMP_MEMORY;
1422 mb4->un.varDmp4.type = DMP_MEM_REG;
1423 mb4->un.varDmp4.entry_index = offset;
1424 mb4->un.varDmp4.region_id = 0;
1425
1426 mb4->un.varDmp4.available_cnt = min((words*4),
1427 hba->sli.sli4.dump_region.size);
1428 mb4->un.varDmp4.addrHigh =
1429 PADDR_HI(hba->sli.sli4.dump_region.phys);
1430 mb4->un.varDmp4.addrLow =
1431 PADDR_LO(hba->sli.sli4.dump_region.phys);
1432 mb4->un.varDmp4.rsp_cnt = 0;
1433
1434 mb4->mbxOwner = OWN_HOST;
1435
1436 } else {
1437
1438 MAILBOX *mb = (MAILBOX *)mbq;
1439
1440 bzero((void *)mb, MAILBOX_CMD_BSIZE);
1441
1442 mb->mbxCommand = MBX_DUMP_MEMORY;
1443 mb->un.varDmp.type = DMP_MEM_REG;
1444 mb->un.varDmp.word_cnt = words;
1445 mb->un.varDmp.base_adr = offset;
1446
1447 mb->un.varDmp.co = 0;
1448 mb->un.varDmp.resp_offset = 0;
1449 mb->mbxOwner = OWN_HOST;
1450 }
1451
1452 mbq->mbox_cmpl = NULL; /* no cmpl needed */
1453 mbq->port = (void *)&PPORT;
1454
1455 return;
1456
1457 } /* emlxs_mb_dump() */
1458
1459
1460 /*
1461 * emlxs_mb_read_nv Issue a READ NVPARAM mailbox command
1462 */
1463 /*ARGSUSED*/
1464 extern void
1465 emlxs_mb_read_nv(emlxs_hba_t *hba, MAILBOXQ *mbq)
1466 {
1467 MAILBOX *mb = (MAILBOX *)mbq;
1468
1469 bzero((void *)mb, MAILBOX_CMD_BSIZE);
1470
1471 mb->mbxCommand = MBX_READ_NV;
1472 mb->mbxOwner = OWN_HOST;
1473 mbq->mbox_cmpl = NULL; /* no cmpl needed */
1474 mbq->port = (void *)&PPORT;
1475
1476 } /* emlxs_mb_read_nv() */
1477
1478
1479 /*
1480 * emlxs_mb_read_rev Issue a READ REV mailbox command
1481 */
1482 /*ARGSUSED*/
1483 extern void
1484 emlxs_mb_read_rev(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t v3)
1485 {
1486 MAILBOX *mb = (MAILBOX *)mbq;
1487
1488 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
1489 bzero((void *) mb, MAILBOX_CMD_SLI4_BSIZE);
1490 mbq->nonembed = NULL;
1491 } else {
1492 bzero((void *)mb, MAILBOX_CMD_BSIZE);
1493
1494 mb->un.varRdRev.cv = 1;
1495
1496 if (v3) {
1497 mb->un.varRdRev.cv3 = 1;
1498 }
1499 }
1500
1501 mb->mbxCommand = MBX_READ_REV;
1502 mb->mbxOwner = OWN_HOST;
1503 mbq->mbox_cmpl = NULL;
1504 mbq->port = (void *)&PPORT;
1505
1506 } /* emlxs_mb_read_rev() */
1507
1508
1509 /*
1510 * emlxs_mb_run_biu_diag Issue a RUN_BIU_DIAG mailbox command
1511 */
1512 /*ARGSUSED*/
1513 extern uint32_t
1514 emlxs_mb_run_biu_diag(emlxs_hba_t *hba, MAILBOXQ *mbq, uint64_t out,
1515 uint64_t in)
1516 {
1517 MAILBOX *mb = (MAILBOX *)mbq;
1518
1519 bzero((void *)mb, MAILBOX_CMD_BSIZE);
1520
1521 mb->mbxCommand = MBX_RUN_BIU_DIAG64;
1522 mb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize = MEM_ELSBUF_SIZE;
1523 mb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh = PADDR_HI(out);
1524 mb->un.varBIUdiag.un.s2.xmit_bde64.addrLow = PADDR_LO(out);
1525 mb->un.varBIUdiag.un.s2.rcv_bde64.tus.f.bdeSize = MEM_ELSBUF_SIZE;
1526 mb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh = PADDR_HI(in);
1527 mb->un.varBIUdiag.un.s2.rcv_bde64.addrLow = PADDR_LO(in);
1528 mb->mbxOwner = OWN_HOST;
1529 mbq->mbox_cmpl = NULL; /* no cmpl needed */
1530 mbq->port = (void *)&PPORT;
1531
1532 return (0);
1533 } /* emlxs_mb_run_biu_diag() */
1534
1535
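/*
 * emlxs_mb_retry  Reissue a failed MBX_NOWAIT mailbox
 *
 * Copies the mailbox into a fresh MEM_MBOX buffer, updates the
 * mailbox statistics, releases the active mailbox slot and reissues
 * the copy with MBX_NOWAIT.  The copy is freed if the reissue is not
 * accepted.
 */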
1536 /* This should only be called with active MBX_NOWAIT mailboxes */
1537 void
1538 emlxs_mb_retry(emlxs_hba_t *hba, MAILBOXQ *mbq)
1539 {
1540 MAILBOX *mb;
1541 MAILBOX *mbox;
1542 int rc;
1543
1544 mbox = (MAILBOX *)emlxs_mem_get(hba, MEM_MBOX);
1545 if (!mbox) {
1546 return;
1547 }
1548 mb = (MAILBOX *)mbq;
1549 bcopy((uint8_t *)mb, (uint8_t *)mbox, MAILBOX_CMD_BSIZE);
1550 mbox->mbxOwner = OWN_HOST;
1551 mbox->mbxStatus = 0;
1552
1553 mutex_enter(&EMLXS_PORT_LOCK);
1554
1555 HBASTATS.MboxCompleted++;
1556
1557 if (mb->mbxStatus != 0) {
1558 HBASTATS.MboxError++;
1559 } else {
1560 HBASTATS.MboxGood++;
1561 }
1562
1563 hba->mbox_mbq = NULL;
1564 hba->mbox_queue_flag = 0;
1565
1566 mutex_exit(&EMLXS_PORT_LOCK);
1567
1568 rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbox, MBX_NOWAIT, 0);
1569 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
1570 emlxs_mem_put(hba, MEM_MBOX, (void *)mbox);
1571 }
1572 return;
1573
1574 } /* emlxs_mb_retry() */
1575
1576
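/*
 * emlxs_read_la_mbcmpl  Completion handler for READ_LA64
 *
 * Copies the link attention data and the 128-byte ALPA map, tracks
 * the link event tag, and on a link-up event saves the link speed and
 * topology, queues SET_VARIABLE/READ_SPARM/CONFIG_LINK follow-up
 * mailboxes and declares the link up; a link-down event declares the
 * link down.  Link attention interrupts are re-enabled on exit.
 */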
1577 /* SLI3 */
1578 static uint32_t
1579 emlxs_read_la_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq)
1580 {
1581 emlxs_port_t *port = (emlxs_port_t *)mbq->port;
1582 MAILBOX *mb;
1583 MAILBOXQ *mbox;
1584 MATCHMAP *mp;
1585 READ_LA_VAR la;
1586 int i;
1587 uint32_t control;
1588
1589 mb = (MAILBOX *)mbq;
1590 if (mb->mbxStatus) {
1591 if (mb->mbxStatus == MBXERR_NO_RESOURCES) {
1592 control = mb->un.varReadLA.un.lilpBde64.tus.f.bdeSize;
1593 if (control == 0) {
1594 (void) emlxs_mb_read_la(hba, mbq);
1595 }
1596 emlxs_mb_retry(hba, mbq);
1597 return (1);
1598 }
1599 /* Enable Link Attention interrupts */
1600 mutex_enter(&EMLXS_PORT_LOCK);
1601
1602 if (!(hba->sli.sli3.hc_copy & HC_LAINT_ENA)) {
1603 hba->sli.sli3.hc_copy |= HC_LAINT_ENA;
1604 WRITE_CSR_REG(hba, FC_HC_REG(hba),
1605 hba->sli.sli3.hc_copy);
1606 #ifdef FMA_SUPPORT
1607 /* Access handle validation */
1608 EMLXS_CHK_ACC_HANDLE(hba,
1609 hba->sli.sli3.csr_acc_handle);
1610 #endif /* FMA_SUPPORT */
1611 }
1612
1613 mutex_exit(&EMLXS_PORT_LOCK);
1614 return (0);
1615 }
1616 bcopy((void *)&mb->un.varReadLA, (void *)&la, sizeof (READ_LA_VAR));
1617
1618 mp = (MATCHMAP *)mbq->bp;
1619 if (mp) {
1620 bcopy((caddr_t)mp->virt, (caddr_t)port->alpa_map, 128);
1621 } else {
1622 bzero((caddr_t)port->alpa_map, 128);
1623 }
1624
1625 if (la.attType == AT_LINK_UP) {
1626 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_linkup_atten_msg,
1627 "tag=%d -> %d ALPA=%x",
1628 (uint32_t)hba->link_event_tag,
1629 (uint32_t)la.eventTag,
1630 (uint32_t)la.granted_AL_PA);
1631 } else {
1632 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_linkdown_atten_msg,
1633 "tag=%d -> %d ALPA=%x",
1634 (uint32_t)hba->link_event_tag,
1635 (uint32_t)la.eventTag,
1636 (uint32_t)la.granted_AL_PA);
1637 }
1638
1639 if (la.pb) {
1640 hba->flag |= FC_BYPASSED_MODE;
1641 } else {
1642 hba->flag &= ~FC_BYPASSED_MODE;
1643 }
1644
1645 if (hba->link_event_tag == la.eventTag) {
1646 HBASTATS.LinkMultiEvent++;
1647 } else if (hba->link_event_tag + 1 < la.eventTag) {
1648 HBASTATS.LinkMultiEvent++;
1649
1650 /* Make sure link is declared down */
1651 emlxs_linkdown(hba);
1652 }
1653
1654 hba->link_event_tag = la.eventTag;
1655 port->lip_type = 0;
1656
1657 /* If link not already up then declare it up now */
1658 if ((la.attType == AT_LINK_UP) && (hba->state < FC_LINK_UP)) {
1659
1660 #ifdef MENLO_SUPPORT
1661 if (hba->model_info.vendor_id == PCI_VENDOR_ID_EMULEX &&
1662 hba->model_info.device_id == PCI_DEVICE_ID_HORNET &&
1663 (hba->flag & (FC_ILB_MODE | FC_ELB_MODE))) {
1664 la.topology = TOPOLOGY_LOOP;
1665 la.granted_AL_PA = 0;
1666 port->alpa_map[0] = 1;
1667 port->alpa_map[1] = 0;
1668 la.lipType = LT_PORT_INIT;
1669 }
1670 #endif /* MENLO_SUPPORT */
1671 /* Save the linkspeed */
1672 hba->linkspeed = la.UlnkSpeed;
1673
1674 /* Check for old model adapters that only */
1675 /* supported 1Gb */
1676 if ((hba->linkspeed == 0) &&
1677 (hba->model_info.chip & EMLXS_DRAGONFLY_CHIP)) {
1678 hba->linkspeed = LA_1GHZ_LINK;
1679 }
1680
1681 if ((hba->topology = la.topology) == TOPOLOGY_LOOP) {
1682 port->granted_alpa = la.granted_AL_PA;
1683 port->did = port->granted_alpa;
1684 port->lip_type = la.lipType;
1685 if (hba->flag & FC_SLIM2_MODE) {
1686 i = la.un.lilpBde64.tus.f.bdeSize;
1687 } else {
1688 i = la.un.lilpBde.bdeSize;
1689 }
1690
1691 if (i == 0) {
1692 port->alpa_map[0] = 0;
1693 } else {
1694 uint8_t *alpa_map;
1695 uint32_t j;
1696
1697 /* Check number of devices in map */
1698 if (port->alpa_map[0] > 127) {
1699 port->alpa_map[0] = 127;
1700 }
1701
1702 alpa_map = (uint8_t *)port->alpa_map;
1703
1704 EMLXS_MSGF(EMLXS_CONTEXT,
1705 &emlxs_link_atten_msg,
1706 "alpa_map: %d device(s): "
1707 "%02x %02x %02x %02x %02x %02x "
1708 "%02x", alpa_map[0], alpa_map[1],
1709 alpa_map[2], alpa_map[3],
1710 alpa_map[4], alpa_map[5],
1711 alpa_map[6], alpa_map[7]);
1712
1713 for (j = 8; j <= alpa_map[0]; j += 8) {
1714 EMLXS_MSGF(EMLXS_CONTEXT,
1715 &emlxs_link_atten_msg,
1716 "alpa_map: "
1717 "%02x %02x %02x %02x %02x "
1718 "%02x %02x %02x",
1719 alpa_map[j],
1720 alpa_map[j + 1],
1721 alpa_map[j + 2],
1722 alpa_map[j + 3],
1723 alpa_map[j + 4],
1724 alpa_map[j + 5],
1725 alpa_map[j + 6],
1726 alpa_map[j + 7]);
1727 }
1728 }
1729 }
1730 #ifdef MENLO_SUPPORT
1731 /* Check if Menlo maintenance mode is enabled */
1732 if (hba->model_info.vendor_id == PCI_VENDOR_ID_EMULEX &&
1733 hba->model_info.device_id == PCI_DEVICE_ID_HORNET) {
1734 if (la.mm == 1) {
1735 EMLXS_MSGF(EMLXS_CONTEXT,
1736 &emlxs_link_atten_msg,
1737 "Maintenance Mode enabled.");
1738
1739 mutex_enter(&EMLXS_PORT_LOCK);
1740 hba->flag |= FC_MENLO_MODE;
1741 mutex_exit(&EMLXS_PORT_LOCK);
1742
1743 mutex_enter(&EMLXS_LINKUP_LOCK);
1744 cv_broadcast(&EMLXS_LINKUP_CV);
1745 mutex_exit(&EMLXS_LINKUP_LOCK);
1746 } else {
1747 EMLXS_MSGF(EMLXS_CONTEXT,
1748 &emlxs_link_atten_msg,
1749 "Maintenance Mode disabled.");
1750 }
1751
1752 /* Check FCoE attention bit */
1753 if (la.fa == 1) {
1754 emlxs_thread_spawn(hba,
1755 emlxs_fcoe_attention_thread,
1756 0, 0);
1757 }
1758 }
1759 #endif /* MENLO_SUPPORT */
1760
1761 if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
1762 MEM_MBOX))) {
1763 /* This should turn on DELAYED ABTS for */
1764 /* ELS timeouts */
1765 emlxs_mb_set_var(hba, mbox, 0x00052198, 0x1);
1766
1767 emlxs_mb_put(hba, mbox);
1768 }
1769
1770 if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
1771 MEM_MBOX))) {
1772 /* Read the adapter's service */
1773 /* parameters */
1774 if (emlxs_mb_read_sparam(hba, mbox) == 0) {
1775 emlxs_mb_put(hba, mbox);
1776 } else {
1777 emlxs_mem_put(hba, MEM_MBOX,
1778 (void *)mbox);
1779 }
1780 }
1781
1782 if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
1783 MEM_MBOX))) {
1784 emlxs_mb_config_link(hba, mbox);
1785
1786 emlxs_mb_put(hba, mbox);
1787 }
1788
1789 /* Declare the linkup here */
1790 emlxs_linkup(hba);
1791 }
1792
1793 /* If link not already down then declare it down now */
1794 else if (la.attType == AT_LINK_DOWN) {
1795 /* Make sure link is declared down */
1796 emlxs_linkdown(hba);
1797 }
1798
1799 /* Enable Link attention interrupt */
1800 mutex_enter(&EMLXS_PORT_LOCK);
1801
1802 if (!(hba->sli.sli3.hc_copy & HC_LAINT_ENA)) {
1803 hba->sli.sli3.hc_copy |= HC_LAINT_ENA;
1804 WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
1805 #ifdef FMA_SUPPORT
1806 /* Access handle validation */
1807 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
1808 #endif /* FMA_SUPPORT */
1809 }
1810
1811 mutex_exit(&EMLXS_PORT_LOCK);
1812
1813 return (0);
1814
1815 } /* emlxs_read_la_mbcmpl() */
1816
1817
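/*
 * emlxs_mb_read_la  Build a READ_LA64 mailbox
 *
 * Allocates a MEM_BUF buffer for the 128-byte loop map and saves it
 * in mbq->bp for emlxs_read_la_mbcmpl().  Returns 1 if the buffer
 * cannot be allocated, otherwise 0.
 */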
1818 extern uint32_t
1819 emlxs_mb_read_la(emlxs_hba_t *hba, MAILBOXQ *mbq)
1820 {
1821 MAILBOX *mb = (MAILBOX *)mbq;
1822 MATCHMAP *mp;
1823
1824 bzero((void *)mb, MAILBOX_CMD_BSIZE);
1825
1826 if ((mp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF)) == 0) {
1827 mb->mbxCommand = MBX_READ_LA64;
1828
1829 return (1);
1830 }
1831
1832 mb->mbxCommand = MBX_READ_LA64;
1833 mb->un.varReadLA.un.lilpBde64.tus.f.bdeSize = 128;
1834 mb->un.varReadLA.un.lilpBde64.addrHigh = PADDR_HI(mp->phys);
1835 mb->un.varReadLA.un.lilpBde64.addrLow = PADDR_LO(mp->phys);
1836 mb->mbxOwner = OWN_HOST;
1837 mbq->mbox_cmpl = emlxs_read_la_mbcmpl;
1838 mbq->port = (void *)&PPORT;
1839
1840 /*
1841 * save address for completion
1842 */
1843 mbq->bp = (void *)mp;
1844
1845 return (0);
1846
1847 } /* emlxs_mb_read_la() */
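/*
 * Caller-pattern sketch (added note, not part of the original source):
 * a caller typically takes the MAILBOXQ from the MEM_MBOX pool, builds
 * the READ_LA64 command with emlxs_mb_read_la(), and only issues it if
 * this routine returns 0 (a return of 1 means no DMA buffer was
 * available).  This mirrors the retry path in emlxs_clear_la_mbcmpl()
 * below:
 *
 *	if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))) {
 *		if (emlxs_mb_read_la(hba, mbox) == 0) {
 *			rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba,
 *			    (MAILBOX *)mbox, MBX_NOWAIT, 0);
 *			if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
 *				emlxs_mem_put(hba, MEM_MBOX, (void *)mbox);
 *			}
 *		} else {
 *			emlxs_mem_put(hba, MEM_MBOX, (void *)mbox);
 *		}
 *	}
 */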
1848
1849
1850 /* SLI3 */
1851 static uint32_t
1852 emlxs_clear_la_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq)
1853 {
1854 emlxs_port_t *port = (emlxs_port_t *)mbq->port;
1855 MAILBOX *mb;
1856 MAILBOXQ *mbox;
1857 emlxs_port_t *vport;
1858 uint32_t la_enable;
1859 int i, rc;
1860
1861 mb = (MAILBOX *)mbq;
1862 if (mb->mbxStatus) {
1863 la_enable = 1;
1864
1865 if (mb->mbxStatus == 0x1601) {
1866 /* Get a buffer which will be used for */
1867 /* mailbox commands */
1868 if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
1869 MEM_MBOX))) {
1870 /* Get link attention message */
1871 if (emlxs_mb_read_la(hba, mbox) == 0) {
1872 rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba,
1873 (MAILBOX *)mbox, MBX_NOWAIT, 0);
1874 if ((rc != MBX_BUSY) &&
1875 (rc != MBX_SUCCESS)) {
1876 emlxs_mem_put(hba,
1877 MEM_MBOX, (void *)mbox);
1878 }
1879 la_enable = 0;
1880 } else {
1881 emlxs_mem_put(hba, MEM_MBOX,
1882 (void *)mbox);
1883 }
1884 }
1885 }
1886
1887 mutex_enter(&EMLXS_PORT_LOCK);
1888 if (la_enable) {
1889 if (!(hba->sli.sli3.hc_copy & HC_LAINT_ENA)) {
1890 /* Enable Link Attention interrupts */
1891 hba->sli.sli3.hc_copy |= HC_LAINT_ENA;
1892 WRITE_CSR_REG(hba, FC_HC_REG(hba),
1893 hba->sli.sli3.hc_copy);
1894 #ifdef FMA_SUPPORT
1895 /* Access handle validation */
1896 EMLXS_CHK_ACC_HANDLE(hba,
1897 hba->sli.sli3.csr_acc_handle);
1898 #endif /* FMA_SUPPORT */
1899 }
1900 } else {
1901 if (hba->sli.sli3.hc_copy & HC_LAINT_ENA) {
1902 /* Disable Link Attention interrupts */
1903 hba->sli.sli3.hc_copy &= ~HC_LAINT_ENA;
1904 WRITE_CSR_REG(hba, FC_HC_REG(hba),
1905 hba->sli.sli3.hc_copy);
1906 #ifdef FMA_SUPPORT
1907 /* Access handle validation */
1908 EMLXS_CHK_ACC_HANDLE(hba,
1909 hba->sli.sli3.csr_acc_handle);
1910 #endif /* FMA_SUPPORT */
1911 }
1912 }
1913 mutex_exit(&EMLXS_PORT_LOCK);
1914
1915 return (0);
1916 }
1917 	/* Enable Link Attention interrupts */
1918 mutex_enter(&EMLXS_PORT_LOCK);
1919
1920 if (!(hba->sli.sli3.hc_copy & HC_LAINT_ENA)) {
1921 hba->sli.sli3.hc_copy |= HC_LAINT_ENA;
1922 WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
1923 #ifdef FMA_SUPPORT
1924 /* Access handle validation */
1925 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
1926 #endif /* FMA_SUPPORT */
1927 }
1928
1929 if (hba->state >= FC_LINK_UP) {
1930 EMLXS_STATE_CHANGE_LOCKED(hba, FC_READY);
1931 }
1932
1933 mutex_exit(&EMLXS_PORT_LOCK);
1934
1935 /* Adapter is now ready for FCP traffic */
1936 if (hba->state == FC_READY) {
1937
1938 /* Register vpi's for all ports that have did's */
1939 for (i = 0; i < MAX_VPORTS; i++) {
1940 vport = &VPORT(i);
1941
1942 if (!(vport->flag & EMLXS_PORT_BOUND) ||
1943 !(vport->did)) {
1944 continue;
1945 }
1946
1947 (void) emlxs_mb_reg_vpi(vport, NULL);
1948 }
1949
1950 /* Attempt to send any pending IO */
1951 EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[hba->channel_fcp], 0);
1952 }
1953 return (0);
1954
1955 } /* emlxs_clear_la_mbcmpl() */
1956
1957
1958 /* SLI3 */
1959 extern void
1960 emlxs_mb_clear_la(emlxs_hba_t *hba, MAILBOXQ *mbq)
1961 {
1962 MAILBOX *mb = (MAILBOX *)mbq;
1963
1964 #ifdef FC_RPI_CHECK
1965 emlxs_rpi_check(hba);
1966 #endif /* FC_RPI_CHECK */
1967
1968 bzero((void *)mb, MAILBOX_CMD_BSIZE);
1969
1970 mb->un.varClearLA.eventTag = hba->link_event_tag;
1971 mb->mbxCommand = MBX_CLEAR_LA;
1972 mb->mbxOwner = OWN_HOST;
1973 mbq->mbox_cmpl = emlxs_clear_la_mbcmpl;
1974 mbq->port = (void *)&PPORT;
1975
1976 return;
1977
1978 } /* emlxs_mb_clear_la() */
1979
1980
1981 /*
1982 * emlxs_mb_read_status Issue a READ STATUS mailbox command
1983 */
1984 /*ARGSUSED*/
1985 extern void
1986 emlxs_mb_read_status(emlxs_hba_t *hba, MAILBOXQ *mbq)
1987 {
1988 MAILBOX *mb = (MAILBOX *)mbq;
1989
1990 bzero((void *)mb, MAILBOX_CMD_BSIZE);
1991
1992 mb->mbxCommand = MBX_READ_STATUS;
1993 mb->mbxOwner = OWN_HOST;
1994 mbq->mbox_cmpl = NULL; /* no cmpl needed */
1995 mbq->port = (void *)&PPORT;
1996
1997 } /* emlxs_mb_read_status() */
1998
1999
2000 /*
2001 * emlxs_mb_read_lnk_stat Issue a LINK STATUS mailbox command
2002 */
2003 /*ARGSUSED*/
2004 extern void
2005 emlxs_mb_read_lnk_stat(emlxs_hba_t *hba, MAILBOXQ *mbq)
2006 {
2007 MAILBOX *mb = (MAILBOX *)mbq;
2008
2009 bzero((void *)mb, MAILBOX_CMD_BSIZE);
2010
2011 mb->mbxCommand = MBX_READ_LNK_STAT;
2012 mb->mbxOwner = OWN_HOST;
2013 mb->un.varRdLnk.rec = 0; /* req_ext_counters */
2014 mb->un.varRdLnk.clrc = 0; /* clear_all_counters */
2015 mb->un.varRdLnk.clof = 0; /* clear_overflow_flags */
2016 mbq->mbox_cmpl = NULL; /* no cmpl needed */
2017 mbq->port = (void *)&PPORT;
2018
2019 } /* emlxs_mb_read_lnk_stat() */
2020
2021
2022
2023
2024
2025
2026 /*
2027 * emlxs_mb_config_ring Issue a CONFIG RING mailbox command
2028 */
2029 extern void
2030 emlxs_mb_config_ring(emlxs_hba_t *hba, int32_t ring, MAILBOXQ *mbq)
2031 {
2032 MAILBOX *mb = (MAILBOX *)mbq;
2033 int32_t i;
2034 int32_t j;
2035
2036 bzero((void *)mb, MAILBOX_CMD_BSIZE);
2037
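	/*
	 * Descriptive note (added): the mask registers for this ring
	 * start after all masks used by lower-numbered rings, so the
	 * loop below first sums ring_masks[0..ring-1] to find the
	 * starting offset 'j' into the shared ring_rval/ring_rmask/
	 * ring_tval/ring_tmask arrays; at most 6 entries exist in
	 * total across all rings.
	 */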
2038 j = 0;
2039 for (i = 0; i < ring; i++) {
2040 j += hba->sli.sli3.ring_masks[i];
2041 }
2042
2043 for (i = 0; i < hba->sli.sli3.ring_masks[ring]; i++) {
2044 if ((j + i) >= 6) {
2045 break;
2046 }
2047
2048 mb->un.varCfgRing.rrRegs[i].rval =
2049 hba->sli.sli3.ring_rval[j + i];
2050 mb->un.varCfgRing.rrRegs[i].rmask =
2051 hba->sli.sli3.ring_rmask[j + i];
2052 mb->un.varCfgRing.rrRegs[i].tval =
2053 hba->sli.sli3.ring_tval[j + i];
2054 mb->un.varCfgRing.rrRegs[i].tmask =
2055 hba->sli.sli3.ring_tmask[j + i];
2056 }
2057
2058 mb->un.varCfgRing.ring = ring;
2059 mb->un.varCfgRing.profile = 0;
2060 mb->un.varCfgRing.maxOrigXchg = 0;
2061 mb->un.varCfgRing.maxRespXchg = 0;
2062 mb->un.varCfgRing.recvNotify = 1;
2063 mb->un.varCfgRing.numMask = hba->sli.sli3.ring_masks[ring];
2064 mb->mbxCommand = MBX_CONFIG_RING;
2065 mb->mbxOwner = OWN_HOST;
2066 mbq->mbox_cmpl = NULL; /* no cmpl needed */
2067 mbq->port = (void *)&PPORT;
2068
2069 return;
2070
2071 } /* emlxs_mb_config_ring() */
2072
2073
2074 /*
2075 * emlxs_mb_config_link Issue a CONFIG LINK mailbox command
2076 */
2077 extern void
2078 emlxs_mb_config_link(emlxs_hba_t *hba, MAILBOXQ *mbq)
2079 {
2080 MAILBOX *mb = (MAILBOX *)mbq;
2081 emlxs_port_t *port = &PPORT;
2082 emlxs_config_t *cfg = &CFG;
2083
2084 bzero((void *)mb, MAILBOX_CMD_BSIZE);
2085
2086 /*
2087 * NEW_FEATURE SLI-2, Coalescing Response Feature.
2088 */
2089 if (cfg[CFG_CR_DELAY].current) {
2090 mb->un.varCfgLnk.cr = 1;
2091 mb->un.varCfgLnk.ci = 1;
2092 mb->un.varCfgLnk.cr_delay = cfg[CFG_CR_DELAY].current;
2093 mb->un.varCfgLnk.cr_count = cfg[CFG_CR_COUNT].current;
2094 }
2095
2096 if (cfg[CFG_ACK0].current) {
2097 mb->un.varCfgLnk.ack0_enable = 1;
2098 }
2099
2100 mb->un.varCfgLnk.myId = port->did;
2101 mb->un.varCfgLnk.edtov = hba->fc_edtov;
2102 mb->un.varCfgLnk.arbtov = hba->fc_arbtov;
2103 mb->un.varCfgLnk.ratov = hba->fc_ratov;
2104 mb->un.varCfgLnk.rttov = hba->fc_rttov;
2105 mb->un.varCfgLnk.altov = hba->fc_altov;
2106 mb->un.varCfgLnk.crtov = hba->fc_crtov;
2107
2108 mb->mbxCommand = MBX_CONFIG_LINK;
2109 mb->mbxOwner = OWN_HOST;
2110 mbq->mbox_cmpl = NULL;
2111 mbq->port = (void *)port;
2112
2113 return;
2114
2115 } /* emlxs_mb_config_link() */
2116
2117
2118 static uint32_t
2119 emlxs_init_link_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq)
2120 {
2121 emlxs_port_t *port = (emlxs_port_t *)mbq->port;
2122 emlxs_config_t *cfg = &CFG;
2123 MAILBOX *mb;
2124
2125 mb = (MAILBOX *)mbq;
2126 if (mb->mbxStatus) {
2127 if ((hba->flag & FC_SLIM2_MODE) &&
2128 (hba->mbox_queue_flag == MBX_NOWAIT)) {
2129 /* Retry only MBX_NOWAIT requests */
2130
2131 if ((cfg[CFG_LINK_SPEED].current > 0) &&
2132 ((mb->mbxStatus == 0x0011) ||
2133 (mb->mbxStatus == 0x0500))) {
2134
2135 EMLXS_MSGF(EMLXS_CONTEXT,
2136 &emlxs_mbox_event_msg,
2137 "Retrying. %s: status=%x. Auto-speed set.",
2138 emlxs_mb_cmd_xlate(mb->mbxCommand),
2139 (uint32_t)mb->mbxStatus);
2140
2141 mb->un.varInitLnk.link_flags &=
2142 ~FLAGS_LINK_SPEED;
2143 mb->un.varInitLnk.link_speed = 0;
2144
2145 emlxs_mb_retry(hba, mbq);
2146 return (1);
2147 }
2148 }
2149 }
2150 return (0);
2151
2152 } /* emlxs_init_link_mbcmpl() */
2153
2154
2155 /*
2156 * emlxs_mb_init_link Issue an INIT LINK mailbox command
2157 */
2158 extern void
2159 emlxs_mb_init_link(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t topology,
2160 uint32_t linkspeed)
2161 {
2162 MAILBOX *mb = (MAILBOX *)mbq;
2163 emlxs_vpd_t *vpd = &VPD;
2164 emlxs_config_t *cfg = &CFG;
2165
2166 if ((hba->sli_mode == EMLXS_HBA_SLI4_MODE) &&
2167 (SLI4_FCOE_MODE)) {
2168 bzero((void *) mb, MAILBOX_CMD_SLI4_BSIZE);
2169 mbq->nonembed = NULL;
2170 mbq->mbox_cmpl = NULL; /* no cmpl needed */
2171 mbq->port = (void *)&PPORT;
2172
2173 mb->mbxCommand = (volatile uint8_t) MBX_INIT_LINK;
2174 mb->mbxOwner = OWN_HOST;
2175 return;
2176 }
2177
2178 bzero((void *)mb, MAILBOX_CMD_BSIZE);
2179
2180 switch (topology) {
2181 case FLAGS_LOCAL_LB:
2182 mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP;
2183 mb->un.varInitLnk.link_flags |= FLAGS_LOCAL_LB;
2184 break;
2185 case FLAGS_TOPOLOGY_MODE_LOOP_PT:
2186 mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP;
2187 mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER;
2188 break;
2189 case FLAGS_TOPOLOGY_MODE_PT_PT:
2190 mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
2191 break;
2192 case FLAGS_TOPOLOGY_MODE_LOOP:
2193 mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP;
2194 break;
2195 case FLAGS_TOPOLOGY_MODE_PT_LOOP:
2196 mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
2197 mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER;
2198 break;
2199 }
2200
2201 if (cfg[CFG_LILP_ENABLE].current == 0) {
2202 /* Disable LIRP/LILP support */
2203 mb->un.varInitLnk.link_flags |= FLAGS_LIRP_LILP;
2204 }
2205
2206 /*
2207 * Setting up the link speed
2208 */
2209 switch (linkspeed) {
2210 case 0:
2211 break;
2212
2213 case 1:
2214 linkspeed = (vpd->link_speed & LMT_1GB_CAPABLE) == 0 ? 0 :
2215 LINK_SPEED_1G;
2216 break;
2217
2218 case 2:
2219 linkspeed = (vpd->link_speed & LMT_2GB_CAPABLE) == 0 ? 0 :
2220 LINK_SPEED_2G;
2221 break;
2222
2223 case 4:
2224 linkspeed = (vpd->link_speed & LMT_4GB_CAPABLE) == 0 ? 0 :
2225 LINK_SPEED_4G;
2226 break;
2227
2228 case 8:
2229 linkspeed = (vpd->link_speed & LMT_8GB_CAPABLE) == 0 ? 0 :
2230 LINK_SPEED_8G;
2231 break;
2232
2233 case 10:
2234 linkspeed = (vpd->link_speed & LMT_10GB_CAPABLE) == 0 ? 0 :
2235 LINK_SPEED_10G;
2236 break;
2237
2238 case 16:
2239 linkspeed = (vpd->link_speed & LMT_16GB_CAPABLE) == 0 ? 0 :
2240 LINK_SPEED_16G;
2241 break;
2242
2243 case 32:
2244 linkspeed = (vpd->link_speed & LMT_32GB_CAPABLE) == 0 ? 0 :
2245 LINK_SPEED_32G;
2246 break;
2247
2248 default:
2249 linkspeed = 0;
2250 break;
2251
2252 }
2253
2254 if ((linkspeed > 0) && (vpd->feaLevelHigh >= 0x02)) {
2255 mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
2256 mb->un.varInitLnk.link_speed = linkspeed;
2257 }
2258
2259 if (hba->sli_mode == 3) {
2260 mb->un.varInitLnk.link_flags |= FLAGS_PREABORT_RETURN;
2261 }
2262
2263 mb->un.varInitLnk.fabric_AL_PA =
2264 (uint8_t)cfg[CFG_ASSIGN_ALPA].current;
2265 mb->mbxCommand = (volatile uint8_t) MBX_INIT_LINK;
2266 mb->mbxOwner = OWN_HOST;
2267 mbq->mbox_cmpl = emlxs_init_link_mbcmpl;
2268 mbq->port = (void *)&PPORT;
2269
2270
2271 return;
2272
2273 } /* emlxs_mb_init_link() */
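/*
 * Usage note (added, illustrative): the 'linkspeed' argument is the
 * configured speed in Gb/s (1, 2, 4, 8, 10, 16 or 32; 0 requests
 * auto-negotiation).  A requested speed is only honored when the VPD
 * reports the matching LMT_*_CAPABLE bit and feaLevelHigh >= 0x02;
 * otherwise the command falls back to auto speed.  For example, a
 * hypothetical caller wanting loop topology at 8Gb/s would use:
 *
 *	emlxs_mb_init_link(hba, mbq, FLAGS_TOPOLOGY_MODE_LOOP, 8);
 */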
2274
2275
2276 /*
2277 * emlxs_mb_down_link Issue a DOWN LINK mailbox command
2278 */
2279 /*ARGSUSED*/
2280 extern void
2281 emlxs_mb_down_link(emlxs_hba_t *hba, MAILBOXQ *mbq)
2282 {
2283 MAILBOX *mb = (MAILBOX *)mbq;
2284
2285 bzero((void *)mb, MAILBOX_CMD_BSIZE);
2286
2287 mb->mbxCommand = MBX_DOWN_LINK;
2288 mb->mbxOwner = OWN_HOST;
2289 mbq->mbox_cmpl = NULL;
2290 mbq->port = (void *)&PPORT;
2291
2292 return;
2293
2294 } /* emlxs_mb_down_link() */
2295
2296
2297 static uint32_t
2298 emlxs_read_sparam_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq)
2299 {
2300 emlxs_port_t *port = &PPORT;
2301 MAILBOX *mb;
2302 MATCHMAP *mp;
2303 emlxs_port_t *vport;
2304 int32_t i;
2305 uint32_t control;
2306 uint8_t null_wwn[8];
2307
2308 mb = (MAILBOX *)mbq;
2309 if (mb->mbxStatus) {
2310 if (mb->mbxStatus == MBXERR_NO_RESOURCES) {
2311 control = mb->un.varRdSparm.un.sp64.tus.f.bdeSize;
2312 if (control == 0) {
2313 (void) emlxs_mb_read_sparam(hba, mbq);
2314 }
2315 emlxs_mb_retry(hba, mbq);
2316 return (1);
2317 }
2318 return (0);
2319 }
2320 mp = (MATCHMAP *)mbq->bp;
2321 if (!mp) {
2322 return (0);
2323 }
2324
2325 bcopy((caddr_t)mp->virt, (caddr_t)&hba->sparam, sizeof (SERV_PARM));
2326
2327 /* Initialize the node name and port name only once */
2328 bzero(null_wwn, 8);
2329 if ((bcmp((caddr_t)&hba->wwnn, (caddr_t)null_wwn, 8) == 0) &&
2330 (bcmp((caddr_t)&hba->wwpn, (caddr_t)null_wwn, 8) == 0)) {
2331 bcopy((caddr_t)&hba->sparam.nodeName,
2332 (caddr_t)&hba->wwnn, sizeof (NAME_TYPE));
2333
2334 bcopy((caddr_t)&hba->sparam.portName,
2335 (caddr_t)&hba->wwpn, sizeof (NAME_TYPE));
2336 } else {
2337 bcopy((caddr_t)&hba->wwnn,
2338 (caddr_t)&hba->sparam.nodeName, sizeof (NAME_TYPE));
2339
2340 bcopy((caddr_t)&hba->wwpn,
2341 (caddr_t)&hba->sparam.portName, sizeof (NAME_TYPE));
2342 }
2343
2344 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2345 "SPARAM: EDTOV hba=%x mbox_csp=%x BBC=%x",
2346 hba->fc_edtov, hba->sparam.cmn.e_d_tov,
2347 hba->sparam.cmn.bbCreditlsb);
2348
2349 /* Initialize the physical port */
2350 bcopy((caddr_t)&hba->sparam, (caddr_t)&port->sparam,
2351 sizeof (SERV_PARM));
2352 bcopy((caddr_t)&hba->wwpn, (caddr_t)&port->wwpn,
2353 sizeof (NAME_TYPE));
2354 bcopy((caddr_t)&hba->wwnn, (caddr_t)&port->wwnn,
2355 sizeof (NAME_TYPE));
2356
2357 /* Initialize the virtual ports */
2358 for (i = 1; i < MAX_VPORTS; i++) {
2359 vport = &VPORT(i);
2360 if (! (vport->flag & EMLXS_PORT_BOUND)) {
2361 continue;
2362 }
2363
2364 bcopy((caddr_t)&hba->sparam,
2365 (caddr_t)&vport->sparam,
2366 sizeof (SERV_PARM));
2367
2368 bcopy((caddr_t)&vport->wwnn,
2369 (caddr_t)&vport->sparam.nodeName,
2370 sizeof (NAME_TYPE));
2371
2372 bcopy((caddr_t)&vport->wwpn,
2373 (caddr_t)&vport->sparam.portName,
2374 sizeof (NAME_TYPE));
2375 }
2376
2377 return (0);
2378
2379 } /* emlxs_read_sparam_mbcmpl() */
2380
2381
2382 /*
2383 * emlxs_mb_read_sparam Issue a READ SPARAM mailbox command
2384 */
2385 extern uint32_t
2386 emlxs_mb_read_sparam(emlxs_hba_t *hba, MAILBOXQ *mbq)
2387 {
2388 MAILBOX *mb = (MAILBOX *)mbq;
2389 MATCHMAP *mp;
2390
2391 bzero((void *)mb, MAILBOX_CMD_BSIZE);
2392
2393 if ((mp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF)) == 0) {
2394 mb->mbxCommand = MBX_READ_SPARM64;
2395
2396 return (1);
2397 }
2398
2399 mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (SERV_PARM);
2400 mb->un.varRdSparm.un.sp64.addrHigh = PADDR_HI(mp->phys);
2401 mb->un.varRdSparm.un.sp64.addrLow = PADDR_LO(mp->phys);
2402 mb->mbxCommand = MBX_READ_SPARM64;
2403 mb->mbxOwner = OWN_HOST;
2404 mbq->mbox_cmpl = emlxs_read_sparam_mbcmpl;
2405 mbq->port = (void *)&PPORT;
2406
2407 /*
2408 * save address for completion
2409 */
2410 mbq->bp = (void *)mp;
2411
2412 return (0);
2413
2414 } /* emlxs_mb_read_sparam() */
2415
2416
2417 /*
2418 * emlxs_mb_read_rpi Issue a READ RPI mailbox command
2419 */
2420 /*ARGSUSED*/
2421 extern uint32_t
2422 emlxs_mb_read_rpi(emlxs_hba_t *hba, uint32_t rpi, MAILBOXQ *mbq,
2423 uint32_t flag)
2424 {
2425 MAILBOX *mb = (MAILBOX *)mbq;
2426
2427 bzero((void *)mb, MAILBOX_CMD_BSIZE);
2428
2429 /*
2430 * Set flag to issue action on cmpl
2431 */
2432 mb->un.varWords[30] = flag;
2433 mb->un.varRdRPI.reqRpi = (volatile uint16_t) rpi;
2434 mb->mbxCommand = MBX_READ_RPI64;
2435 mb->mbxOwner = OWN_HOST;
2436 mbq->mbox_cmpl = NULL; /* no cmpl needed */
2437 mbq->port = (void *)&PPORT;
2438
2439 return (0);
2440 } /* emlxs_mb_read_rpi() */
2441
2442
2443 /*
2444 * emlxs_mb_read_xri Issue a READ XRI mailbox command
2445 */
2446 /*ARGSUSED*/
2447 extern uint32_t
2448 emlxs_mb_read_xri(emlxs_hba_t *hba, uint32_t xri, MAILBOXQ *mbq,
2449 uint32_t flag)
2450 {
2451 MAILBOX *mb = (MAILBOX *)mbq;
2452
2453 bzero((void *)mb, MAILBOX_CMD_BSIZE);
2454
2455 /*
2456 * Set flag to issue action on cmpl
2457 */
2458 mb->un.varWords[30] = flag;
2459 mb->un.varRdXRI.reqXri = (volatile uint16_t)xri;
2460 mb->mbxCommand = MBX_READ_XRI;
2461 mb->mbxOwner = OWN_HOST;
2462 mbq->mbox_cmpl = NULL; /* no cmpl needed */
2463 mbq->port = (void *)&PPORT;
2464
2465 return (0);
2466 } /* emlxs_mb_read_xri() */
2467
2468
2469 /*ARGSUSED*/
2470 extern int32_t
2471 emlxs_mb_check_sparm(emlxs_hba_t *hba, SERV_PARM *nsp)
2472 {
2473 uint32_t nsp_value;
2474 uint32_t *iptr;
2475
2476 if (nsp->cmn.fPort) {
2477 return (0);
2478 }
2479
2480 /* Validate the service parameters */
2481 iptr = (uint32_t *)&nsp->portName;
2482 if (iptr[0] == 0 && iptr[1] == 0) {
2483 return (1);
2484 }
2485
2486 iptr = (uint32_t *)&nsp->nodeName;
2487 if (iptr[0] == 0 && iptr[1] == 0) {
2488 return (2);
2489 }
2490
2491 if (nsp->cls2.classValid) {
2492 nsp_value =
2493 ((nsp->cls2.rcvDataSizeMsb & 0x0f) << 8) | nsp->cls2.
2494 rcvDataSizeLsb;
2495
2496 /* If the receive data length is zero then set it to */
2497 /* the CSP value */
2498 if (!nsp_value) {
2499 nsp->cls2.rcvDataSizeMsb = nsp->cmn.bbRcvSizeMsb;
2500 nsp->cls2.rcvDataSizeLsb = nsp->cmn.bbRcvSizeLsb;
2501 return (0);
2502 }
2503 }
2504
2505 if (nsp->cls3.classValid) {
2506 nsp_value =
2507 ((nsp->cls3.rcvDataSizeMsb & 0x0f) << 8) | nsp->cls3.
2508 rcvDataSizeLsb;
2509
2510 /* If the receive data length is zero then set it to */
2511 /* the CSP value */
2512 if (!nsp_value) {
2513 nsp->cls3.rcvDataSizeMsb = nsp->cmn.bbRcvSizeMsb;
2514 nsp->cls3.rcvDataSizeLsb = nsp->cmn.bbRcvSizeLsb;
2515 return (0);
2516 }
2517 }
2518
2519 return (0);
2520
2521 } /* emlxs_mb_check_sparm() */
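/*
 * Return-value note (added): 0 means the service parameters are usable
 * (F_Port parameters are accepted as-is, and a zero class 2/3 receive
 * data size is patched from the common service parameters); 1 indicates
 * a NULL port name and 2 indicates a NULL node name.
 */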
2522
2523
2524
2525
2526 /*
2527 * emlxs_mb_set_var Issue a special debug mbox command to write slim
2528 */
2529 /*ARGSUSED*/
2530 extern void
2531 emlxs_mb_set_var(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t addr,
2532 uint32_t value)
2533 {
2534 MAILBOX *mb = (MAILBOX *)mbq;
2535
2536 bzero((void *)mb, MAILBOX_CMD_BSIZE);
2537
2538 /* addr = 0x090597 is AUTO ABTS disable for ELS commands */
2539 /* addr = 0x052198 is DELAYED ABTS enable for ELS commands */
2540 /* addr = 0x100506 is for setting PCI MAX READ value */
2541
2542 /*
2543 * Always turn on DELAYED ABTS for ELS timeouts
2544 */
2545 if ((addr == 0x052198) && (value == 0)) {
2546 value = 1;
2547 }
2548
2549 mb->un.varWords[0] = addr;
2550 mb->un.varWords[1] = value;
2551 mb->mbxCommand = MBX_SET_VARIABLE;
2552 mb->mbxOwner = OWN_HOST;
2553 mbq->mbox_cmpl = NULL; /* no cmpl needed */
2554 mbq->port = (void *)&PPORT;
2555
2556 } /* emlxs_mb_set_var() */
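/*
 * Caller-pattern sketch (added): this command is normally built into a
 * MAILBOXQ taken from the MEM_MBOX pool and then queued for issue, as
 * done in the link-attention completion above:
 *
 *	if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))) {
 *		emlxs_mb_set_var(hba, mbox, 0x00052198, 0x1);
 *		emlxs_mb_put(hba, mbox);
 *	}
 */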
2557
2558
2559 /*
2560 * Disable Traffic Cop
2561 */
2562 /*ARGSUSED*/
2563 extern void
2564 emlxs_disable_tc(emlxs_hba_t *hba, MAILBOXQ *mbq)
2565 {
2566 MAILBOX *mb = (MAILBOX *)mbq;
2567
2568 bzero((void *)mb, MAILBOX_CMD_BSIZE);
2569
2570 mb->un.varWords[0] = 0x50797;
2571 mb->un.varWords[1] = 0;
2572 mb->un.varWords[2] = 0xfffffffe;
2573 mb->mbxCommand = MBX_SET_VARIABLE;
2574 mb->mbxOwner = OWN_HOST;
2575 mbq->mbox_cmpl = NULL; /* no cmpl needed */
2576 mbq->port = (void *)&PPORT;
2577
2578 } /* emlxs_disable_tc() */
2579
2580
2581 extern void
2582 emlxs_mb_config_hbq(emlxs_hba_t *hba, MAILBOXQ *mbq, int hbq_id)
2583 {
2584 HBQ_INIT_t *hbq;
2585 MAILBOX *mb = (MAILBOX *)mbq;
2586 int i;
2587
2588 bzero((void *)mb, MAILBOX_CMD_BSIZE);
2589
2590 hbq = &hba->sli.sli3.hbq_table[hbq_id];
2591
2592 mb->un.varCfgHbq.hbqId = hbq_id;
2593 mb->un.varCfgHbq.numEntries = hbq->HBQ_numEntries;
2594 mb->un.varCfgHbq.recvNotify = hbq->HBQ_recvNotify;
2595 mb->un.varCfgHbq.numMask = hbq->HBQ_num_mask;
2596 mb->un.varCfgHbq.profile = hbq->HBQ_profile;
2597 mb->un.varCfgHbq.ringMask = hbq->HBQ_ringMask;
2598 mb->un.varCfgHbq.headerLen = hbq->HBQ_headerLen;
2599 mb->un.varCfgHbq.logEntry = hbq->HBQ_logEntry;
2600 mb->un.varCfgHbq.hbqaddrLow = PADDR_LO(hbq->HBQ_host_buf.phys);
2601 mb->un.varCfgHbq.hbqaddrHigh = PADDR_HI(hbq->HBQ_host_buf.phys);
2602 mb->mbxCommand = MBX_CONFIG_HBQ;
2603 mb->mbxOwner = OWN_HOST;
2604 mbq->mbox_cmpl = NULL;
2605 mbq->port = (void *)&PPORT;
2606
2607 	/* Copy info for profiles 2, 3 and 5; for other profiles this is reserved */
2608 if ((hbq->HBQ_profile == 2) || (hbq->HBQ_profile == 3) ||
2609 (hbq->HBQ_profile == 5)) {
2610 bcopy(&hbq->profiles.allprofiles,
2611 (void *)&mb->un.varCfgHbq.profiles.allprofiles,
2612 sizeof (hbq->profiles));
2613 }
2614
2615 /* Return if no rctl / type masks for this HBQ */
2616 if (!hbq->HBQ_num_mask) {
2617 return;
2618 }
2619
2620 /* Otherwise we setup specific rctl / type masks for this HBQ */
2621 for (i = 0; i < hbq->HBQ_num_mask; i++) {
2622 mb->un.varCfgHbq.hbqMasks[i].tmatch =
2623 hbq->HBQ_Masks[i].tmatch;
2624 mb->un.varCfgHbq.hbqMasks[i].tmask = hbq->HBQ_Masks[i].tmask;
2625 mb->un.varCfgHbq.hbqMasks[i].rctlmatch =
2626 hbq->HBQ_Masks[i].rctlmatch;
2627 mb->un.varCfgHbq.hbqMasks[i].rctlmask =
2628 hbq->HBQ_Masks[i].rctlmask;
2629 }
2630
2631 return;
2632
2633 } /* emlxs_mb_config_hbq() */
2634
2635
2636 /* SLI3 */
2637 static uint32_t
2638 emlxs_reg_vpi_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq)
2639 {
2640 emlxs_port_t *port = (emlxs_port_t *)mbq->port;
2641 MAILBOX *mb;
2642
2643 mb = (MAILBOX *)mbq;
2644
2645 mutex_enter(&EMLXS_PORT_LOCK);
2646
2647 if (mb->mbxStatus != MBX_SUCCESS) {
2648 port->flag &= ~EMLXS_PORT_REG_VPI;
2649 mutex_exit(&EMLXS_PORT_LOCK);
2650
2651 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2652 "cmpl_reg_vpi:%d failed. status=%x",
2653 port->vpi, mb->mbxStatus);
2654 return (0);
2655 }
2656
2657 port->flag |= EMLXS_PORT_REG_VPI_CMPL;
2658
2659 mutex_exit(&EMLXS_PORT_LOCK);
2660
2661 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2662 "cmpl_reg_vpi:%d ",
2663 port->vpi);
2664
2665 return (0);
2666
2667 } /* emlxs_reg_vpi_mbcmpl */
2668
2669
2670 /* SLI3 */
2671 extern uint32_t
2672 emlxs_mb_reg_vpi(emlxs_port_t *port, emlxs_buf_t *sbp)
2673 {
2674 emlxs_hba_t *hba = HBA;
2675 MAILBOXQ *mbq;
2676 MAILBOX *mb;
2677 int rval;
2678
2679 if (hba->sli_mode > EMLXS_HBA_SLI3_MODE) {
2680 return (1);
2681 }
2682
2683 if (!(hba->flag & FC_NPIV_ENABLED)) {
2684 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2685 "reg_vpi:%d failed. NPIV disabled.",
2686 port->vpi);
2687 return (1);
2688 }
2689
2690 if (port->flag & EMLXS_PORT_REG_VPI) {
2691 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2692 "reg_vpi:%d failed. Already registered.",
2693 port->vpi);
2694 return (0);
2695 }
2696
2697 mutex_enter(&EMLXS_PORT_LOCK);
2698
2699 /* Can't reg vpi until ClearLA is sent */
2700 if (hba->state != FC_READY) {
2701 mutex_exit(&EMLXS_PORT_LOCK);
2702
2703 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2704 "reg_vpi:%d failed. HBA state not READY",
2705 port->vpi);
2706 return (1);
2707 }
2708
2709 /* Must have port id */
2710 if (!port->did) {
2711 mutex_exit(&EMLXS_PORT_LOCK);
2712
2713 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2714 "reg_vpi:%d failed. Port did=0",
2715 port->vpi);
2716 return (1);
2717 }
2718
2719 if (!(mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))) {
2720 mutex_exit(&EMLXS_PORT_LOCK);
2721
2722 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2723 "reg_vpi:%d failed. Unable to allocate mbox.",
2724 port->vpi);
2725 return (1);
2726 }
2727
2728 port->flag |= EMLXS_PORT_REG_VPI;
2729
2730 mutex_exit(&EMLXS_PORT_LOCK);
2731
2732 mb = (MAILBOX *)mbq->mbox;
2733 bzero((void *)mb, MAILBOX_CMD_BSIZE);
2734
2735 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2736 "reg_vpi:%d", port->vpi);
2737
2738 mb->un.varRegVpi.vpi = port->vpi;
2739 mb->un.varRegVpi.sid = port->did;
2740 mb->mbxCommand = MBX_REG_VPI;
2741 mb->mbxOwner = OWN_HOST;
2742
2743 mbq->sbp = (void *)sbp;
2744 mbq->mbox_cmpl = emlxs_reg_vpi_mbcmpl;
2745 mbq->context = NULL;
2746 mbq->port = (void *)port;
2747
2748 rval = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_NOWAIT, 0);
2749 if ((rval != MBX_BUSY) && (rval != MBX_SUCCESS)) {
2750 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2751 "reg_vpi:%d failed. Unable to send request.",
2752 port->vpi);
2753
2754 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
2755 return (1);
2756 }
2757
2758 return (0);
2759
2760 } /* emlxs_mb_reg_vpi() */
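/*
 * Flag lifecycle note (added): EMLXS_PORT_REG_VPI is set here as soon
 * as the REG_VPI request is queued; EMLXS_PORT_REG_VPI_CMPL is set by
 * emlxs_reg_vpi_mbcmpl() only on MBX_SUCCESS (on failure the REG_VPI
 * flag is cleared again).  emlxs_mb_unreg_vpi() below requires both
 * flags before it will issue an UNREG_VPI.
 */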
2761
2762
2763 /* SLI3 */
2764 static uint32_t
2765 emlxs_unreg_vpi_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq)
2766 {
2767 emlxs_port_t *port = (emlxs_port_t *)mbq->port;
2768 MAILBOX *mb;
2769
2770 mb = (MAILBOX *)mbq->mbox;
2771
2772 if (mb->mbxStatus != MBX_SUCCESS) {
2773 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2774 "unreg_vpi_mbcmpl:%d failed. status=%x",
2775 port->vpi, mb->mbxStatus);
2776 return (0);
2777 }
2778
2779 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2780 "unreg_vpi_mbcmpl:%d", port->vpi);
2781
2782 mutex_enter(&EMLXS_PORT_LOCK);
2783 port->flag &= ~EMLXS_PORT_REG_VPI_CMPL;
2784 mutex_exit(&EMLXS_PORT_LOCK);
2785
2786 return (0);
2787
2788 } /* emlxs_unreg_vpi_mbcmpl() */
2789
2790
2791 /* SLI3 */
2792 extern uint32_t
2793 emlxs_mb_unreg_vpi(emlxs_port_t *port)
2794 {
2795 emlxs_hba_t *hba = HBA;
2796 MAILBOXQ *mbq;
2797 MAILBOX *mb;
2798 int rval;
2799
2800 if (hba->sli_mode > EMLXS_HBA_SLI3_MODE) {
2801 return (1);
2802 }
2803
2804 mutex_enter(&EMLXS_PORT_LOCK);
2805
2806 if (!(port->flag & EMLXS_PORT_REG_VPI) ||
2807 !(port->flag & EMLXS_PORT_REG_VPI_CMPL)) {
2808
2809 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2810 "unreg_vpi:%d failed. Not registered. flag=%x",
2811 port->vpi, port->flag);
2812
2813 mutex_exit(&EMLXS_PORT_LOCK);
2814 return (0);
2815 }
2816
2817 if (!(mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))) {
2818 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2819 "unreg_vpi:%d failed. Unable to allocate mbox.",
2820 port->vpi);
2821
2822 mutex_exit(&EMLXS_PORT_LOCK);
2823 return (1);
2824 }
2825
2826 port->flag &= ~EMLXS_PORT_REG_VPI;
2827
2828 mutex_exit(&EMLXS_PORT_LOCK);
2829
2830 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2831 "unreg_vpi:%d", port->vpi);
2832
2833 mb = (MAILBOX *)mbq->mbox;
2834 bzero((void *)mb, MAILBOX_CMD_BSIZE);
2835 mb->un.varUnregVpi.vpi = port->vpi;
2836 mb->mbxCommand = MBX_UNREG_VPI;
2837 mb->mbxOwner = OWN_HOST;
2838
2839 mbq->mbox_cmpl = emlxs_unreg_vpi_mbcmpl;
2840 mbq->context = NULL;
2841 mbq->port = (void *)port;
2842
2843 rval = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_NOWAIT, 0);
2844 if ((rval != MBX_BUSY) && (rval != MBX_SUCCESS)) {
2845 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2846 "unreg_vpi:%d failed. Unable to send request.",
2847 port->vpi);
2848
2849 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
2850 return (1);
2851 }
2852
2853 return (0);
2854
2855 } /* emlxs_mb_unreg_vpi() */
2856
2857
2858 /*
2859 * emlxs_mb_config_farp Issue a CONFIG FARP mailbox command
2860 */
2861 extern void
2862 emlxs_mb_config_farp(emlxs_hba_t *hba, MAILBOXQ *mbq)
2863 {
2864 MAILBOX *mb = (MAILBOX *)mbq;
2865
2866 bzero((void *)mb, MAILBOX_CMD_BSIZE);
2867
2868 bcopy((uint8_t *)&hba->wwpn,
2869 (uint8_t *)&mb->un.varCfgFarp.portname, sizeof (NAME_TYPE));
2870
2871 bcopy((uint8_t *)&hba->wwpn,
2872 (uint8_t *)&mb->un.varCfgFarp.nodename, sizeof (NAME_TYPE));
2873
2874 mb->un.varCfgFarp.filterEnable = 1;
2875 mb->un.varCfgFarp.portName = 1;
2876 mb->un.varCfgFarp.nodeName = 1;
2877 mb->mbxCommand = MBX_CONFIG_FARP;
2878 mb->mbxOwner = OWN_HOST;
2879 mbq->mbox_cmpl = NULL; /* no cmpl needed */
2880 mbq->port = (void *)&PPORT;
2881
2882 } /* emlxs_mb_config_farp() */
2883
2884
2885 /*
2886 * emlxs_mb_read_nv Issue a READ CONFIG mailbox command
2887 */
2888 /*ARGSUSED*/
2889 extern void
2890 emlxs_mb_read_config(emlxs_hba_t *hba, MAILBOXQ *mbq)
2891 {
2892 MAILBOX *mb = (MAILBOX *)mbq;
2893
2894 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
2895 bzero((void *) mb, MAILBOX_CMD_SLI4_BSIZE);
2896 mbq->nonembed = NULL;
2897 } else {
2898 bzero((void *)mb, MAILBOX_CMD_BSIZE);
2899 }
2900
2901 mb->mbxCommand = MBX_READ_CONFIG;
2902 mb->mbxOwner = OWN_HOST;
2903 mbq->mbox_cmpl = NULL; /* no cmpl needed */
2904 mbq->port = (void *)&PPORT;
2905
2906 } /* emlxs_mb_read_config() */
2907
2908
2909 /*
2910 * NAME: emlxs_mb_put
2911 *
2912 * FUNCTION: put mailbox cmd onto the mailbox queue.
2913 *
2914 * EXECUTION ENVIRONMENT: process and interrupt level.
2915 *
2916 * NOTES:
2917 *
2918 * CALLED FROM: EMLXS_SLI_ISSUE_MBOX_CMD
2919 *
2920 * INPUT: hba - pointer to the device info area
2921  * mbq - pointer to the mailbox queue entry (MAILBOXQ) of the mailbox cmd
2922  *
2923  * RETURNS: none - the command has been queued
2924 */
2925 extern void
2926 emlxs_mb_put(emlxs_hba_t *hba, MAILBOXQ *mbq)
2927 {
2928
2929 mutex_enter(&EMLXS_MBOX_LOCK);
2930
2931 if (hba->mbox_queue.q_first) {
2932
2933 /*
2934 * queue command to end of list
2935 */
2936 ((MAILBOXQ *)hba->mbox_queue.q_last)->next = mbq;
2937 hba->mbox_queue.q_last = (uint8_t *)mbq;
2938 hba->mbox_queue.q_cnt++;
2939 } else {
2940
2941 /*
2942 * add command to empty list
2943 */
2944 hba->mbox_queue.q_first = (uint8_t *)mbq;
2945 hba->mbox_queue.q_last = (uint8_t *)mbq;
2946 hba->mbox_queue.q_cnt = 1;
2947 }
2948
2949 mbq->next = NULL;
2950
2951 mutex_exit(&EMLXS_MBOX_LOCK);
2952 } /* emlxs_mb_put() */
2953
2954
2955 /*
2956 * NAME: emlxs_mb_get
2957 *
2958 * FUNCTION: get a mailbox command from mailbox command queue
2959 *
2960 * EXECUTION ENVIRONMENT: interrupt level.
2961 *
2962 * NOTES:
2963 *
2964 * CALLED FROM: emlxs_handle_mb_event
2965 *
2966 * INPUT: hba - pointer to the device info area
2967 *
2968  * RETURNS: NULL if the queue is empty, else a pointer to the dequeued mailbox command
2969 */
2970 extern MAILBOXQ *
2971 emlxs_mb_get(emlxs_hba_t *hba)
2972 {
2973 MAILBOXQ *p_first = NULL;
2974
2975 mutex_enter(&EMLXS_MBOX_LOCK);
2976
2977 if (hba->mbox_queue.q_first) {
2978 p_first = (MAILBOXQ *)hba->mbox_queue.q_first;
2979 hba->mbox_queue.q_first = (uint8_t *)p_first->next;
2980
2981 if (hba->mbox_queue.q_first == NULL) {
2982 hba->mbox_queue.q_last = NULL;
2983 hba->mbox_queue.q_cnt = 0;
2984 } else {
2985 hba->mbox_queue.q_cnt--;
2986 }
2987
2988 p_first->next = NULL;
2989 }
2990
2991 mutex_exit(&EMLXS_MBOX_LOCK);
2992
2993 return (p_first);
2994
2995 } /* emlxs_mb_get() */
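/*
 * Queueing-model sketch (added): emlxs_mb_put() and emlxs_mb_get()
 * form a simple FIFO protected by EMLXS_MBOX_LOCK.  Producers queue a
 * prepared MAILBOXQ when the single mailbox register set is busy, e.g.
 *
 *	emlxs_mb_config_link(hba, mbox);
 *	emlxs_mb_put(hba, mbox);
 *
 * and the mailbox completion path (emlxs_handle_mb_event) later pops
 * the next entry with emlxs_mb_get() and issues it to the adapter.
 */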
2996
2997
2998 /* EMLXS_PORT_LOCK must be held when calling this */
2999 void
3000 emlxs_mb_init(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t flag, uint32_t tmo)
3001 {
3002 MATCHMAP *mp;
3003
3004 HBASTATS.MboxIssued++;
3005 hba->mbox_queue_flag = flag;
3006
3007 /* Set the Mailbox timer */
3008 if (hba->timer_tics) {
3009 hba->mbox_timer = hba->timer_tics + tmo;
3010 } else {
3011 hba->mbox_timer = DRV_TIME + tmo;
3012 }
3013
3014 /* Initialize mailbox */
3015 mbq->flag &= MBQ_INIT_MASK;
3016 mbq->next = 0;
3017
3018 mutex_enter(&EMLXS_MBOX_LOCK);
3019 hba->mbox_mbq = (void *)mbq;
3020 mutex_exit(&EMLXS_MBOX_LOCK);
3021
3022 if (mbq->nonembed) {
3023 mp = (MATCHMAP *) mbq->nonembed;
3024 EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
3025 DDI_DMA_SYNC_FORDEV);
3026 }
3027
3028 if (mbq->bp) {
3029 mp = (MATCHMAP *) mbq->bp;
3030 EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
3031 DDI_DMA_SYNC_FORDEV);
3032 }
3033 return;
3034
3035 } /* emlxs_mb_init() */
3036
3037
3038 extern void
3039 emlxs_mb_fini(emlxs_hba_t *hba, MAILBOX *mb, uint32_t mbxStatus)
3040 {
3041 emlxs_port_t *port = &PPORT;
3042 MATCHMAP *mbox_nonembed;
3043 MATCHMAP *mbox_bp;
3044 emlxs_buf_t *mbox_sbp;
3045 fc_unsol_buf_t *mbox_ubp;
3046 IOCBQ *mbox_iocbq;
3047 MAILBOXQ *mbox_mbq;
3048 MAILBOX *mbox;
3049 uint32_t mbox_queue_flag;
3050
3051 mutex_enter(&EMLXS_PORT_LOCK);
3052
3053 if (hba->mbox_queue_flag) {
3054 HBASTATS.MboxCompleted++;
3055
3056 if (mbxStatus != MBX_SUCCESS) {
3057 HBASTATS.MboxError++;
3058 } else {
3059 HBASTATS.MboxGood++;
3060 }
3061 }
3062
3063 mutex_enter(&EMLXS_MBOX_LOCK);
3064 mbox_queue_flag = hba->mbox_queue_flag;
3065 mbox_mbq = (MAILBOXQ *)hba->mbox_mbq;
3066
3067 if (mbox_mbq) {
3068 mbox_nonembed = (MATCHMAP *)mbox_mbq->nonembed;
3069 mbox_bp = (MATCHMAP *)mbox_mbq->bp;
3070 mbox_sbp = (emlxs_buf_t *)mbox_mbq->sbp;
3071 mbox_ubp = (fc_unsol_buf_t *)mbox_mbq->ubp;
3072 mbox_iocbq = (IOCBQ *)mbox_mbq->iocbq;
3073 } else {
3074 mbox_nonembed = NULL;
3075 mbox_bp = NULL;
3076 mbox_sbp = NULL;
3077 mbox_ubp = NULL;
3078 mbox_iocbq = NULL;
3079 }
3080
3081 hba->mbox_mbq = NULL;
3082 hba->mbox_queue_flag = 0;
3083 hba->mbox_timer = 0;
3084 mutex_exit(&EMLXS_MBOX_LOCK);
3085
3086 mutex_exit(&EMLXS_PORT_LOCK);
3087
3088 #ifdef SFCT_SUPPORT
3089 if (mb && mbox_sbp && mbox_sbp->fct_cmd) {
3090 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_detail_msg,
3091 "FCT mailbox: %s: status=%x",
3092 emlxs_mb_cmd_xlate(mb->mbxCommand),
3093 mb->mbxStatus);
3094 }
3095 #endif /* SFCT_SUPPORT */
3096
3097 if (mbox_queue_flag == MBX_NOWAIT) {
3098 /* Check for deferred MBUF cleanup */
3099 if (mbox_bp) {
3100 emlxs_mem_put(hba, MEM_BUF, (void *)mbox_bp);
3101 }
3102 if (mbox_nonembed) {
3103 emlxs_mem_put(hba, MEM_BUF,
3104 (void *)mbox_nonembed);
3105 }
3106 if (mbox_mbq) {
3107 emlxs_mem_put(hba, MEM_MBOX,
3108 (void *)mbox_mbq);
3109 }
3110 } else { /* MBX_WAIT */
3111 if (mbox_mbq) {
3112 if (mb) {
3113 /* Copy the local mailbox provided back into */
3114 /* the original mailbox */
3115 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3116 bcopy((uint32_t *)mb,
3117 (uint32_t *)mbox_mbq,
3118 MAILBOX_CMD_SLI4_BSIZE);
3119 } else {
3120 bcopy((uint32_t *)mb,
3121 (uint32_t *)mbox_mbq,
3122 MAILBOX_CMD_BSIZE);
3123 }
3124 }
3125
3126 mbox = (MAILBOX *)mbox_mbq;
3127 mbox->mbxStatus = (uint16_t)mbxStatus;
3128
3129 /* Mark mailbox complete */
3130 mbox_mbq->flag |= MBQ_COMPLETED;
3131 }
3132
3133 /* Wake up the sleeping thread */
3134 if (mbox_queue_flag == MBX_SLEEP) {
3135 mutex_enter(&EMLXS_MBOX_LOCK);
3136 cv_broadcast(&EMLXS_MBOX_CV);
3137 mutex_exit(&EMLXS_MBOX_LOCK);
3138 }
3139 }
3140
3141 emlxs_mb_deferred_cmpl(port, mbxStatus, mbox_sbp, mbox_ubp, mbox_iocbq);
3142
3143 return;
3144
3145 } /* emlxs_mb_fini() */
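/*
 * Cleanup-ownership note (added): for MBX_NOWAIT requests the driver
 * owns the MAILBOXQ and any attached buffers, so they are returned to
 * their pools here; for waiting requests the caller's MAILBOXQ is
 * updated with the completion status (and a sleeper is woken via
 * EMLXS_MBOX_CV for MBX_SLEEP), leaving the caller responsible for
 * freeing its own resources.
 */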
3146
3147
3148 extern void
3149 emlxs_mb_deferred_cmpl(emlxs_port_t *port, uint32_t mbxStatus, emlxs_buf_t *sbp,
3150 fc_unsol_buf_t *ubp, IOCBQ *iocbq)
3151 {
3152 emlxs_hba_t *hba = HBA;
3153 emlxs_ub_priv_t *ub_priv;
3154
3155 #ifdef SFCT_SUPPORT
3156 if (sbp && sbp->fct_cmd && (sbp->fct_state == EMLXS_FCT_REG_PENDING)) {
3157 mutex_enter(&EMLXS_PKT_LOCK);
3158 sbp->fct_flags |= EMLXS_FCT_REGISTERED;
3159 cv_broadcast(&EMLXS_PKT_CV);
3160 mutex_exit(&EMLXS_PKT_LOCK);
3161
3162 sbp = NULL;
3163 }
3164 #endif /* SFCT_SUPPORT */
3165
3166 /* Check for deferred pkt completion */
3167 if (sbp) {
3168 if (mbxStatus != MBX_SUCCESS) {
3169 /* Set error status */
3170 sbp->pkt_flags &= ~PACKET_STATE_VALID;
3171 emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
3172 IOERR_NO_RESOURCES, 1);
3173 }
3174
3175 emlxs_pkt_complete(sbp, -1, 0, 1);
3176 }
3177
3178 /* Check for deferred ub completion */
3179 if (ubp) {
3180 ub_priv = ubp->ub_fca_private;
3181
3182 if (mbxStatus == MBX_SUCCESS) {
3183 emlxs_ub_callback(ub_priv->port, ubp);
3184 } else {
3185 (void) emlxs_fca_ub_release(ub_priv->port, 1,
3186 &ubp->ub_token);
3187 }
3188 }
3189
3190 /* Special handling for restricted login */
3191 if (iocbq == (IOCBQ *)1) {
3192 iocbq = NULL;
3193 }
3194
3195 /* Check for deferred iocb tx */
3196 if (iocbq) {
3197 /* Check for driver special codes */
3198 /* These indicate the mailbox is being flushed */
3199 if (mbxStatus >= MBX_DRIVER_RESERVED) {
3200 /* Set the error status and return it */
3201 iocbq->iocb.ULPSTATUS = IOSTAT_LOCAL_REJECT;
3202 iocbq->iocb.un.grsp.perr.statLocalError =
3203 IOERR_ABORT_REQUESTED;
3204
3205 emlxs_proc_channel_event(hba, iocbq->channel,
3206 iocbq);
3207 } else {
3208 EMLXS_SLI_ISSUE_IOCB_CMD(hba, iocbq->channel,
3209 iocbq);
3210 }
3211 }
3212
3213 return;
3214
3215 } /* emlxs_mb_deferred_cmpl() */
3216
3217
3218 extern void
3219 emlxs_mb_flush(emlxs_hba_t *hba)
3220 {
3221 MAILBOXQ *mbq;
3222 uint32_t mbxStatus;
3223
3224 mbxStatus = (hba->flag & FC_HARDWARE_ERROR) ?
3225 MBX_HARDWARE_ERROR : MBX_NOT_FINISHED;
3226
3227 /* Flush out the active mbox command */
3228 emlxs_mb_fini(hba, NULL, mbxStatus);
3229
3230 /* Flush out the queued mbox commands */
3231 	while ((mbq = emlxs_mb_get(hba)) != NULL) {
3232 mutex_enter(&EMLXS_MBOX_LOCK);
3233 hba->mbox_queue_flag = MBX_NOWAIT;
3234 hba->mbox_mbq = (void *)mbq;
3235 mutex_exit(&EMLXS_MBOX_LOCK);
3236
3237 emlxs_mb_fini(hba, NULL, mbxStatus);
3238 }
3239
3240 return;
3241
3242 } /* emlxs_mb_flush */
3243
3244
3245 extern char *
3246 emlxs_mb_cmd_xlate(uint8_t cmd)
3247 {
3248 static char buffer[32];
3249 uint32_t i;
3250 uint32_t count;
3251
3252 count = sizeof (emlxs_mb_cmd_table) / sizeof (emlxs_table_t);
3253 for (i = 0; i < count; i++) {
3254 if (cmd == emlxs_mb_cmd_table[i].code) {
3255 return (emlxs_mb_cmd_table[i].string);
3256 }
3257 }
3258
3259 (void) snprintf(buffer, sizeof (buffer), "Cmd=0x%x", cmd);
3260 return (buffer);
3261
3262 } /* emlxs_mb_cmd_xlate() */
3263
3264 extern char *
3265 emlxs_request_feature_xlate(uint32_t mask)
3266 {
3267 static char buffer[64];
3268 uint32_t i;
3269
3270 bzero((char *)&buffer[0], 64);
3271 for (i = 0; i < 12; i++) {
3272 if (mask & (1<<i)) {
3273 (void) strlcat(buffer,
3274 emlxs_request_feature_table[i].string,
3275 sizeof (buffer));
3276 }
3277 }
3278 return (buffer);
3279 } /* emlxs_request_feature_xlate() */
3280