1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at
9 * http://www.opensource.org/licenses/cddl1.txt.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2004-2012 Emulex. All rights reserved.
24 * Use is subject to license terms.
25 * Copyright 2020 RackTop Systems, Inc.
26 */
27
28 #define DEF_ICFG 1
29
30 #include <emlxs.h>
31 #include <emlxs_version.h>
32
33
34 static char emlxs_copyright[] = EMLXS_COPYRIGHT;
35 char emlxs_revision[] = EMLXS_REVISION;
36 char emlxs_version[] = EMLXS_VERSION;
37 char emlxs_name[] = EMLXS_NAME;
38 char emlxs_label[] = EMLXS_LABEL;
39
40 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
41 EMLXS_MSG_DEF(EMLXS_SOLARIS_C);
42
43 #ifdef MENLO_SUPPORT
44 static int32_t emlxs_send_menlo(emlxs_port_t *port, emlxs_buf_t *sbp);
45 #endif /* MENLO_SUPPORT */
46
47 static void emlxs_fca_attach(emlxs_hba_t *hba);
48 static void emlxs_fca_detach(emlxs_hba_t *hba);
49 static void emlxs_drv_banner(emlxs_hba_t *hba);
50
51 static int32_t emlxs_get_props(emlxs_hba_t *hba);
52 static int32_t emlxs_send_fcp_cmd(emlxs_port_t *port, emlxs_buf_t *sbp,
53 uint32_t *pkt_flags);
54 static int32_t emlxs_send_fct_status(emlxs_port_t *port, emlxs_buf_t *sbp);
55 static int32_t emlxs_send_fct_abort(emlxs_port_t *port, emlxs_buf_t *sbp);
56 static int32_t emlxs_send_ip(emlxs_port_t *port, emlxs_buf_t *sbp);
57 static int32_t emlxs_send_els(emlxs_port_t *port, emlxs_buf_t *sbp);
58 static int32_t emlxs_send_els_rsp(emlxs_port_t *port, emlxs_buf_t *sbp);
59 static int32_t emlxs_send_ct(emlxs_port_t *port, emlxs_buf_t *sbp);
60 static int32_t emlxs_send_ct_rsp(emlxs_port_t *port, emlxs_buf_t *sbp);
61 static uint32_t emlxs_add_instance(int32_t ddiinst);
62 static void emlxs_iodone(emlxs_buf_t *sbp);
63 static int emlxs_pm_lower_power(dev_info_t *dip);
64 static int emlxs_pm_raise_power(dev_info_t *dip);
65 static void emlxs_driver_remove(dev_info_t *dip, uint32_t init_flag,
66 uint32_t failed);
67 static void emlxs_iodone_server(void *arg1, void *arg2, void *arg3);
68 static uint32_t emlxs_integrity_check(emlxs_hba_t *hba);
69 static uint32_t emlxs_test(emlxs_hba_t *hba, uint32_t test_code,
70 uint32_t args, uint32_t *arg);
71
72 #if (EMLXS_MODREV >= EMLXS_MODREV3) && (EMLXS_MODREV <= EMLXS_MODREV4)
73 static void emlxs_read_vport_prop(emlxs_hba_t *hba);
74 #endif /* EMLXS_MODREV3 && EMLXS_MODREV4 */
75
76 static void emlxs_mode_init_masks(emlxs_hba_t *hba);
77
78
79 extern int
80 emlxs_msiid_to_chan(emlxs_hba_t *hba, int msi_id);
81 extern int
82 emlxs_select_msiid(emlxs_hba_t *hba);
83 extern void
84 emlxs_sli4_zero_queue_stat(emlxs_hba_t *hba);
85
86 /*
87 * Driver Entry Routines.
88 */
89 static int32_t emlxs_detach(dev_info_t *, ddi_detach_cmd_t);
90 static int32_t emlxs_attach(dev_info_t *, ddi_attach_cmd_t);
91 static int32_t emlxs_open(dev_t *, int32_t, int32_t, cred_t *);
92 static int32_t emlxs_close(dev_t, int32_t, int32_t, cred_t *);
93 static int32_t emlxs_ioctl(dev_t, int32_t, intptr_t, int32_t,
94 cred_t *, int32_t *);
95 static int32_t emlxs_info(dev_info_t *, ddi_info_cmd_t, void *, void **);
96
97
98 /*
99 * FC_AL Transport Functions.
100 */
101 static opaque_t emlxs_fca_bind_port(dev_info_t *, fc_fca_port_info_t *,
102 fc_fca_bind_info_t *);
103 static void emlxs_fca_unbind_port(opaque_t);
104 static void emlxs_initialize_pkt(emlxs_port_t *, emlxs_buf_t *);
105 static int32_t emlxs_fca_get_cap(opaque_t, char *, void *);
106 static int32_t emlxs_fca_set_cap(opaque_t, char *, void *);
107 static int32_t emlxs_fca_get_map(opaque_t, fc_lilpmap_t *);
108 static int32_t emlxs_fca_ub_alloc(opaque_t, uint64_t *, uint32_t,
109 uint32_t *, uint32_t);
110 static int32_t emlxs_fca_ub_free(opaque_t, uint32_t, uint64_t *);
111
112 static opaque_t emlxs_fca_get_device(opaque_t, fc_portid_t);
113 static int32_t emlxs_fca_notify(opaque_t, uint32_t);
114 static void emlxs_ub_els_reject(emlxs_port_t *, fc_unsol_buf_t *);
115
116 /*
117 * Driver Internal Functions.
118 */
119
120 static void emlxs_poll(emlxs_port_t *, emlxs_buf_t *);
121 static int32_t emlxs_power(dev_info_t *, int32_t, int32_t);
122 #ifdef EMLXS_I386
123 #ifdef S11
124 static int32_t emlxs_quiesce(dev_info_t *);
125 #endif /* S11 */
126 #endif /* EMLXS_I386 */
127 static int32_t emlxs_hba_resume(dev_info_t *);
128 static int32_t emlxs_hba_suspend(dev_info_t *);
129 static int32_t emlxs_hba_detach(dev_info_t *);
130 static int32_t emlxs_hba_attach(dev_info_t *);
131 static void emlxs_lock_destroy(emlxs_hba_t *);
132 static void emlxs_lock_init(emlxs_hba_t *);
133
134 char *emlxs_pm_components[] = {
135 "NAME=" DRIVER_NAME "000",
136 "0=Device D3 State",
137 "1=Device D0 State"
138 };
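/*
 * Illustrative sketch (not part of this driver): a component list such as
 * emlxs_pm_components is typically published to the power management
 * framework through the "pm-components" property, for example:
 *
 *	if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
 *	    "pm-components", emlxs_pm_components,
 *	    sizeof (emlxs_pm_components) / sizeof (char *)) !=
 *	    DDI_PROP_SUCCESS) {
 *		return (DDI_FAILURE);
 *	}
 */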
139
140
141 /*
142 * Default emlx dma limits
143 */
144 ddi_dma_lim_t emlxs_dma_lim = {
145 (uint32_t)0, /* dlim_addr_lo */
146 (uint32_t)0xffffffff, /* dlim_addr_hi */
147 (uint_t)0x00ffffff, /* dlim_cntr_max */
148 DEFAULT_BURSTSIZE | BURST32 | BURST64, /* dlim_burstsizes */
149 1, /* dlim_minxfer */
150 0x00ffffff /* dlim_dmaspeed */
151 };
152
153 /*
154 * Be careful when using these attributes; the defaults listed below are
155 * (almost) the most general case, permitting allocation in almost any
156 * way supported by the LightPulse family. The sole exception is the
157 * alignment specified as requiring memory allocation on a 4-byte boundary;
158 * the LightPulse can DMA memory on any byte boundary.
159 *
160 * The LightPulse family currently is limited to 16M transfers;
161 * this restriction affects the dma_attr_count_max and dma_attr_maxxfer fields.
162 */
163 ddi_dma_attr_t emlxs_dma_attr = {
164 DMA_ATTR_V0, /* dma_attr_version */
165 (uint64_t)0, /* dma_attr_addr_lo */
166 (uint64_t)0xffffffffffffffff, /* dma_attr_addr_hi */
167 (uint64_t)0x00ffffff, /* dma_attr_count_max */
168 1, /* dma_attr_align */
169 DEFAULT_BURSTSIZE | BURST32 | BURST64, /* dma_attr_burstsizes */
170 1, /* dma_attr_minxfer */
171 (uint64_t)0x00ffffff, /* dma_attr_maxxfer */
172 (uint64_t)0xffffffff, /* dma_attr_seg */
173 1, /* dma_attr_sgllen */
174 1, /* dma_attr_granular */
175 0 /* dma_attr_flags */
176 };
177
178 ddi_dma_attr_t emlxs_dma_attr_ro = {
179 DMA_ATTR_V0, /* dma_attr_version */
180 (uint64_t)0, /* dma_attr_addr_lo */
181 (uint64_t)0xffffffffffffffff, /* dma_attr_addr_hi */
182 (uint64_t)0x00ffffff, /* dma_attr_count_max */
183 1, /* dma_attr_align */
184 DEFAULT_BURSTSIZE | BURST32 | BURST64, /* dma_attr_burstsizes */
185 1, /* dma_attr_minxfer */
186 (uint64_t)0x00ffffff, /* dma_attr_maxxfer */
187 (uint64_t)0xffffffff, /* dma_attr_seg */
188 1, /* dma_attr_sgllen */
189 1, /* dma_attr_granular */
190 DDI_DMA_RELAXED_ORDERING /* dma_attr_flags */
191 };
192
193 ddi_dma_attr_t emlxs_dma_attr_1sg = {
194 DMA_ATTR_V0, /* dma_attr_version */
195 (uint64_t)0, /* dma_attr_addr_lo */
196 (uint64_t)0xffffffffffffffff, /* dma_attr_addr_hi */
197 (uint64_t)0x00ffffff, /* dma_attr_count_max */
198 1, /* dma_attr_align */
199 DEFAULT_BURSTSIZE | BURST32 | BURST64, /* dma_attr_burstsizes */
200 1, /* dma_attr_minxfer */
201 (uint64_t)0x00ffffff, /* dma_attr_maxxfer */
202 (uint64_t)0xffffffff, /* dma_attr_seg */
203 1, /* dma_attr_sgllen */
204 1, /* dma_attr_granular */
205 0 /* dma_attr_flags */
206 };
207
208 #if (EMLXS_MODREV >= EMLXS_MODREV3)
209 ddi_dma_attr_t emlxs_dma_attr_fcip_rsp = {
210 DMA_ATTR_V0, /* dma_attr_version */
211 (uint64_t)0, /* dma_attr_addr_lo */
212 (uint64_t)0xffffffffffffffff, /* dma_attr_addr_hi */
213 (uint64_t)0x00ffffff, /* dma_attr_count_max */
214 1, /* dma_attr_align */
215 DEFAULT_BURSTSIZE | BURST32 | BURST64, /* dma_attr_burstsizes */
216 1, /* dma_attr_minxfer */
217 (uint64_t)0x00ffffff, /* dma_attr_maxxfer */
218 (uint64_t)0xffffffff, /* dma_attr_seg */
219 1, /* dma_attr_sgllen */
220 1, /* dma_attr_granular */
221 0 /* dma_attr_flags */
222 };
223 #endif /* >= EMLXS_MODREV3 */
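/*
 * Illustrative sketch (not part of this driver): DMA attribute structures
 * such as emlxs_dma_attr are normally handed to ddi_dma_alloc_handle(9F)
 * when a DMA handle is created; assuming a dev_info pointer "dip" is in
 * scope, for example:
 *
 *	ddi_dma_handle_t dma_handle;
 *
 *	if (ddi_dma_alloc_handle(dip, &emlxs_dma_attr, DDI_DMA_DONTWAIT,
 *	    NULL, &dma_handle) != DDI_SUCCESS) {
 *		return (1);
 *	}
 */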
224
225 /*
226 * DDI access attributes for device
227 */
228 ddi_device_acc_attr_t emlxs_dev_acc_attr = {
229 DDI_DEVICE_ATTR_V1, /* devacc_attr_version */
230 DDI_STRUCTURE_LE_ACC, /* PCI is Little Endian */
231 DDI_STRICTORDER_ACC, /* devacc_attr_dataorder */
232 DDI_DEFAULT_ACC /* devacc_attr_access */
233 };
234
235 /*
236 * DDI access attributes for data
237 */
238 ddi_device_acc_attr_t emlxs_data_acc_attr = {
239 DDI_DEVICE_ATTR_V1, /* devacc_attr_version */
240 DDI_NEVERSWAP_ACC, /* don't swap for Data */
241 DDI_STRICTORDER_ACC, /* devacc_attr_dataorder */
242 DDI_DEFAULT_ACC /* devacc_attr_access */
243 };
244
245 /*
246 * Fill in the FC Transport structure,
247 * as defined in the Fibre Channel Transport Programming Guide.
248 */
249 #if (EMLXS_MODREV == EMLXS_MODREV5)
250 static fc_fca_tran_t emlxs_fca_tran = {
251 FCTL_FCA_MODREV_5, /* fca_version, with SUN NPIV support */
252 MAX_VPORTS, /* fca number of ports */
253 sizeof (emlxs_buf_t), /* fca pkt size */
254 2048, /* fca cmd max */
255 &emlxs_dma_lim, /* fca dma limits */
256 0, /* fca iblock, to be filled in later */
257 &emlxs_dma_attr, /* fca dma attributes */
258 &emlxs_dma_attr_1sg, /* fca dma fcp cmd attributes */
259 &emlxs_dma_attr_1sg, /* fca dma fcp rsp attributes */
260 &emlxs_dma_attr_ro, /* fca dma fcp data attributes */
261 &emlxs_dma_attr_1sg, /* fca dma fcip cmd attributes */
262 &emlxs_dma_attr_fcip_rsp, /* fca dma fcip rsp attributes */
263 &emlxs_dma_attr_1sg, /* fca dma fcsm cmd attributes */
264 &emlxs_dma_attr, /* fca dma fcsm rsp attributes */
265 &emlxs_data_acc_attr, /* fca access attributes */
266 0, /* fca_num_npivports */
267 {0, 0, 0, 0, 0, 0, 0, 0}, /* Physical port WWPN */
268 emlxs_fca_bind_port,
269 emlxs_fca_unbind_port,
270 emlxs_fca_pkt_init,
271 emlxs_fca_pkt_uninit,
272 emlxs_fca_transport,
273 emlxs_fca_get_cap,
274 emlxs_fca_set_cap,
275 emlxs_fca_get_map,
276 emlxs_fca_transport,
277 emlxs_fca_ub_alloc,
278 emlxs_fca_ub_free,
279 emlxs_fca_ub_release,
280 emlxs_fca_pkt_abort,
281 emlxs_fca_reset,
282 emlxs_fca_port_manage,
283 emlxs_fca_get_device,
284 emlxs_fca_notify
285 };
286 #endif /* EMLXS_MODREV5 */
287
288
289 #if (EMLXS_MODREV == EMLXS_MODREV4)
290 static fc_fca_tran_t emlxs_fca_tran = {
291 FCTL_FCA_MODREV_4, /* fca_version */
292 MAX_VPORTS, /* fca number of ports */
293 sizeof (emlxs_buf_t), /* fca pkt size */
294 2048, /* fca cmd max */
295 &emlxs_dma_lim, /* fca dma limits */
296 0, /* fca iblock, to be filled in later */
297 &emlxs_dma_attr, /* fca dma attributes */
298 &emlxs_dma_attr_1sg, /* fca dma fcp cmd attributes */
299 &emlxs_dma_attr_1sg, /* fca dma fcp rsp attributes */
300 &emlxs_dma_attr_ro, /* fca dma fcp data attributes */
301 &emlxs_dma_attr_1sg, /* fca dma fcip cmd attributes */
302 &emlxs_dma_attr_fcip_rsp, /* fca dma fcip rsp attributes */
303 &emlxs_dma_attr_1sg, /* fca dma fcsm cmd attributes */
304 &emlxs_dma_attr, /* fca dma fcsm rsp attributes */
305 &emlxs_data_acc_attr, /* fca access attributes */
306 emlxs_fca_bind_port,
307 emlxs_fca_unbind_port,
308 emlxs_fca_pkt_init,
309 emlxs_fca_pkt_uninit,
310 emlxs_fca_transport,
311 emlxs_fca_get_cap,
312 emlxs_fca_set_cap,
313 emlxs_fca_get_map,
314 emlxs_fca_transport,
315 emlxs_fca_ub_alloc,
316 emlxs_fca_ub_free,
317 emlxs_fca_ub_release,
318 emlxs_fca_pkt_abort,
319 emlxs_fca_reset,
320 emlxs_fca_port_manage,
321 emlxs_fca_get_device,
322 emlxs_fca_notify
323 };
324 #endif /* EMLXS_MODREV4 */
325
326
327 #if (EMLXS_MODREV == EMLXS_MODREV3)
328 static fc_fca_tran_t emlxs_fca_tran = {
329 FCTL_FCA_MODREV_3, /* fca_version */
330 MAX_VPORTS, /* fca number of ports */
331 sizeof (emlxs_buf_t), /* fca pkt size */
332 2048, /* fca cmd max */
333 &emlxs_dma_lim, /* fca dma limits */
334 0, /* fca iblock, to be filled in later */
335 &emlxs_dma_attr, /* fca dma attributes */
336 &emlxs_dma_attr_1sg, /* fca dma fcp cmd attributes */
337 &emlxs_dma_attr_1sg, /* fca dma fcp rsp attributes */
338 &emlxs_dma_attr_ro, /* fca dma fcp data attributes */
339 &emlxs_dma_attr_1sg, /* fca dma fcip cmd attributes */
340 &emlxs_dma_attr_fcip_rsp, /* fca dma fcip rsp attributes */
341 &emlxs_dma_attr_1sg, /* fca dma fcsm cmd attributes */
342 &emlxs_dma_attr, /* fca dma fcsm rsp attributes */
343 &emlxs_data_acc_attr, /* fca access attributes */
344 emlxs_fca_bind_port,
345 emlxs_fca_unbind_port,
346 emlxs_fca_pkt_init,
347 emlxs_fca_pkt_uninit,
348 emlxs_fca_transport,
349 emlxs_fca_get_cap,
350 emlxs_fca_set_cap,
351 emlxs_fca_get_map,
352 emlxs_fca_transport,
353 emlxs_fca_ub_alloc,
354 emlxs_fca_ub_free,
355 emlxs_fca_ub_release,
356 emlxs_fca_pkt_abort,
357 emlxs_fca_reset,
358 emlxs_fca_port_manage,
359 emlxs_fca_get_device,
360 emlxs_fca_notify
361 };
362 #endif /* EMLXS_MODREV3 */
363
364
365 #if (EMLXS_MODREV == EMLXS_MODREV2)
366 static fc_fca_tran_t emlxs_fca_tran = {
367 FCTL_FCA_MODREV_2, /* fca_version */
368 MAX_VPORTS, /* number of ports */
369 sizeof (emlxs_buf_t), /* pkt size */
370 2048, /* max cmds */
371 &emlxs_dma_lim, /* DMA limits */
372 0, /* iblock, to be filled in later */
373 &emlxs_dma_attr, /* dma attributes */
374 &emlxs_data_acc_attr, /* access attributes */
375 emlxs_fca_bind_port,
376 emlxs_fca_unbind_port,
377 emlxs_fca_pkt_init,
378 emlxs_fca_pkt_uninit,
379 emlxs_fca_transport,
380 emlxs_fca_get_cap,
381 emlxs_fca_set_cap,
382 emlxs_fca_get_map,
383 emlxs_fca_transport,
384 emlxs_fca_ub_alloc,
385 emlxs_fca_ub_free,
386 emlxs_fca_ub_release,
387 emlxs_fca_pkt_abort,
388 emlxs_fca_reset,
389 emlxs_fca_port_manage,
390 emlxs_fca_get_device,
391 emlxs_fca_notify
392 };
393 #endif /* EMLXS_MODREV2 */
394
395
396 /*
397 * State pointer that the implementation uses as a place to
398 * hang a set of per-driver structures.
399 *
400 */
401 void *emlxs_soft_state = NULL;
402
403 /*
404 * Driver Global variables.
405 */
406 int32_t emlxs_scsi_reset_delay = 3000; /* milliseconds */
407
408 emlxs_device_t emlxs_device;
409
410 uint32_t emlxs_instance[MAX_FC_BRDS]; /* uses emlxs_device.lock */
411 uint32_t emlxs_instance_count = 0; /* uses emlxs_device.lock */
412 uint32_t emlxs_instance_flag = 0; /* uses emlxs_device.lock */
413 #define EMLXS_FW_SHOW 0x00000001
414
415
416 /*
417 * CB ops vector. Used for administration only.
418 */
419 static struct cb_ops emlxs_cb_ops = {
420 emlxs_open, /* cb_open */
421 emlxs_close, /* cb_close */
422 nodev, /* cb_strategy */
423 nodev, /* cb_print */
424 nodev, /* cb_dump */
425 nodev, /* cb_read */
426 nodev, /* cb_write */
427 emlxs_ioctl, /* cb_ioctl */
428 nodev, /* cb_devmap */
429 nodev, /* cb_mmap */
430 nodev, /* cb_segmap */
431 nochpoll, /* cb_chpoll */
432 ddi_prop_op, /* cb_prop_op */
433 0, /* cb_stream */
434 #ifdef _LP64
435 D_64BIT | D_HOTPLUG | D_MP | D_NEW, /* cb_flag */
436 #else
437 D_HOTPLUG | D_MP | D_NEW, /* cb_flag */
438 #endif
439 CB_REV, /* rev */
440 nodev, /* cb_aread */
441 nodev /* cb_awrite */
442 };
443
444 static struct dev_ops emlxs_ops = {
445 DEVO_REV, /* rev */
446 0, /* refcnt */
447 emlxs_info, /* getinfo */
448 nulldev, /* identify */
449 nulldev, /* probe */
450 emlxs_attach, /* attach */
451 emlxs_detach, /* detach */
452 nodev, /* reset */
453 &emlxs_cb_ops, /* devo_cb_ops */
454 NULL, /* devo_bus_ops */
455 emlxs_power, /* power ops */
456 #ifdef EMLXS_I386
457 #ifdef S11
458 emlxs_quiesce, /* quiesce */
459 #endif /* S11 */
460 #endif /* EMLXS_I386 */
461 };
462
463 #include <sys/modctl.h>
464 extern struct mod_ops mod_driverops;
465
466 #ifdef SAN_DIAG_SUPPORT
467 extern kmutex_t emlxs_sd_bucket_mutex;
468 extern sd_bucket_info_t emlxs_sd_bucket;
469 #endif /* SAN_DIAG_SUPPORT */
470
471 /*
472 * Module linkage information for the kernel.
473 */
474 static struct modldrv emlxs_modldrv = {
475 &mod_driverops, /* module type - driver */
476 emlxs_name, /* module name */
477 &emlxs_ops, /* driver ops */
478 };
479
480
481 /*
482 * Driver module linkage structure
483 */
484 static struct modlinkage emlxs_modlinkage = {
485 MODREV_1, /* ml_rev - must be MODREV_1 */
486 &emlxs_modldrv, /* ml_linkage */
487 NULL /* end of driver linkage */
488 };
489
490
491 /* We only need to add entries for non-default return codes. */
492 /* Entries do not need to be in order. */
493 /* Default: FC_PKT_TRAN_ERROR, FC_REASON_ABORTED, */
494 /* FC_EXPLN_NONE, FC_ACTION_RETRYABLE */
495
496 emlxs_xlat_err_t emlxs_iostat_tbl[] = {
497 /* {f/w code, pkt_state, pkt_reason, */
498 /* pkt_expln, pkt_action} */
499
500 /* 0x00 - Do not remove */
501 {IOSTAT_SUCCESS, FC_PKT_SUCCESS, FC_REASON_NONE,
502 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
503
504 /* 0x01 - Do not remove */
505 {IOSTAT_FCP_RSP_ERROR, FC_PKT_SUCCESS, FC_REASON_NONE,
506 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
507
508 /* 0x02 */
509 {IOSTAT_REMOTE_STOP, FC_PKT_REMOTE_STOP, FC_REASON_ABTS,
510 FC_EXPLN_NONE, FC_ACTION_NON_RETRYABLE},
511
512 /*
513 * This is a default entry.
514 * The real codes are written dynamically in emlxs_els.c
515 */
516 /* 0x09 */
517 {IOSTAT_LS_RJT, FC_PKT_LS_RJT, FC_REASON_CMD_UNABLE,
518 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
519
520 /* Special error code */
521 /* 0x10 */
522 {IOSTAT_DATA_OVERRUN, FC_PKT_TRAN_ERROR, FC_REASON_OVERRUN,
523 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
524
525 /* Special error code */
526 /* 0x11 */
527 {IOSTAT_DATA_UNDERRUN, FC_PKT_TRAN_ERROR, FC_REASON_ABORTED,
528 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
529
530 /* Special error code */
531 /* 0x12 */
532 {IOSTAT_RSP_INVALID, FC_PKT_TRAN_ERROR, FC_REASON_ABORTED,
533 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
534
535 /* CLASS 2 only */
536 /* 0x04 */
537 {IOSTAT_NPORT_RJT, FC_PKT_NPORT_RJT, FC_REASON_PROTOCOL_ERROR,
538 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
539
540 /* CLASS 2 only */
541 /* 0x05 */
542 {IOSTAT_FABRIC_RJT, FC_PKT_FABRIC_RJT, FC_REASON_PROTOCOL_ERROR,
543 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
544
545 /* CLASS 2 only */
546 /* 0x06 */
547 {IOSTAT_NPORT_BSY, FC_PKT_NPORT_BSY, FC_REASON_PHYSICAL_BUSY,
548 FC_EXPLN_NONE, FC_ACTION_SEQ_TERM_RETRY},
549
550 /* CLASS 2 only */
551 /* 0x07 */
552 {IOSTAT_FABRIC_BSY, FC_PKT_FABRIC_BSY, FC_REASON_FABRIC_BSY,
553 FC_EXPLN_NONE, FC_ACTION_SEQ_TERM_RETRY},
554 };
555
556 #define IOSTAT_MAX (sizeof (emlxs_iostat_tbl)/sizeof (emlxs_xlat_err_t))
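/*
 * Illustrative sketch (not part of this driver): a lookup over
 * emlxs_iostat_tbl scans for a matching firmware status code and falls
 * back to the default translation documented above when no entry matches.
 * The variable "iostat" and the member name "emlxs_status" below are
 * assumptions for the firmware status value and the firmware-code field
 * of emlxs_xlat_err_t:
 *
 *	emlxs_xlat_err_t entry = {0, FC_PKT_TRAN_ERROR, FC_REASON_ABORTED,
 *	    FC_EXPLN_NONE, FC_ACTION_RETRYABLE};
 *	uint32_t i;
 *
 *	for (i = 0; i < IOSTAT_MAX; i++) {
 *		if (emlxs_iostat_tbl[i].emlxs_status == iostat) {
 *			entry = emlxs_iostat_tbl[i];
 *			break;
 *		}
 *	}
 */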
557
558
559 /* We only need to add entries for non-default return codes. */
560 /* Entries do not need to be in order. */
561 /* Default: FC_PKT_TRAN_ERROR, FC_REASON_ABORTED, */
562 /* FC_EXPLN_NONE, FC_ACTION_RETRYABLE} */
563
564 emlxs_xlat_err_t emlxs_ioerr_tbl[] = {
565 /* {f/w code, pkt_state, pkt_reason, */
566 /* pkt_expln, pkt_action} */
567
568 /* 0x01 */
569 {IOERR_MISSING_CONTINUE, FC_PKT_TRAN_ERROR, FC_REASON_OVERRUN,
570 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
571
572 /* 0x02 */
573 {IOERR_SEQUENCE_TIMEOUT, FC_PKT_TIMEOUT, FC_REASON_SEQ_TIMEOUT,
574 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
575
576 /* 0x04 */
577 {IOERR_INVALID_RPI, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
578 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
579
580 /* 0x05 */
581 {IOERR_NO_XRI, FC_PKT_LOCAL_RJT, FC_REASON_XCHG_DROPPED,
582 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
583
584 /* 0x06 */
585 {IOERR_ILLEGAL_COMMAND, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_REQ,
586 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
587
588 /* 0x07 */
589 {IOERR_XCHG_DROPPED, FC_PKT_LOCAL_RJT, FC_REASON_XCHG_DROPPED,
590 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
591
592 /* 0x08 */
593 {IOERR_ILLEGAL_FIELD, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_REQ,
594 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
595
596 /* 0x0B */
597 {IOERR_RCV_BUFFER_WAITING, FC_PKT_LOCAL_RJT, FC_REASON_NOMEM,
598 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
599
600 /* 0x0D */
601 {IOERR_TX_DMA_FAILED, FC_PKT_LOCAL_RJT, FC_REASON_DMA_ERROR,
602 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
603
604 /* 0x0E */
605 {IOERR_RX_DMA_FAILED, FC_PKT_LOCAL_RJT, FC_REASON_DMA_ERROR,
606 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
607
608 /* 0x0F */
609 {IOERR_ILLEGAL_FRAME, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_FRAME,
610 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
611
612 /* 0x11 */
613 {IOERR_NO_RESOURCES, FC_PKT_LOCAL_RJT, FC_REASON_NOMEM,
614 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
615
616 /* 0x13 */
617 {IOERR_ILLEGAL_LENGTH, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_LENGTH,
618 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
619
620 /* 0x14 */
621 {IOERR_UNSUPPORTED_FEATURE, FC_PKT_LOCAL_RJT, FC_REASON_UNSUPPORTED,
622 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
623
624 /* 0x15 */
625 {IOERR_ABORT_IN_PROGRESS, FC_PKT_LOCAL_RJT, FC_REASON_ABORTED,
626 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
627
628 /* 0x16 */
629 {IOERR_ABORT_REQUESTED, FC_PKT_LOCAL_RJT, FC_REASON_ABORTED,
630 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
631
632 /* 0x17 */
633 {IOERR_RCV_BUFFER_TIMEOUT, FC_PKT_LOCAL_RJT, FC_REASON_RX_BUF_TIMEOUT,
634 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
635
636 /* 0x18 */
637 {IOERR_LOOP_OPEN_FAILURE, FC_PKT_LOCAL_RJT, FC_REASON_FCAL_OPN_FAIL,
638 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
639
640 /* 0x1A */
641 {IOERR_LINK_DOWN, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
642 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
643
644 /* 0x21 */
645 {IOERR_BAD_HOST_ADDRESS, FC_PKT_LOCAL_RJT, FC_REASON_BAD_SID,
646 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
647
648 /* Occurs at link down */
649 /* 0x28 */
650 {IOERR_BUFFER_SHORTAGE, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
651 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
652
653 /* 0xF0 */
654 {IOERR_ABORT_TIMEOUT, FC_PKT_TIMEOUT, FC_REASON_SEQ_TIMEOUT,
655 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
656 };
657
658 #define IOERR_MAX (sizeof (emlxs_ioerr_tbl)/sizeof (emlxs_xlat_err_t))
659
660
661
662 emlxs_table_t emlxs_error_table[] = {
663 {IOERR_SUCCESS, "No error."},
664 {IOERR_MISSING_CONTINUE, "Missing continue."},
665 {IOERR_SEQUENCE_TIMEOUT, "Sequence timeout."},
666 {IOERR_INTERNAL_ERROR, "Internal error."},
667 {IOERR_INVALID_RPI, "Invalid RPI."},
668 {IOERR_NO_XRI, "No XRI."},
669 {IOERR_ILLEGAL_COMMAND, "Illegal command."},
670 {IOERR_XCHG_DROPPED, "Exchange dropped."},
671 {IOERR_ILLEGAL_FIELD, "Illegal field."},
672 {IOERR_RCV_BUFFER_WAITING, "RX buffer waiting."},
673 {IOERR_TX_DMA_FAILED, "TX DMA failed."},
674 {IOERR_RX_DMA_FAILED, "RX DMA failed."},
675 {IOERR_ILLEGAL_FRAME, "Illegal frame."},
676 {IOERR_NO_RESOURCES, "No resources."},
677 {IOERR_ILLEGAL_LENGTH, "Illegal length."},
678 {IOERR_UNSUPPORTED_FEATURE, "Unsupported feature."},
679 {IOERR_ABORT_IN_PROGRESS, "Abort in progress."},
680 {IOERR_ABORT_REQUESTED, "Abort requested."},
681 {IOERR_RCV_BUFFER_TIMEOUT, "RX buffer timeout."},
682 {IOERR_LOOP_OPEN_FAILURE, "Loop open failed."},
683 {IOERR_RING_RESET, "Ring reset."},
684 {IOERR_LINK_DOWN, "Link down."},
685 {IOERR_CORRUPTED_DATA, "Corrupted data."},
686 {IOERR_CORRUPTED_RPI, "Corrupted RPI."},
687 {IOERR_OUT_OF_ORDER_DATA, "Out-of-order data."},
688 {IOERR_OUT_OF_ORDER_ACK, "Out-of-order ack."},
689 {IOERR_DUP_FRAME, "Duplicate frame."},
690 {IOERR_LINK_CONTROL_FRAME, "Link control frame."},
691 {IOERR_BAD_HOST_ADDRESS, "Bad host address."},
692 {IOERR_RCV_HDRBUF_WAITING, "RX header buffer waiting."},
693 {IOERR_MISSING_HDR_BUFFER, "Missing header buffer."},
694 {IOERR_MSEQ_CHAIN_CORRUPTED, "MSEQ chain corrupted."},
695 {IOERR_ABORTMULT_REQUESTED, "Abort multiple requested."},
696 {IOERR_BUFFER_SHORTAGE, "Buffer shortage."},
697 {IOERR_XRIBUF_WAITING, "XRI buffer shortage"},
698 {IOERR_XRIBUF_MISSING, "XRI buffer missing"},
699 {IOERR_ROFFSET_INVAL, "Relative offset invalid."},
700 {IOERR_ROFFSET_MISSING, "Relative offset missing."},
701 {IOERR_INSUF_BUFFER, "Buffer too small."},
702 {IOERR_MISSING_SI, "ELS frame missing SI"},
703 {IOERR_MISSING_ES, "Exhausted burst without ES"},
704 {IOERR_INCOMP_XFER, "Transfer incomplete."},
705 {IOERR_ABORT_TIMEOUT, "Abort timeout."}
706
707 }; /* emlxs_error_table */
708
709
710 emlxs_table_t emlxs_state_table[] = {
711 {IOSTAT_SUCCESS, "Success."},
712 {IOSTAT_FCP_RSP_ERROR, "FCP response error."},
713 {IOSTAT_REMOTE_STOP, "Remote stop."},
714 {IOSTAT_LOCAL_REJECT, "Local reject."},
715 {IOSTAT_NPORT_RJT, "NPort reject."},
716 {IOSTAT_FABRIC_RJT, "Fabric reject."},
717 {IOSTAT_NPORT_BSY, "Nport busy."},
718 {IOSTAT_FABRIC_BSY, "Fabric busy."},
719 {IOSTAT_INTERMED_RSP, "Intermediate response."},
720 {IOSTAT_LS_RJT, "LS reject."},
721 {IOSTAT_CMD_REJECT, "Cmd reject."},
722 {IOSTAT_FCP_TGT_LENCHK, "TGT length check."},
723 {IOSTAT_NEED_BUFF_ENTRY, "Need buffer entry."},
724 {IOSTAT_DATA_UNDERRUN, "Data underrun."},
725 {IOSTAT_DATA_OVERRUN, "Data overrun."},
726 {IOSTAT_RSP_INVALID, "Response Invalid."},
727
728 }; /* emlxs_state_table */
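/*
 * Illustrative sketch (not part of this driver): emlxs_table_t arrays such
 * as emlxs_state_table map a code to a display string and can be searched
 * linearly.  The variable "iostat" and the member names "code" and
 * "string" below are assumptions for the emlxs_table_t fields:
 *
 *	uint32_t i;
 *	uint32_t count = sizeof (emlxs_state_table) / sizeof (emlxs_table_t);
 *
 *	for (i = 0; i < count; i++) {
 *		if (emlxs_state_table[i].code == iostat) {
 *			return (emlxs_state_table[i].string);
 *		}
 *	}
 *
 *	return ("Unknown.");
 */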
729
730
731 #ifdef MENLO_SUPPORT
732 emlxs_table_t emlxs_menlo_cmd_table[] = {
733 {MENLO_CMD_INITIALIZE, "MENLO_INIT"},
734 {MENLO_CMD_FW_DOWNLOAD, "MENLO_FW_DOWNLOAD"},
735 {MENLO_CMD_READ_MEMORY, "MENLO_READ_MEM"},
736 {MENLO_CMD_WRITE_MEMORY, "MENLO_WRITE_MEM"},
737 {MENLO_CMD_FTE_INSERT, "MENLO_FTE_INSERT"},
738 {MENLO_CMD_FTE_DELETE, "MENLO_FTE_DELETE"},
739
740 {MENLO_CMD_GET_INIT, "MENLO_GET_INIT"},
741 {MENLO_CMD_GET_CONFIG, "MENLO_GET_CONFIG"},
742 {MENLO_CMD_GET_PORT_STATS, "MENLO_GET_PORT_STATS"},
743 {MENLO_CMD_GET_LIF_STATS, "MENLO_GET_LIF_STATS"},
744 {MENLO_CMD_GET_ASIC_STATS, "MENLO_GET_ASIC_STATS"},
745 {MENLO_CMD_GET_LOG_CONFIG, "MENLO_GET_LOG_CFG"},
746 {MENLO_CMD_GET_LOG_DATA, "MENLO_GET_LOG_DATA"},
747 {MENLO_CMD_GET_PANIC_LOG, "MENLO_GET_PANIC_LOG"},
748 {MENLO_CMD_GET_LB_MODE, "MENLO_GET_LB_MODE"},
749
750 {MENLO_CMD_SET_PAUSE, "MENLO_SET_PAUSE"},
751 {MENLO_CMD_SET_FCOE_COS, "MENLO_SET_FCOE_COS"},
752 {MENLO_CMD_SET_UIF_PORT_TYPE, "MENLO_SET_UIF_TYPE"},
753
754 {MENLO_CMD_DIAGNOSTICS, "MENLO_DIAGNOSTICS"},
755 {MENLO_CMD_LOOPBACK, "MENLO_LOOPBACK"},
756
757 {MENLO_CMD_RESET, "MENLO_RESET"},
758 {MENLO_CMD_SET_MODE, "MENLO_SET_MODE"}
759
760 }; /* emlxs_menlo_cmd_table */
761
762 emlxs_table_t emlxs_menlo_rsp_table[] = {
763 {MENLO_RSP_SUCCESS, "SUCCESS"},
764 {MENLO_ERR_FAILED, "FAILED"},
765 {MENLO_ERR_INVALID_CMD, "INVALID_CMD"},
766 {MENLO_ERR_INVALID_CREDIT, "INVALID_CREDIT"},
767 {MENLO_ERR_INVALID_SIZE, "INVALID_SIZE"},
768 {MENLO_ERR_INVALID_ADDRESS, "INVALID_ADDRESS"},
769 {MENLO_ERR_INVALID_CONTEXT, "INVALID_CONTEXT"},
770 {MENLO_ERR_INVALID_LENGTH, "INVALID_LENGTH"},
771 {MENLO_ERR_INVALID_TYPE, "INVALID_TYPE"},
772 {MENLO_ERR_INVALID_DATA, "INVALID_DATA"},
773 {MENLO_ERR_INVALID_VALUE1, "INVALID_VALUE1"},
774 {MENLO_ERR_INVALID_VALUE2, "INVALID_VALUE2"},
775 {MENLO_ERR_INVALID_MASK, "INVALID_MASK"},
776 {MENLO_ERR_CHECKSUM, "CHECKSUM_ERROR"},
777 {MENLO_ERR_UNKNOWN_FCID, "UNKNOWN_FCID"},
778 {MENLO_ERR_UNKNOWN_WWN, "UNKNOWN_WWN"},
779 {MENLO_ERR_BUSY, "BUSY"},
780
781 }; /* emlxs_menlo_rsp_table */
782
783 #endif /* MENLO_SUPPORT */
784
785
786 emlxs_table_t emlxs_mscmd_table[] = {
787 {SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
788 {SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
789 {MS_GTIN, "MS_GTIN"},
790 {MS_GIEL, "MS_GIEL"},
791 {MS_GIET, "MS_GIET"},
792 {MS_GDID, "MS_GDID"},
793 {MS_GMID, "MS_GMID"},
794 {MS_GFN, "MS_GFN"},
795 {MS_GIELN, "MS_GIELN"},
796 {MS_GMAL, "MS_GMAL"},
797 {MS_GIEIL, "MS_GIEIL"},
798 {MS_GPL, "MS_GPL"},
799 {MS_GPT, "MS_GPT"},
800 {MS_GPPN, "MS_GPPN"},
801 {MS_GAPNL, "MS_GAPNL"},
802 {MS_GPS, "MS_GPS"},
803 {MS_GPSC, "MS_GPSC"},
804 {MS_GATIN, "MS_GATIN"},
805 {MS_GSES, "MS_GSES"},
806 {MS_GPLNL, "MS_GPLNL"},
807 {MS_GPLT, "MS_GPLT"},
808 {MS_GPLML, "MS_GPLML"},
809 {MS_GPAB, "MS_GPAB"},
810 {MS_GNPL, "MS_GNPL"},
811 {MS_GPNL, "MS_GPNL"},
812 {MS_GPFCP, "MS_GPFCP"},
813 {MS_GPLI, "MS_GPLI"},
814 {MS_GNID, "MS_GNID"},
815 {MS_RIELN, "MS_RIELN"},
816 {MS_RPL, "MS_RPL"},
817 {MS_RPLN, "MS_RPLN"},
818 {MS_RPLT, "MS_RPLT"},
819 {MS_RPLM, "MS_RPLM"},
820 {MS_RPAB, "MS_RPAB"},
821 {MS_RPFCP, "MS_RPFCP"},
822 {MS_RPLI, "MS_RPLI"},
823 {MS_DPL, "MS_DPL"},
824 {MS_DPLN, "MS_DPLN"},
825 {MS_DPLM, "MS_DPLM"},
826 {MS_DPLML, "MS_DPLML"},
827 {MS_DPLI, "MS_DPLI"},
828 {MS_DPAB, "MS_DPAB"},
829 {MS_DPALL, "MS_DPALL"}
830
831 }; /* emlxs_mscmd_table */
832
833
834 emlxs_table_t emlxs_ctcmd_table[] = {
835 {SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
836 {SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
837 {SLI_CTNS_GA_NXT, "GA_NXT"},
838 {SLI_CTNS_GPN_ID, "GPN_ID"},
839 {SLI_CTNS_GNN_ID, "GNN_ID"},
840 {SLI_CTNS_GCS_ID, "GCS_ID"},
841 {SLI_CTNS_GFT_ID, "GFT_ID"},
842 {SLI_CTNS_GSPN_ID, "GSPN_ID"},
843 {SLI_CTNS_GPT_ID, "GPT_ID"},
844 {SLI_CTNS_GID_PN, "GID_PN"},
845 {SLI_CTNS_GID_NN, "GID_NN"},
846 {SLI_CTNS_GIP_NN, "GIP_NN"},
847 {SLI_CTNS_GIPA_NN, "GIPA_NN"},
848 {SLI_CTNS_GSNN_NN, "GSNN_NN"},
849 {SLI_CTNS_GNN_IP, "GNN_IP"},
850 {SLI_CTNS_GIPA_IP, "GIPA_IP"},
851 {SLI_CTNS_GID_FT, "GID_FT"},
852 {SLI_CTNS_GID_PT, "GID_PT"},
853 {SLI_CTNS_RPN_ID, "RPN_ID"},
854 {SLI_CTNS_RNN_ID, "RNN_ID"},
855 {SLI_CTNS_RCS_ID, "RCS_ID"},
856 {SLI_CTNS_RFT_ID, "RFT_ID"},
857 {SLI_CTNS_RSPN_ID, "RSPN_ID"},
858 {SLI_CTNS_RPT_ID, "RPT_ID"},
859 {SLI_CTNS_RIP_NN, "RIP_NN"},
860 {SLI_CTNS_RIPA_NN, "RIPA_NN"},
861 {SLI_CTNS_RSNN_NN, "RSNN_NN"},
862 {SLI_CTNS_DA_ID, "DA_ID"},
863 {SLI_CT_LOOPBACK, "LOOPBACK"} /* Driver special */
864
865 }; /* emlxs_ctcmd_table */
866
867
868
869 emlxs_table_t emlxs_rmcmd_table[] = {
870 {SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
871 {SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
872 {CT_OP_GSAT, "RM_GSAT"},
873 {CT_OP_GHAT, "RM_GHAT"},
874 {CT_OP_GPAT, "RM_GPAT"},
875 {CT_OP_GDAT, "RM_GDAT"},
876 {CT_OP_GPST, "RM_GPST"},
877 {CT_OP_GDP, "RM_GDP"},
878 {CT_OP_GDPG, "RM_GDPG"},
879 {CT_OP_GEPS, "RM_GEPS"},
880 {CT_OP_GLAT, "RM_GLAT"},
881 {CT_OP_SSAT, "RM_SSAT"},
882 {CT_OP_SHAT, "RM_SHAT"},
883 {CT_OP_SPAT, "RM_SPAT"},
884 {CT_OP_SDAT, "RM_SDAT"},
885 {CT_OP_SDP, "RM_SDP"},
886 {CT_OP_SBBS, "RM_SBBS"},
887 {CT_OP_RPST, "RM_RPST"},
888 {CT_OP_VFW, "RM_VFW"},
889 {CT_OP_DFW, "RM_DFW"},
890 {CT_OP_RES, "RM_RES"},
891 {CT_OP_RHD, "RM_RHD"},
892 {CT_OP_UFW, "RM_UFW"},
893 {CT_OP_RDP, "RM_RDP"},
894 {CT_OP_GHDR, "RM_GHDR"},
895 {CT_OP_CHD, "RM_CHD"},
896 {CT_OP_SSR, "RM_SSR"},
897 {CT_OP_RSAT, "RM_RSAT"},
898 {CT_OP_WSAT, "RM_WSAT"},
899 {CT_OP_RSAH, "RM_RSAH"},
900 {CT_OP_WSAH, "RM_WSAH"},
901 {CT_OP_RACT, "RM_RACT"},
902 {CT_OP_WACT, "RM_WACT"},
903 {CT_OP_RKT, "RM_RKT"},
904 {CT_OP_WKT, "RM_WKT"},
905 {CT_OP_SSC, "RM_SSC"},
906 {CT_OP_QHBA, "RM_QHBA"},
907 {CT_OP_GST, "RM_GST"},
908 {CT_OP_GFTM, "RM_GFTM"},
909 {CT_OP_SRL, "RM_SRL"},
910 {CT_OP_SI, "RM_SI"},
911 {CT_OP_SRC, "RM_SRC"},
912 {CT_OP_GPB, "RM_GPB"},
913 {CT_OP_SPB, "RM_SPB"},
914 {CT_OP_RPB, "RM_RPB"},
915 {CT_OP_RAPB, "RM_RAPB"},
916 {CT_OP_GBC, "RM_GBC"},
917 {CT_OP_GBS, "RM_GBS"},
918 {CT_OP_SBS, "RM_SBS"},
919 {CT_OP_GANI, "RM_GANI"},
920 {CT_OP_GRV, "RM_GRV"},
921 {CT_OP_GAPBS, "RM_GAPBS"},
922 {CT_OP_APBC, "RM_APBC"},
923 {CT_OP_GDT, "RM_GDT"},
924 {CT_OP_GDLMI, "RM_GDLMI"},
925 {CT_OP_GANA, "RM_GANA"},
926 {CT_OP_GDLV, "RM_GDLV"},
927 {CT_OP_GWUP, "RM_GWUP"},
928 {CT_OP_GLM, "RM_GLM"},
929 {CT_OP_GABS, "RM_GABS"},
930 {CT_OP_SABS, "RM_SABS"},
931 {CT_OP_RPR, "RM_RPR"},
932 {SLI_CT_LOOPBACK, "LOOPBACK"} /* Driver special */
933
934 }; /* emlxs_rmcmd_table */
935
936
937 emlxs_table_t emlxs_elscmd_table[] = {
938 {ELS_CMD_ACC, "ACC"},
939 {ELS_CMD_LS_RJT, "LS_RJT"},
940 {ELS_CMD_PLOGI, "PLOGI"},
941 {ELS_CMD_FLOGI, "FLOGI"},
942 {ELS_CMD_LOGO, "LOGO"},
943 {ELS_CMD_ABTX, "ABTX"},
944 {ELS_CMD_RCS, "RCS"},
945 {ELS_CMD_RES, "RES"},
946 {ELS_CMD_RSS, "RSS"},
947 {ELS_CMD_RSI, "RSI"},
948 {ELS_CMD_ESTS, "ESTS"},
949 {ELS_CMD_ESTC, "ESTC"},
950 {ELS_CMD_ADVC, "ADVC"},
951 {ELS_CMD_RTV, "RTV"},
952 {ELS_CMD_RLS, "RLS"},
953 {ELS_CMD_ECHO, "ECHO"},
954 {ELS_CMD_TEST, "TEST"},
955 {ELS_CMD_RRQ, "RRQ"},
956 {ELS_CMD_REC, "REC"},
957 {ELS_CMD_PRLI, "PRLI"},
958 {ELS_CMD_PRLO, "PRLO"},
959 {ELS_CMD_SCN, "SCN"},
960 {ELS_CMD_TPLS, "TPLS"},
961 {ELS_CMD_GPRLO, "GPRLO"},
962 {ELS_CMD_GAID, "GAID"},
963 {ELS_CMD_FACT, "FACT"},
964 {ELS_CMD_FDACT, "FDACT"},
965 {ELS_CMD_NACT, "NACT"},
966 {ELS_CMD_NDACT, "NDACT"},
967 {ELS_CMD_QoSR, "QoSR"},
968 {ELS_CMD_RVCS, "RVCS"},
969 {ELS_CMD_PDISC, "PDISC"},
970 {ELS_CMD_FDISC, "FDISC"},
971 {ELS_CMD_ADISC, "ADISC"},
972 {ELS_CMD_FARP, "FARP"},
973 {ELS_CMD_FARPR, "FARPR"},
974 {ELS_CMD_FAN, "FAN"},
975 {ELS_CMD_RSCN, "RSCN"},
976 {ELS_CMD_SCR, "SCR"},
977 {ELS_CMD_LINIT, "LINIT"},
978 {ELS_CMD_RNID, "RNID"},
979 {ELS_CMD_AUTH, "AUTH"}
980
981 }; /* emlxs_elscmd_table */
982
983
984 emlxs_table_t emlxs_mode_table[] = {
985 {MODE_NONE, "NONE"},
986 {MODE_INITIATOR, "INITIATOR"},
987 {MODE_TARGET, "TARGET"},
988 {MODE_ALL, "INITIATOR | TARGET"}
989 }; /* emlxs_mode_table */
990
991 /*
992 *
993 * Device Driver Entry Routines
994 *
995 */
996
997 #ifdef MODSYM_SUPPORT
998 static void emlxs_fca_modclose();
999 static int emlxs_fca_modopen();
1000 emlxs_modsym_t emlxs_modsym; /* uses emlxs_device.lock */
1001
1002 static int
1003 emlxs_fca_modopen()
1004 {
1005 int err;
1006
1007 if (emlxs_modsym.mod_fctl) {
1008 return (0);
1009 }
1010
1011 /* Leadville (fctl) */
1012 err = 0;
1013 emlxs_modsym.mod_fctl =
1014 ddi_modopen("misc/fctl", KRTLD_MODE_FIRST, &err);
1015 if (!emlxs_modsym.mod_fctl) {
1016 cmn_err(CE_WARN,
1017 "?%s: misc/fctl: ddi_modopen misc/fctl failed: error=%d",
1018 DRIVER_NAME, err);
1019
1020 goto failed;
1021 }
1022
1023 err = 0;
1024 /* Check if the fctl fc_fca_attach is present */
1025 emlxs_modsym.fc_fca_attach =
1026 (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_attach",
1027 &err);
1028 if ((void *)emlxs_modsym.fc_fca_attach == NULL) {
1029 cmn_err(CE_WARN,
1030 "?%s: misc/fctl: fc_fca_attach not present", DRIVER_NAME);
1031 goto failed;
1032 }
1033
1034 err = 0;
1035 /* Check if the fctl fc_fca_detach is present */
1036 emlxs_modsym.fc_fca_detach =
1037 (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_detach",
1038 &err);
1039 if ((void *)emlxs_modsym.fc_fca_detach == NULL) {
1040 cmn_err(CE_WARN,
1041 "?%s: misc/fctl: fc_fca_detach not present", DRIVER_NAME);
1042 goto failed;
1043 }
1044
1045 err = 0;
1046 /* Check if the fctl fc_fca_init is present */
1047 emlxs_modsym.fc_fca_init =
1048 (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_init", &err);
1049 if ((void *)emlxs_modsym.fc_fca_init == NULL) {
1050 cmn_err(CE_WARN,
1051 "?%s: misc/fctl: fc_fca_init not present", DRIVER_NAME);
1052 goto failed;
1053 }
1054
1055 return (0);
1056
1057 failed:
1058
1059 emlxs_fca_modclose();
1060
1061 return (1);
1062
1063
1064 } /* emlxs_fca_modopen() */
1065
1066
1067 static void
1068 emlxs_fca_modclose()
1069 {
1070 if (emlxs_modsym.mod_fctl) {
1071 (void) ddi_modclose(emlxs_modsym.mod_fctl);
1072 emlxs_modsym.mod_fctl = 0;
1073 }
1074
1075 emlxs_modsym.fc_fca_attach = NULL;
1076 emlxs_modsym.fc_fca_detach = NULL;
1077 emlxs_modsym.fc_fca_init = NULL;
1078
1079 return;
1080
1081 } /* emlxs_fca_modclose() */
1082
1083 #endif /* MODSYM_SUPPORT */
1084
1085
1086
1087 /*
1088 * Global driver initialization, called once when driver is loaded
1089 */
1090 int
1091 _init(void)
1092 {
1093 int ret;
1094
1095 /*
1096 * First init call for this driver,
1097 * so initialize the emlxs_dev_ctl structure.
1098 */
1099 bzero(&emlxs_device, sizeof (emlxs_device));
1100
1101 #ifdef MODSYM_SUPPORT
1102 bzero(&emlxs_modsym, sizeof (emlxs_modsym_t));
1103 #endif /* MODSYM_SUPPORT */
1104
1105 mutex_init(&emlxs_device.lock, NULL, MUTEX_DRIVER, NULL);
1106
1107 (void) drv_getparm(LBOLT, &emlxs_device.log_timestamp);
1108 emlxs_device.drv_timestamp = ddi_get_time();
1109
1110 for (ret = 0; ret < MAX_FC_BRDS; ret++) {
1111 emlxs_instance[ret] = (uint32_t)-1;
1112 }
1113
1114 /*
1115 * Provide for one ddiinst of the emlxs_dev_ctl structure
1116 * for each possible board in the system.
1117 */
1118 if ((ret = ddi_soft_state_init(&emlxs_soft_state,
1119 sizeof (emlxs_hba_t), MAX_FC_BRDS)) != 0) {
1120 cmn_err(CE_WARN,
1121 "?%s: _init: ddi_soft_state_init failed. rval=%x",
1122 DRIVER_NAME, ret);
1123
1124 return (ret);
1125 }
1126
1127 #ifdef MODSYM_SUPPORT
1128 /* Open SFS */
1129 (void) emlxs_fca_modopen();
1130 #endif /* MODSYM_SUPPORT */
1131
1132 /* Setup devops for SFS */
1133 MODSYM(fc_fca_init)(&emlxs_ops);
1134
1135 if ((ret = mod_install(&emlxs_modlinkage)) != 0) {
1136 (void) ddi_soft_state_fini(&emlxs_soft_state);
1137 #ifdef MODSYM_SUPPORT
1138 /* Close SFS */
1139 emlxs_fca_modclose();
1140 #endif /* MODSYM_SUPPORT */
1141
1142 return (ret);
1143 }
1144
1145 #ifdef SAN_DIAG_SUPPORT
1146 mutex_init(&emlxs_sd_bucket_mutex, NULL, MUTEX_DRIVER, NULL);
1147 #endif /* SAN_DIAG_SUPPORT */
1148
1149 return (ret);
1150
1151 } /* _init() */
1152
1153
1154 /*
1155 * Called when driver is unloaded.
1156 */
1157 int
1158 _fini(void)
1159 {
1160 int ret;
1161
1162 if ((ret = mod_remove(&emlxs_modlinkage)) != 0) {
1163 return (ret);
1164 }
1165 #ifdef MODSYM_SUPPORT
1166 /* Close SFS */
1167 emlxs_fca_modclose();
1168 #endif /* MODSYM_SUPPORT */
1169
1170 /*
1171 * Destroy the soft state structure
1172 */
1173 (void) ddi_soft_state_fini(&emlxs_soft_state);
1174
1175 /* Destroy the global device lock */
1176 mutex_destroy(&emlxs_device.lock);
1177
1178 #ifdef SAN_DIAG_SUPPORT
1179 mutex_destroy(&emlxs_sd_bucket_mutex);
1180 #endif /* SAN_DIAG_SUPPORT */
1181
1182 return (ret);
1183
1184 } /* _fini() */
1185
1186
1187
1188 int
1189 _info(struct modinfo *modinfop)
1190 {
1191
1192 return (mod_info(&emlxs_modlinkage, modinfop));
1193
1194 } /* _info() */
1195
1196
1197 /*
1198 * Attach a ddiinst of an emlx host adapter.
1199 * Allocate data structures, initialize the adapter and we're ready to fly.
1200 */
1201 static int
1202 emlxs_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
1203 {
1204 emlxs_hba_t *hba;
1205 int ddiinst;
1206 int emlxinst;
1207 int rval;
1208
1209 switch (cmd) {
1210 case DDI_ATTACH:
1211 /* If successful this will set EMLXS_PM_IN_ATTACH */
1212 rval = emlxs_hba_attach(dip);
1213 break;
1214
1215 case DDI_RESUME:
1216 /* This will resume the driver */
1217 rval = emlxs_hba_resume(dip);
1218 break;
1219
1220 default:
1221 rval = DDI_FAILURE;
1222 }
1223
1224 if (rval == DDI_SUCCESS) {
1225 ddiinst = ddi_get_instance(dip);
1226 emlxinst = emlxs_get_instance(ddiinst);
1227 hba = emlxs_device.hba[emlxinst];
1228
1229 if ((hba != NULL) && (hba != (emlxs_hba_t *)-1)) {
1230
1231 /* Enable driver dump feature */
1232 mutex_enter(&EMLXS_PORT_LOCK);
1233 hba->flag |= FC_DUMP_SAFE;
1234 mutex_exit(&EMLXS_PORT_LOCK);
1235 }
1236 }
1237
1238 return (rval);
1239
1240 } /* emlxs_attach() */
1241
1242
1243 /*
1244 * Detach/prepare driver to unload (see detach(9E)).
1245 */
1246 static int
1247 emlxs_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1248 {
1249 emlxs_hba_t *hba;
1250 emlxs_port_t *port;
1251 int ddiinst;
1252 int emlxinst;
1253 int rval;
1254
1255 ddiinst = ddi_get_instance(dip);
1256 emlxinst = emlxs_get_instance(ddiinst);
1257 hba = emlxs_device.hba[emlxinst];
1258
1259 if (hba == NULL) {
1260 cmn_err(CE_WARN, "?%s: Detach: NULL device.", DRIVER_NAME);
1261
1262 return (DDI_FAILURE);
1263 }
1264
1265 if (hba == (emlxs_hba_t *)-1) {
1266 cmn_err(CE_WARN, "?%s: Detach: Device attach failed.",
1267 DRIVER_NAME);
1268
1269 return (DDI_FAILURE);
1270 }
1271
1272 port = &PPORT;
1273 rval = DDI_SUCCESS;
1274
1275 /* Check driver dump */
1276 mutex_enter(&EMLXS_PORT_LOCK);
1277
1278 if (hba->flag & FC_DUMP_ACTIVE) {
1279 mutex_exit(&EMLXS_PORT_LOCK);
1280
1281 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1282 "detach: Driver busy. Driver dump active.");
1283
1284 return (DDI_FAILURE);
1285 }
1286
1287 #ifdef SFCT_SUPPORT
1288 if ((port->flag & EMLXS_TGT_BOUND) &&
1289 ((port->fct_flags & FCT_STATE_PORT_ONLINE) ||
1290 (port->fct_flags & FCT_STATE_NOT_ACKED))) {
1291 mutex_exit(&EMLXS_PORT_LOCK);
1292
1293 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1294 "detach: Driver busy. Target mode active.");
1295
1296 return (DDI_FAILURE);
1297 }
1298 #endif /* SFCT_SUPPORT */
1299
1300 if (port->flag & EMLXS_INI_BOUND) {
1301 mutex_exit(&EMLXS_PORT_LOCK);
1302
1303 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1304 "detach: Driver busy. Initiator mode active.");
1305
1306 return (DDI_FAILURE);
1307 }
1308
1309 hba->flag &= ~FC_DUMP_SAFE;
1310
1311 mutex_exit(&EMLXS_PORT_LOCK);
1312
1313 switch (cmd) {
1314 case DDI_DETACH:
1315
1316 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
1317 "DDI_DETACH");
1318
1319 rval = emlxs_hba_detach(dip);
1320
1321 if (rval != DDI_SUCCESS) {
1322 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1323 "Unable to detach.");
1324 }
1325 break;
1326
1327 case DDI_SUSPEND:
1328
1329 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
1330 "DDI_SUSPEND");
1331
1332 /* Suspend the driver */
1333 rval = emlxs_hba_suspend(dip);
1334
1335 if (rval != DDI_SUCCESS) {
1336 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1337 "Unable to suspend driver.");
1338 }
1339 break;
1340
1341 default:
1342 cmn_err(CE_WARN, "?%s: Detach: Unknown cmd received. cmd=%x",
1343 DRIVER_NAME, cmd);
1344 rval = DDI_FAILURE;
1345 }
1346
1347 if (rval == DDI_FAILURE) {
1348 /* Re-Enable driver dump feature */
1349 mutex_enter(&EMLXS_PORT_LOCK);
1350 hba->flag |= FC_DUMP_SAFE;
1351 mutex_exit(&EMLXS_PORT_LOCK);
1352 }
1353
1354 return (rval);
1355
1356 } /* emlxs_detach() */
1357
1358
1359 /* EMLXS_PORT_LOCK must be held when calling this */
1360 extern void
1361 emlxs_port_init(emlxs_port_t *port)
1362 {
1363 emlxs_hba_t *hba = HBA;
1364
1365 /* Initialize the base node */
1366 bzero((caddr_t)&port->node_base, sizeof (NODELIST));
1367 port->node_base.nlp_Rpi = 0;
1368 port->node_base.nlp_DID = 0xffffff;
1369 port->node_base.nlp_list_next = NULL;
1370 port->node_base.nlp_list_prev = NULL;
1371 port->node_base.nlp_active = 1;
1372 port->node_base.nlp_base = 1;
1373 port->node_count = 0;
1374
1375 if (!(port->flag & EMLXS_PORT_ENABLED)) {
1376 uint8_t dummy_wwn[8] =
1377 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
1378
1379 bcopy((caddr_t)dummy_wwn, (caddr_t)&port->wwnn,
1380 sizeof (NAME_TYPE));
1381 bcopy((caddr_t)dummy_wwn, (caddr_t)&port->wwpn,
1382 sizeof (NAME_TYPE));
1383 }
1384
1385 if (!(port->flag & EMLXS_PORT_CONFIG)) {
1386 (void) strncpy((caddr_t)port->snn, (caddr_t)hba->snn,
1387 (sizeof (port->snn)-1));
1388 (void) strncpy((caddr_t)port->spn, (caddr_t)hba->spn,
1389 (sizeof (port->spn)-1));
1390 }
1391
1392 bcopy((caddr_t)&hba->sparam, (caddr_t)&port->sparam,
1393 sizeof (SERV_PARM));
1394 bcopy((caddr_t)&port->wwnn, (caddr_t)&port->sparam.nodeName,
1395 sizeof (NAME_TYPE));
1396 bcopy((caddr_t)&port->wwpn, (caddr_t)&port->sparam.portName,
1397 sizeof (NAME_TYPE));
1398
1399 return;
1400
1401 } /* emlxs_port_init() */
1402
1403
1404 void
1405 emlxs_disable_pcie_ce_err(emlxs_hba_t *hba)
1406 {
1407 uint16_t reg;
1408
1409 if (!hba->pci_cap_offset[PCI_CAP_ID_PCI_E]) {
1410 return;
1411 }
1412
1413 /* Turn off the Correctable Error Reporting */
1414 /* (the Device Control Register, bit 0). */
1415 reg = ddi_get16(hba->pci_acc_handle,
1416 (uint16_t *)(hba->pci_addr +
1417 hba->pci_cap_offset[PCI_CAP_ID_PCI_E] +
1418 PCIE_DEVCTL));
1419
1420 reg &= ~1;
1421
1422 (void) ddi_put16(hba->pci_acc_handle,
1423 (uint16_t *)(hba->pci_addr +
1424 hba->pci_cap_offset[PCI_CAP_ID_PCI_E] +
1425 PCIE_DEVCTL),
1426 reg);
1427
1428 return;
1429
1430 } /* emlxs_disable_pcie_ce_err() */
1431
1432
1433 /*
1434 * emlxs_fca_bind_port
1435 *
1436 * Arguments:
1437 *
1438 * dip: the dev_info pointer for the ddiinst
1439 * port_info: pointer to info handed back to the transport
1440 * bind_info: pointer to info from the transport
1441 *
1442 * Return values: a port handle for this port, NULL for failure
1443 *
1444 */
1445 static opaque_t
1446 emlxs_fca_bind_port(dev_info_t *dip, fc_fca_port_info_t *port_info,
1447 fc_fca_bind_info_t *bind_info)
1448 {
1449 emlxs_hba_t *hba;
1450 emlxs_port_t *port;
1451 emlxs_port_t *pport;
1452 emlxs_port_t *vport;
1453 int ddiinst;
1454 emlxs_vpd_t *vpd;
1455 emlxs_config_t *cfg;
1456 char *dptr;
1457 char buffer[16];
1458 uint32_t length;
1459 uint32_t len;
1460 char topology[32];
1461 char linkspeed[32];
1462 uint32_t linkstate;
1463
1464 ddiinst = ddi_get_instance(dip);
1465 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
1466 port = &PPORT;
1467 pport = &PPORT;
1468
1469 ddiinst = hba->ddiinst;
1470 vpd = &VPD;
1471 cfg = &CFG;
1472
1473 mutex_enter(&EMLXS_PORT_LOCK);
1474
1475 if (bind_info->port_num > 0) {
1476 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1477 if (!(hba->flag & FC_NPIV_ENABLED) ||
1478 !(bind_info->port_npiv) ||
1479 (bind_info->port_num > hba->vpi_max))
1480 #elif (EMLXS_MODREV >= EMLXS_MODREV3)
1481 if (!(hba->flag & FC_NPIV_ENABLED) ||
1482 (bind_info->port_num > hba->vpi_high))
1483 #endif
1484 {
1485 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1486 "fca_bind_port: Port %d not supported.",
1487 bind_info->port_num);
1488
1489 mutex_exit(&EMLXS_PORT_LOCK);
1490
1491 port_info->pi_error = FC_OUTOFBOUNDS;
1492 return (NULL);
1493 }
1494 }
1495
1496 /* Get true port pointer */
1497 port = &VPORT(bind_info->port_num);
1498
1499 /* Make sure the port is not already bound to the transport */
1500 if (port->flag & EMLXS_INI_BOUND) {
1501
1502 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1503 "fca_bind_port: Port %d already bound. flag=%x",
1504 bind_info->port_num, port->flag);
1505
1506 mutex_exit(&EMLXS_PORT_LOCK);
1507
1508 port_info->pi_error = FC_ALREADY;
1509 return (NULL);
1510 }
1511
1512 if (!(pport->flag & EMLXS_INI_ENABLED)) {
1513 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1514 "fca_bind_port: Physical port does not support "
1515 "initiator mode.");
1516
1517 mutex_exit(&EMLXS_PORT_LOCK);
1518
1519 port_info->pi_error = FC_OUTOFBOUNDS;
1520 return (NULL);
1521 }
1522
1523 /* Make sure port enable flag is set */
1524 /* In case fca_port_unbind was called just prior to fca_port_bind */
1525 /* without a driver attach or resume operation */
1526 port->flag |= EMLXS_PORT_ENABLED;
1527
1528 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1529 "fca_bind_port: Port %d: port_info=%p bind_info=%p",
1530 bind_info->port_num, port_info, bind_info);
1531
1532 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1533 if (bind_info->port_npiv) {
1534 /* Leadville is telling us about a new virtual port */
1535 bcopy((caddr_t)&bind_info->port_nwwn, (caddr_t)&port->wwnn,
1536 sizeof (NAME_TYPE));
1537 bcopy((caddr_t)&bind_info->port_pwwn, (caddr_t)&port->wwpn,
1538 sizeof (NAME_TYPE));
1539 if (port->snn[0] == 0) {
1540 (void) strncpy((caddr_t)port->snn, (caddr_t)hba->snn,
1541 (sizeof (port->snn)-1));
1542
1543 }
1544
1545 if (port->spn[0] == 0) {
1546 (void) snprintf((caddr_t)port->spn,
1547 (sizeof (port->spn)-1), "%s VPort-%d",
1548 (caddr_t)hba->spn, port->vpi);
1549 }
1550 port->flag |= EMLXS_PORT_CONFIG;
1551 }
1552 #endif /* >= EMLXS_MODREV5 */
1553
1554 /*
1555 * Restricted login should apply to both physical and
1556 * virtual ports.
1557 */
1558 if (cfg[CFG_VPORT_RESTRICTED].current) {
1559 port->flag |= EMLXS_PORT_RESTRICTED;
1560 }
1561
1562 /* Perform generic port initialization */
1563 emlxs_port_init(port);
1564
1565 /* Perform SFS specific initialization */
1566 port->ulp_handle = bind_info->port_handle;
1567 port->ulp_statec_cb = bind_info->port_statec_cb;
1568 port->ulp_unsol_cb = bind_info->port_unsol_cb;
1569
1570 /* Set the bound flag */
1571 port->flag |= EMLXS_INI_BOUND;
1572 hba->num_of_ports++;
1573
1574 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
1575 mutex_exit(&EMLXS_PORT_LOCK);
1576 (void) emlxs_vpi_port_bind_notify(port);
1577 mutex_enter(&EMLXS_PORT_LOCK);
1578
1579 linkstate = (port->vpip->state == VPI_STATE_PORT_ONLINE) ?
1580 FC_LINK_UP : FC_LINK_DOWN;
1581 } else {
1582 linkstate = hba->state;
1583 }
1584
1585 /* Update the port info structure */
1586
1587 /* Set the topology and state */
1588 if (port->mode == MODE_TARGET) {
1589 port_info->pi_port_state = FC_STATE_OFFLINE;
1590 port_info->pi_topology = FC_TOP_UNKNOWN;
1591 } else if ((linkstate < FC_LINK_UP) ||
1592 ((port->vpi > 0) && (!(port->flag & EMLXS_PORT_ENABLED) ||
1593 !(hba->flag & FC_NPIV_SUPPORTED)))) {
1594 port_info->pi_port_state = FC_STATE_OFFLINE;
1595 port_info->pi_topology = FC_TOP_UNKNOWN;
1596 }
1597 #ifdef MENLO_SUPPORT
1598 else if (hba->flag & FC_MENLO_MODE) {
1599 port_info->pi_port_state = FC_STATE_OFFLINE;
1600 port_info->pi_topology = FC_TOP_UNKNOWN;
1601 }
1602 #endif /* MENLO_SUPPORT */
1603 else {
1604 /* Check for loop topology */
1605 if (hba->topology == TOPOLOGY_LOOP) {
1606 port_info->pi_port_state = FC_STATE_LOOP;
1607 (void) strlcpy(topology, ", loop", sizeof (topology));
1608
1609 if (hba->flag & FC_FABRIC_ATTACHED) {
1610 port_info->pi_topology = FC_TOP_PUBLIC_LOOP;
1611 } else {
1612 port_info->pi_topology = FC_TOP_PRIVATE_LOOP;
1613 }
1614 } else {
1615 port_info->pi_topology = FC_TOP_FABRIC;
1616 port_info->pi_port_state = FC_STATE_ONLINE;
1617 (void) strlcpy(topology, ", fabric", sizeof (topology));
1618 }
1619
1620 /* Set the link speed */
1621 switch (hba->linkspeed) {
1622 case 0:
1623 (void) strlcpy(linkspeed, "Gb", sizeof (linkspeed));
1624 port_info->pi_port_state |= FC_STATE_1GBIT_SPEED;
1625 break;
1626
1627 case LA_1GHZ_LINK:
1628 (void) strlcpy(linkspeed, "1Gb", sizeof (linkspeed));
1629 port_info->pi_port_state |= FC_STATE_1GBIT_SPEED;
1630 break;
1631 case LA_2GHZ_LINK:
1632 (void) strlcpy(linkspeed, "2Gb", sizeof (linkspeed));
1633 port_info->pi_port_state |= FC_STATE_2GBIT_SPEED;
1634 break;
1635 case LA_4GHZ_LINK:
1636 (void) strlcpy(linkspeed, "4Gb", sizeof (linkspeed));
1637 port_info->pi_port_state |= FC_STATE_4GBIT_SPEED;
1638 break;
1639 case LA_8GHZ_LINK:
1640 (void) strlcpy(linkspeed, "8Gb", sizeof (linkspeed));
1641 port_info->pi_port_state |= FC_STATE_8GBIT_SPEED;
1642 break;
1643 case LA_10GHZ_LINK:
1644 (void) strlcpy(linkspeed, "10Gb", sizeof (linkspeed));
1645 port_info->pi_port_state |= FC_STATE_10GBIT_SPEED;
1646 break;
1647 case LA_16GHZ_LINK:
1648 (void) strlcpy(linkspeed, "16Gb", sizeof (linkspeed));
1649 port_info->pi_port_state |= FC_STATE_16GBIT_SPEED;
1650 break;
1651 case LA_32GHZ_LINK:
1652 (void) strlcpy(linkspeed, "32Gb", sizeof (linkspeed));
1653 port_info->pi_port_state |= FC_STATE_32GBIT_SPEED;
1654 break;
1655 default:
1656 (void) snprintf(linkspeed, sizeof (linkspeed),
1657 "unknown(0x%x)", hba->linkspeed);
1658 break;
1659 }
1660
1661 if (hba->sli_mode <= EMLXS_HBA_SLI3_MODE) {
1662 /* Adjusting port context for link up messages */
1663 vport = port;
1664 port = &PPORT;
1665 if (vport->vpi == 0) {
1666 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
1667 "%s%s, initiator",
1668 linkspeed, topology);
1669 } else if (!(hba->flag & FC_NPIV_LINKUP)) {
1670 hba->flag |= FC_NPIV_LINKUP;
1671 EMLXS_MSGF(EMLXS_CONTEXT,
1672 &emlxs_npiv_link_up_msg,
1673 "%s%s, initiator", linkspeed, topology);
1674 }
1675 port = vport;
1676 }
1677 }
1678
1679 /* PCIE Correctable Error Reporting workaround */
1680 if (((hba->model_info.chip == EMLXS_BE2_CHIP) ||
1681 (hba->model_info.chip == EMLXS_BE3_CHIP)) &&
1682 (bind_info->port_num == 0)) {
1683 emlxs_disable_pcie_ce_err(hba);
1684 }
1685
1686 /* Save initial state */
1687 port->ulp_statec = port_info->pi_port_state;
1688
1689 /*
1690 * The transport needs a copy of the common service parameters
1691 * for this port. The transport can get any updates through
1692 * the getcap entry point.
1693 */
1694 bcopy((void *) &port->sparam,
1695 (void *) &port_info->pi_login_params.common_service,
1696 sizeof (SERV_PARM));
1697
1698 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
1699 /* Swap the service parameters for ULP */
1700 emlxs_swap_service_params((SERV_PARM *)&port_info->pi_login_params.
1701 common_service);
1702 #endif /* EMLXS_MODREV2X */
1703
1704 port_info->pi_login_params.common_service.btob_credit = 0xffff;
1705
1706 bcopy((void *) &port->wwnn,
1707 (void *) &port_info->pi_login_params.node_ww_name,
1708 sizeof (NAME_TYPE));
1709
1710 bcopy((void *) &port->wwpn,
1711 (void *) &port_info->pi_login_params.nport_ww_name,
1712 sizeof (NAME_TYPE));
1713
1714 /*
1715 * We need to turn off CLASS2 support.
1716 * Otherwise, the FC transport will use CLASS2 as the default class
1717 * and never try CLASS3.
1718 */
1719 #if (EMLXS_MODREV >= EMLXS_MODREV3)
1720 #if (EMLXS_MODREVX >= EMLXS_MODREV3X)
1721 if ((port_info->pi_login_params.class_1.class_opt) & 0x0080) {
1722 port_info->pi_login_params.class_1.class_opt &= ~0x0080;
1723 }
1724
1725 if ((port_info->pi_login_params.class_2.class_opt) & 0x0080) {
1726 port_info->pi_login_params.class_2.class_opt &= ~0x0080;
1727 }
1728 #else /* EMLXS_SPARC or EMLXS_MODREV2X */
1729 if ((port_info->pi_login_params.class_1.class_opt) & 0x8000) {
1730 port_info->pi_login_params.class_1.class_opt &= ~0x8000;
1731 }
1732
1733 if ((port_info->pi_login_params.class_2.class_opt) & 0x8000) {
1734 port_info->pi_login_params.class_2.class_opt &= ~0x8000;
1735 }
1736 #endif /* >= EMLXS_MODREV3X */
1737 #endif /* >= EMLXS_MODREV3 */
1738
1739
1740 #if (EMLXS_MODREV <= EMLXS_MODREV2)
1741 if ((port_info->pi_login_params.class_1.data[0]) & 0x80) {
1742 port_info->pi_login_params.class_1.data[0] &= ~0x80;
1743 }
1744
1745 if ((port_info->pi_login_params.class_2.data[0]) & 0x80) {
1746 port_info->pi_login_params.class_2.data[0] &= ~0x80;
1747 }
1748 #endif /* <= EMLXS_MODREV2 */
1749
1750 /* Additional parameters */
1751 port_info->pi_s_id.port_id = port->did;
1752 port_info->pi_s_id.priv_lilp_posit = 0;
1753 port_info->pi_hard_addr.hard_addr = cfg[CFG_ASSIGN_ALPA].current;
1754
1755 /* Initialize the RNID parameters */
1756 bzero(&port_info->pi_rnid_params, sizeof (port_info->pi_rnid_params));
1757
1758 (void) snprintf((char *)port_info->pi_rnid_params.params.global_id,
1759 (sizeof (port_info->pi_rnid_params.params.global_id)-1),
1760 "%01x%01x%02x%02x%02x%02x%02x%02x%02x", hba->wwpn.nameType,
1761 hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb, hba->wwpn.IEEE[0],
1762 hba->wwpn.IEEE[1], hba->wwpn.IEEE[2], hba->wwpn.IEEE[3],
1763 hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
1764
1765 port_info->pi_rnid_params.params.unit_type = RNID_HBA;
1766 port_info->pi_rnid_params.params.port_id = port->did;
1767 port_info->pi_rnid_params.params.ip_version = RNID_IPV4;
1768
1769 /* Initialize the port attributes */
1770 bzero(&port_info->pi_attrs, sizeof (port_info->pi_attrs));
1771
1772 (void) strncpy(port_info->pi_attrs.manufacturer,
1773 hba->model_info.manufacturer,
1774 (sizeof (port_info->pi_attrs.manufacturer)-1));
1775
1776 port_info->pi_rnid_params.status = FC_SUCCESS;
1777
1778 (void) strncpy(port_info->pi_attrs.serial_number, vpd->serial_num,
1779 (sizeof (port_info->pi_attrs.serial_number)-1));
1780
1781 (void) snprintf(port_info->pi_attrs.firmware_version,
1782 (sizeof (port_info->pi_attrs.firmware_version)-1), "%s (%s)",
1783 vpd->fw_version, vpd->fw_label);
1784
1785 #ifdef EMLXS_I386
1786 (void) snprintf(port_info->pi_attrs.option_rom_version,
1787 (sizeof (port_info->pi_attrs.option_rom_version)-1),
1788 "Boot:%s", vpd->boot_version);
1789 #else /* EMLXS_SPARC */
1790 (void) snprintf(port_info->pi_attrs.option_rom_version,
1791 (sizeof (port_info->pi_attrs.option_rom_version)-1),
1792 "Boot:%s Fcode:%s", vpd->boot_version, vpd->fcode_version);
1793 #endif /* EMLXS_I386 */
1794
1795 (void) snprintf(port_info->pi_attrs.driver_version,
1796 (sizeof (port_info->pi_attrs.driver_version)-1), "%s (%s)",
1797 emlxs_version, emlxs_revision);
1798
1799 (void) strncpy(port_info->pi_attrs.driver_name, DRIVER_NAME,
1800 (sizeof (port_info->pi_attrs.driver_name)-1));
1801
1802 port_info->pi_attrs.vendor_specific_id =
1803 (hba->model_info.device_id << 16) | hba->model_info.vendor_id;
1804
1805 port_info->pi_attrs.supported_cos = LE_SWAP32(FC_NS_CLASS3);
1806
1807 port_info->pi_attrs.max_frame_size = FF_FRAME_SIZE;
1808
1809 #if (EMLXS_MODREV >= EMLXS_MODREV3)
1810 port_info->pi_rnid_params.params.num_attached = 0;
1811
1812 if ((hba->model_info.chip & EMLXS_LANCER_CHIPS) != 0) {
1813 uint8_t byte;
1814 uint8_t *wwpn;
1815 uint32_t i;
1816 uint32_t j;
1817
1818 /* Copy the WWPN as a string into the local buffer */
1819 wwpn = (uint8_t *)&hba->wwpn;
1820 for (i = 0; i < 16; i++) {
1821 byte = *wwpn++;
1822 j = ((byte & 0xf0) >> 4);
1823 if (j <= 9) {
1824 buffer[i] =
1825 (char)((uint8_t)'0' + (uint8_t)j);
1826 } else {
1827 buffer[i] =
1828 (char)((uint8_t)'A' + (uint8_t)(j -
1829 10));
1830 }
1831
1832 i++;
1833 j = (byte & 0xf);
1834 if (j <= 9) {
1835 buffer[i] =
1836 (char)((uint8_t)'0' + (uint8_t)j);
1837 } else {
1838 buffer[i] =
1839 (char)((uint8_t)'A' + (uint8_t)(j -
1840 10));
1841 }
1842 }
1843
1844 port_info->pi_attrs.hba_fru_details.port_index = 0;
1845 #if ((EMLXS_MODREV == EMLXS_MODREV3) || (EMLXS_MODREV == EMLXS_MODREV4))
1846
1847 } else if (hba->flag & FC_NPIV_ENABLED) {
1848 uint8_t byte;
1849 uint8_t *wwpn;
1850 uint32_t i;
1851 uint32_t j;
1852
1853 /* Copy the WWPN as a string into the local buffer */
1854 wwpn = (uint8_t *)&hba->wwpn;
1855 for (i = 0; i < 16; i++) {
1856 byte = *wwpn++;
1857 j = ((byte & 0xf0) >> 4);
1858 if (j <= 9) {
1859 buffer[i] =
1860 (char)((uint8_t)'0' + (uint8_t)j);
1861 } else {
1862 buffer[i] =
1863 (char)((uint8_t)'A' + (uint8_t)(j -
1864 10));
1865 }
1866
1867 i++;
1868 j = (byte & 0xf);
1869 if (j <= 9) {
1870 buffer[i] =
1871 (char)((uint8_t)'0' + (uint8_t)j);
1872 } else {
1873 buffer[i] =
1874 (char)((uint8_t)'A' + (uint8_t)(j -
1875 10));
1876 }
1877 }
1878
1879 port_info->pi_attrs.hba_fru_details.port_index = port->vpi;
1880 #endif /* == EMLXS_MODREV3 || EMLXS_MODREV4 */
1881
1882 } else {
1883 		/* Copy the serial number string (rightmost 16 chars) */
1884 		/* into the right-justified local buffer */
1885 bzero(buffer, sizeof (buffer));
1886 length = strlen(vpd->serial_num);
1887 len = (length > 16) ? 16 : length;
1888 bcopy(&vpd->serial_num[(length - len)],
1889 &buffer[(sizeof (buffer) - len)], len);
1890
1891 port_info->pi_attrs.hba_fru_details.port_index =
1892 vpd->port_index;
1893 }
1894
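	/* Pack the 16-character buffer into the two 64-bit FRU detail */
	/* fields: chars 0-7 into 'high' and chars 8-15 into 'low', */
	/* then swap each into the expected byte order. */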
1895 dptr = (char *)&port_info->pi_attrs.hba_fru_details.high;
1896 dptr[0] = buffer[0];
1897 dptr[1] = buffer[1];
1898 dptr[2] = buffer[2];
1899 dptr[3] = buffer[3];
1900 dptr[4] = buffer[4];
1901 dptr[5] = buffer[5];
1902 dptr[6] = buffer[6];
1903 dptr[7] = buffer[7];
1904 port_info->pi_attrs.hba_fru_details.high =
1905 LE_SWAP64(port_info->pi_attrs.hba_fru_details.high);
1906
1907 dptr = (char *)&port_info->pi_attrs.hba_fru_details.low;
1908 dptr[0] = buffer[8];
1909 dptr[1] = buffer[9];
1910 dptr[2] = buffer[10];
1911 dptr[3] = buffer[11];
1912 dptr[4] = buffer[12];
1913 dptr[5] = buffer[13];
1914 dptr[6] = buffer[14];
1915 dptr[7] = buffer[15];
1916 port_info->pi_attrs.hba_fru_details.low =
1917 LE_SWAP64(port_info->pi_attrs.hba_fru_details.low);
1918
1919 #endif /* >= EMLXS_MODREV3 */
1920
1921 #if (EMLXS_MODREV >= EMLXS_MODREV4)
1922 (void) strncpy((caddr_t)port_info->pi_attrs.sym_node_name,
1923 (caddr_t)port->snn, FCHBA_SYMB_NAME_LEN);
1924 (void) strncpy((caddr_t)port_info->pi_attrs.sym_port_name,
1925 (caddr_t)port->spn, FCHBA_SYMB_NAME_LEN);
1926 #endif /* >= EMLXS_MODREV4 */
1927
1928 (void) snprintf(port_info->pi_attrs.hardware_version,
1929 (sizeof (port_info->pi_attrs.hardware_version)-1),
1930 "%x", vpd->biuRev);
1931
1932 /* Set the hba speed limit */
1933 if (vpd->link_speed & LMT_32GB_CAPABLE) {
1934 port_info->pi_attrs.supported_speed |=
1935 FC_HBA_PORTSPEED_32GBIT;
1936 }
1937 if (vpd->link_speed & LMT_16GB_CAPABLE) {
1938 port_info->pi_attrs.supported_speed |=
1939 FC_HBA_PORTSPEED_16GBIT;
1940 }
1941 if (vpd->link_speed & LMT_10GB_CAPABLE) {
1942 port_info->pi_attrs.supported_speed |=
1943 FC_HBA_PORTSPEED_10GBIT;
1944 }
1945 if (vpd->link_speed & LMT_8GB_CAPABLE) {
1946 port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_8GBIT;
1947 }
1948 if (vpd->link_speed & LMT_4GB_CAPABLE) {
1949 port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_4GBIT;
1950 }
1951 if (vpd->link_speed & LMT_2GB_CAPABLE) {
1952 port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_2GBIT;
1953 }
1954 if (vpd->link_speed & LMT_1GB_CAPABLE) {
1955 port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_1GBIT;
1956 }
1957
1958 /* Set the hba model info */
1959 (void) strncpy(port_info->pi_attrs.model, hba->model_info.model,
1960 (sizeof (port_info->pi_attrs.model)-1));
1961 (void) strncpy(port_info->pi_attrs.model_description,
1962 hba->model_info.model_desc,
1963 (sizeof (port_info->pi_attrs.model_description)-1));
1964
1965
1966 /* Log information */
1967 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1968 "Bind info: port_num = %d", bind_info->port_num);
1969 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1970 "Bind info: port_handle = %p", bind_info->port_handle);
1971
1972 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1973 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1974 "Bind info: port_npiv = %d", bind_info->port_npiv);
1975 #endif /* >= EMLXS_MODREV5 */
1976
1977 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1978 "Port info: pi_topology = %x", port_info->pi_topology);
1979 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1980 "Port info: pi_error = %x", port_info->pi_error);
1981 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1982 "Port info: pi_port_state = %x", port_info->pi_port_state);
1983
1984 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1985 "Port info: port_id = %x", port_info->pi_s_id.port_id);
1986 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1987 "Port info: priv_lilp_posit = %x",
1988 port_info->pi_s_id.priv_lilp_posit);
1989
1990 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1991 "Port info: hard_addr = %x",
1992 port_info->pi_hard_addr.hard_addr);
1993
1994 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1995 "Port info: rnid.status = %x",
1996 port_info->pi_rnid_params.status);
1997 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1998 "Port info: rnid.global_id = %16s",
1999 port_info->pi_rnid_params.params.global_id);
2000 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2001 "Port info: rnid.unit_type = %x",
2002 port_info->pi_rnid_params.params.unit_type);
2003 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2004 "Port info: rnid.port_id = %x",
2005 port_info->pi_rnid_params.params.port_id);
2006 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2007 "Port info: rnid.num_attached = %x",
2008 port_info->pi_rnid_params.params.num_attached);
2009 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2010 "Port info: rnid.ip_version = %x",
2011 port_info->pi_rnid_params.params.ip_version);
2012 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2013 "Port info: rnid.udp_port = %x",
2014 port_info->pi_rnid_params.params.udp_port);
2015 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2016 "Port info: rnid.ip_addr = %16s",
2017 port_info->pi_rnid_params.params.ip_addr);
2018 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2019 "Port info: rnid.spec_id_resv = %x",
2020 port_info->pi_rnid_params.params.specific_id_resv);
2021 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2022 "Port info: rnid.topo_flags = %x",
2023 port_info->pi_rnid_params.params.topo_flags);
2024
2025 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2026 "Port info: manufacturer = %s",
2027 port_info->pi_attrs.manufacturer);
2028 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2029 "Port info: serial_num = %s",
2030 port_info->pi_attrs.serial_number);
2031 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2032 "Port info: model = %s", port_info->pi_attrs.model);
2033 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2034 "Port info: model_description = %s",
2035 port_info->pi_attrs.model_description);
2036 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2037 "Port info: hardware_version = %s",
2038 port_info->pi_attrs.hardware_version);
2039 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2040 "Port info: driver_version = %s",
2041 port_info->pi_attrs.driver_version);
2042 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2043 "Port info: option_rom_version = %s",
2044 port_info->pi_attrs.option_rom_version);
2045 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2046 "Port info: firmware_version = %s",
2047 port_info->pi_attrs.firmware_version);
2048 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2049 "Port info: driver_name = %s",
2050 port_info->pi_attrs.driver_name);
2051 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2052 "Port info: vendor_specific_id = %x",
2053 port_info->pi_attrs.vendor_specific_id);
2054 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2055 "Port info: supported_cos = %x",
2056 port_info->pi_attrs.supported_cos);
2057 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2058 "Port info: supported_speed = %x",
2059 port_info->pi_attrs.supported_speed);
2060 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2061 "Port info: max_frame_size = %x",
2062 port_info->pi_attrs.max_frame_size);
2063
2064 #if (EMLXS_MODREV >= EMLXS_MODREV3)
2065 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2066 "Port info: fru_port_index = %x",
2067 port_info->pi_attrs.hba_fru_details.port_index);
2068 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2069 "Port info: fru_high = %llx",
2070 port_info->pi_attrs.hba_fru_details.high);
2071 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2072 "Port info: fru_low = %llx",
2073 port_info->pi_attrs.hba_fru_details.low);
2074 #endif /* >= EMLXS_MODREV3 */
2075
2076 #if (EMLXS_MODREV >= EMLXS_MODREV4)
2077 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2078 "Port info: sym_node_name = %s",
2079 port_info->pi_attrs.sym_node_name);
2080 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2081 "Port info: sym_port_name = %s",
2082 port_info->pi_attrs.sym_port_name);
2083 #endif /* >= EMLXS_MODREV4 */
2084
2085 mutex_exit(&EMLXS_PORT_LOCK);
2086
2087 #ifdef SFCT_SUPPORT
2088 if (port->flag & EMLXS_TGT_ENABLED) {
2089 emlxs_fct_bind_port(port);
2090 }
2091 #endif /* SFCT_SUPPORT */
2092
2093 return ((opaque_t)port);
2094
2095 } /* emlxs_fca_bind_port() */
2096
2097
2098 static void
2099 emlxs_fca_unbind_port(opaque_t fca_port_handle)
2100 {
2101 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2102 emlxs_hba_t *hba = HBA;
2103
2104 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2105 "fca_unbind_port: port=%p", port);
2106
2107 if (!(port->flag & EMLXS_PORT_BOUND)) {
2108 return;
2109 }
2110
2111 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
2112 (void) emlxs_vpi_port_unbind_notify(port, 1);
2113 }
2114
2115 /* Destroy & flush all port nodes, if they exist */
2116 if (port->node_count) {
2117 (void) EMLXS_SLI_UNREG_NODE(port, 0, 0, 0, 0);
2118 }
2119
2120 #if (EMLXS_MODREV >= EMLXS_MODREV5)
2121 if ((hba->sli_mode <= EMLXS_HBA_SLI3_MODE) &&
2122 (hba->flag & FC_NPIV_ENABLED) &&
2123 (port->flag & (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLED))) {
2124 (void) emlxs_mb_unreg_vpi(port);
2125 }
2126 #endif
2127
2128 mutex_enter(&EMLXS_PORT_LOCK);
2129 if (port->flag & EMLXS_INI_BOUND) {
2130 #if (EMLXS_MODREV >= EMLXS_MODREV5)
2131 port->flag &= ~(EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLED);
2132 #endif
2133 port->flag &= ~EMLXS_INI_BOUND;
2134 hba->num_of_ports--;
2135
2136 /* Wait until ulp callback interface is idle */
2137 while (port->ulp_busy) {
2138 mutex_exit(&EMLXS_PORT_LOCK);
2139 delay(drv_usectohz(500000));
2140 mutex_enter(&EMLXS_PORT_LOCK);
2141 }
2142
2143 port->ulp_handle = 0;
2144 port->ulp_statec = FC_STATE_OFFLINE;
2145 port->ulp_statec_cb = NULL;
2146 port->ulp_unsol_cb = NULL;
2147 }
2148 mutex_exit(&EMLXS_PORT_LOCK);
2149
2150 #ifdef SFCT_SUPPORT
2151 /* Check if port was target bound */
2152 if (port->flag & EMLXS_TGT_BOUND) {
2153 emlxs_fct_unbind_port(port);
2154 }
2155 #endif /* SFCT_SUPPORT */
2156
2157 return;
2158
2159 } /* emlxs_fca_unbind_port() */
2160
2161
2162 /*ARGSUSED*/
2163 extern int
2164 emlxs_fca_pkt_init(opaque_t fca_port_handle, fc_packet_t *pkt, int32_t sleep)
2165 {
2166 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2167 emlxs_hba_t *hba = HBA;
2168 emlxs_buf_t *sbp = (emlxs_buf_t *)pkt->pkt_fca_private;
2169
2170 if (!sbp) {
2171 return (FC_FAILURE);
2172 }
2173 bzero((void *)sbp, sizeof (emlxs_buf_t));
2174
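	/* Set up the per-packet lock and mark the packet valid and */
	/* ULP-owned until it is handed to emlxs_fca_transport(). */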
2175 mutex_init(&sbp->mtx, NULL, MUTEX_DRIVER, DDI_INTR_PRI(hba->intr_arg));
2176 sbp->pkt_flags =
2177 PACKET_VALID | PACKET_ULP_OWNED;
2178 sbp->port = port;
2179 sbp->pkt = pkt;
2180 sbp->iocbq.sbp = sbp;
2181
2182 return (FC_SUCCESS);
2183
2184 } /* emlxs_fca_pkt_init() */
2185
2186
2187
2188 static void
2189 emlxs_initialize_pkt(emlxs_port_t *port, emlxs_buf_t *sbp)
2190 {
2191 emlxs_hba_t *hba = HBA;
2192 emlxs_config_t *cfg = &CFG;
2193 fc_packet_t *pkt = PRIV2PKT(sbp);
2194
2195 mutex_enter(&sbp->mtx);
2196
2197 /* Reinitialize */
2198 sbp->pkt = pkt;
2199 sbp->port = port;
2200 sbp->bmp = NULL;
2201 sbp->pkt_flags &= (PACKET_VALID | PACKET_ALLOCATED);
2202 sbp->iotag = 0;
2203 sbp->ticks = 0;
2204 sbp->abort_attempts = 0;
2205 sbp->fpkt = NULL;
2206 sbp->flush_count = 0;
2207 sbp->next = NULL;
2208
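	/* In initiator mode, reset the per-command routing fields */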
2209 if (port->mode == MODE_INITIATOR) {
2210 sbp->node = NULL;
2211 sbp->did = 0;
2212 sbp->lun = EMLXS_LUN_NONE;
2213 sbp->class = 0;
2214 sbp->channel = NULL;
2215 }
2216
2217 bzero((void *)&sbp->iocbq, sizeof (IOCBQ));
2218 sbp->iocbq.sbp = sbp;
2219
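	/* Use polled completion if the ULP disabled interrupts for */
	/* this packet, provided no completion callback, or the */
	/* system is panicking. */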
2220 if ((pkt->pkt_tran_flags & FC_TRAN_NO_INTR) || !pkt->pkt_comp ||
2221 ddi_in_panic()) {
2222 sbp->pkt_flags |= PACKET_POLLED;
2223 }
2224
2225 /* Prepare the fc packet */
2226 pkt->pkt_state = FC_PKT_SUCCESS;
2227 pkt->pkt_reason = 0;
2228 pkt->pkt_action = 0;
2229 pkt->pkt_expln = 0;
2230 pkt->pkt_data_resid = 0;
2231 pkt->pkt_resp_resid = 0;
2232
2233 /* Make sure all pkt's have a proper timeout */
2234 if (!cfg[CFG_TIMEOUT_ENABLE].current) {
2235 /* This disables all IOCB on chip timeouts */
2236 pkt->pkt_timeout = 0x80000000;
2237 } else if (pkt->pkt_timeout == 0 || pkt->pkt_timeout == 0xffffffff) {
2238 pkt->pkt_timeout = 60;
2239 }
2240
2241 /* Clear the response buffer */
2242 if (pkt->pkt_rsplen) {
2243 bzero(pkt->pkt_resp, pkt->pkt_rsplen);
2244 }
2245
2246 mutex_exit(&sbp->mtx);
2247
2248 return;
2249
2250 } /* emlxs_initialize_pkt() */
2251
2252
2253
2254 /*
2255 * We may not need this routine
2256 */
2257 /*ARGSUSED*/
2258 extern int
2259 emlxs_fca_pkt_uninit(opaque_t fca_port_handle, fc_packet_t *pkt)
2260 {
2261 emlxs_buf_t *sbp = PKT2PRIV(pkt);
2262
2263 if (!sbp) {
2264 return (FC_FAILURE);
2265 }
2266
2267 if (!(sbp->pkt_flags & PACKET_VALID)) {
2268 return (FC_FAILURE);
2269 }
2270 sbp->pkt_flags &= ~PACKET_VALID;
2271 mutex_destroy(&sbp->mtx);
2272
2273 return (FC_SUCCESS);
2274
2275 } /* emlxs_fca_pkt_uninit() */
2276
2277
2278 static int
2279 emlxs_fca_get_cap(opaque_t fca_port_handle, char *cap, void *ptr)
2280 {
2281 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2282 emlxs_hba_t *hba = HBA;
2283 int32_t rval;
2284 emlxs_config_t *cfg = &CFG;
2285
2286 if (!(port->flag & EMLXS_INI_BOUND)) {
2287 return (FC_CAP_ERROR);
2288 }
2289
2290 if (strcmp(cap, FC_NODE_WWN) == 0) {
2291 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2292 "fca_get_cap: FC_NODE_WWN");
2293
2294 bcopy((void *)&hba->wwnn, (void *)ptr, sizeof (NAME_TYPE));
2295 rval = FC_CAP_FOUND;
2296
2297 } else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
2298 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2299 "fca_get_cap: FC_LOGIN_PARAMS");
2300
2301 /*
2302 * We need to turn off CLASS2 support.
2303 * Otherwise, FC transport will use CLASS2 as default class
2304 * and never try with CLASS3.
2305 */
2306 hba->sparam.cls2.classValid = 0;
2307
2308 bcopy((void *)&hba->sparam, (void *)ptr, sizeof (SERV_PARM));
2309
2310 rval = FC_CAP_FOUND;
2311
2312 } else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
2313 int32_t *num_bufs;
2314
2315 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2316 "fca_get_cap: FC_CAP_UNSOL_BUF (%d)",
2317 cfg[CFG_UB_BUFS].current);
2318
2319 num_bufs = (int32_t *)ptr;
2320
2321 /* We multiply by MAX_VPORTS because ULP uses a */
2322 /* formula to calculate ub bufs from this */
2323 *num_bufs = (cfg[CFG_UB_BUFS].current * MAX_VPORTS);
2324
2325 rval = FC_CAP_FOUND;
2326
2327 } else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
2328 int32_t *size;
2329
2330 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2331 "fca_get_cap: FC_CAP_PAYLOAD_SIZE");
2332
2333 size = (int32_t *)ptr;
2334 *size = -1;
2335 rval = FC_CAP_FOUND;
2336
2337 } else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
2338 fc_reset_action_t *action;
2339
2340 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2341 "fca_get_cap: FC_CAP_POST_RESET_BEHAVIOR");
2342
2343 action = (fc_reset_action_t *)ptr;
2344 *action = FC_RESET_RETURN_ALL;
2345 rval = FC_CAP_FOUND;
2346
2347 } else if (strcmp(cap, FC_CAP_NOSTREAM_ON_UNALIGN_BUF) == 0) {
2348 fc_dma_behavior_t *behavior;
2349
2350 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2351 "fca_get_cap: FC_CAP_NOSTREAM_ON_UNALIGN_BUF");
2352
2353 behavior = (fc_dma_behavior_t *)ptr;
2354 *behavior = FC_ALLOW_STREAMING;
2355 rval = FC_CAP_FOUND;
2356
2357 } else if (strcmp(cap, FC_CAP_FCP_DMA) == 0) {
2358 fc_fcp_dma_t *fcp_dma;
2359
2360 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2361 "fca_get_cap: FC_CAP_FCP_DMA");
2362
2363 fcp_dma = (fc_fcp_dma_t *)ptr;
2364 *fcp_dma = FC_DVMA_SPACE;
2365 rval = FC_CAP_FOUND;
2366
2367 } else {
2368 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2369 "fca_get_cap: Unknown capability. [%s]", cap);
2370
2371 rval = FC_CAP_ERROR;
2372
2373 }
2374
2375 return (rval);
2376
2377 } /* emlxs_fca_get_cap() */
2378
2379
2380
2381 static int
2382 emlxs_fca_set_cap(opaque_t fca_port_handle, char *cap, void *ptr)
2383 {
2384 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2385
2386 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2387 "fca_set_cap: cap=[%s] arg=%p", cap, ptr);
2388
2389 return (FC_CAP_ERROR);
2390
2391 } /* emlxs_fca_set_cap() */
2392
2393
2394 static opaque_t
2395 emlxs_fca_get_device(opaque_t fca_port_handle, fc_portid_t d_id)
2396 {
2397 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2398
2399 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2400 "fca_get_device: did=%x", d_id.port_id);
2401
2402 return (NULL);
2403
2404 } /* emlxs_fca_get_device() */
2405
2406
2407 static int32_t
2408 emlxs_fca_notify(opaque_t fca_port_handle, uint32_t cmd)
2409 {
2410 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2411
2412 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, "fca_notify: cmd=%x",
2413 cmd);
2414
2415 return (FC_SUCCESS);
2416
2417 } /* emlxs_fca_notify */
2418
2419
2420
2421 static int
2422 emlxs_fca_get_map(opaque_t fca_port_handle, fc_lilpmap_t *mapbuf)
2423 {
2424 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2425 emlxs_hba_t *hba = HBA;
2426 uint32_t lilp_length;
2427
2428 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2429 "fca_get_map: mapbuf=%p length=%d (%X,%X,%X,%X)", mapbuf,
2430 port->alpa_map[0], port->alpa_map[1], port->alpa_map[2],
2431 port->alpa_map[3], port->alpa_map[4]);
2432
2433 if (!(port->flag & EMLXS_INI_BOUND)) {
2434 return (FC_NOMAP);
2435 }
2436
2437 if (hba->topology != TOPOLOGY_LOOP) {
2438 return (FC_NOMAP);
2439 }
2440
2441 /* Check if alpa map is available */
2442 if (port->alpa_map[0] != 0) {
2443 mapbuf->lilp_magic = MAGIC_LILP;
2444 } else { /* No LILP map available */
2445
2446 /* Set lilp_magic to MAGIC_LISA and this will */
2447 /* trigger an ALPA scan in ULP */
2448 mapbuf->lilp_magic = MAGIC_LISA;
2449 }
2450
2451 mapbuf->lilp_myalpa = port->did;
2452
2453 /* The first byte of the alpa_map is the lilp map length */
2454 /* Add one to include the lilp length byte itself */
2455 lilp_length = (uint32_t)port->alpa_map[0] + 1;
2456
2457 /* Make sure the max transfer is 128 bytes */
2458 if (lilp_length > 128) {
2459 lilp_length = 128;
2460 }
2461
2462 /* We start copying from the lilp_length field */
2463 /* in order to get a word aligned address */
2464 bcopy((void *)&port->alpa_map, (void *)&mapbuf->lilp_length,
2465 lilp_length);
2466
2467 return (FC_SUCCESS);
2468
2469 } /* emlxs_fca_get_map() */
2470
2471
2472
2473 extern int
2474 emlxs_fca_transport(opaque_t fca_port_handle, fc_packet_t *pkt)
2475 {
2476 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2477 emlxs_hba_t *hba = HBA;
2478 emlxs_buf_t *sbp;
2479 uint32_t rval;
2480 uint32_t pkt_flags;
2481
2482 /* Validate packet */
2483 sbp = PKT2PRIV(pkt);
2484
2485 /* Make sure adapter is online */
2486 if (!(hba->flag & FC_ONLINE_MODE) &&
2487 !(sbp->pkt_flags & PACKET_ALLOCATED)) {
2488 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
2489 "Adapter offline.");
2490
2491 rval = (hba->flag & FC_ONLINING_MODE) ?
2492 FC_TRAN_BUSY : FC_OFFLINE;
2493 return (rval);
2494 }
2495
2496 /* Make sure ULP was told that the port was online */
2497 if ((port->ulp_statec == FC_STATE_OFFLINE) &&
2498 !(sbp->pkt_flags & PACKET_ALLOCATED)) {
2499 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
2500 "Port offline.");
2501
2502 return (FC_OFFLINE);
2503 }
2504
2505 if (sbp->port != port) {
2506 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2507 "Invalid port handle. sbp=%p port=%p flags=%x", sbp,
2508 sbp->port, sbp->pkt_flags);
2509 return (FC_BADPACKET);
2510 }
2511
2512 if (!(sbp->pkt_flags & (PACKET_VALID | PACKET_ULP_OWNED))) {
2513 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2514 "Invalid packet flags. sbp=%p port=%p flags=%x", sbp,
2515 sbp->port, sbp->pkt_flags);
2516 return (FC_BADPACKET);
2517 }
2518
2519 #ifdef SFCT_SUPPORT
2520 if ((port->mode == MODE_TARGET) && !sbp->fct_cmd &&
2521 !(sbp->pkt_flags & PACKET_ALLOCATED)) {
2522 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2523 "Packet blocked. Target mode.");
2524 return (FC_TRANSPORT_ERROR);
2525 }
2526 #endif /* SFCT_SUPPORT */
2527
2528 #ifdef IDLE_TIMER
2529 emlxs_pm_busy_component(hba);
2530 #endif /* IDLE_TIMER */
2531
2532 /* Prepare the packet for transport */
2533 emlxs_initialize_pkt(port, sbp);
2534
2535 /* Save a copy of the pkt flags. */
2536 /* We will check the polling flag later */
2537 pkt_flags = sbp->pkt_flags;
2538
2539 /* Send the packet */
2540 switch (pkt->pkt_tran_type) {
2541 case FC_PKT_FCP_READ:
2542 case FC_PKT_FCP_WRITE:
2543 rval = emlxs_send_fcp_cmd(port, sbp, &pkt_flags);
2544 break;
2545
2546 case FC_PKT_IP_WRITE:
2547 case FC_PKT_BROADCAST:
2548 rval = emlxs_send_ip(port, sbp);
2549 break;
2550
2551 case FC_PKT_EXCHANGE:
2552 switch (pkt->pkt_cmd_fhdr.type) {
2553 case FC_TYPE_SCSI_FCP:
2554 rval = emlxs_send_fcp_cmd(port, sbp, &pkt_flags);
2555 break;
2556
2557 case FC_TYPE_FC_SERVICES:
2558 rval = emlxs_send_ct(port, sbp);
2559 break;
2560
2561 #ifdef MENLO_SUPPORT
2562 case EMLXS_MENLO_TYPE:
2563 rval = emlxs_send_menlo(port, sbp);
2564 break;
2565 #endif /* MENLO_SUPPORT */
2566
2567 default:
2568 rval = emlxs_send_els(port, sbp);
2569 }
2570 break;
2571
2572 case FC_PKT_OUTBOUND:
2573 switch (pkt->pkt_cmd_fhdr.type) {
2574 #ifdef SFCT_SUPPORT
2575 case FC_TYPE_SCSI_FCP:
2576 rval = emlxs_send_fct_status(port, sbp);
2577 break;
2578
2579 case FC_TYPE_BASIC_LS:
2580 rval = emlxs_send_fct_abort(port, sbp);
2581 break;
2582 #endif /* SFCT_SUPPORT */
2583
2584 case FC_TYPE_FC_SERVICES:
2585 rval = emlxs_send_ct_rsp(port, sbp);
2586 break;
2587 #ifdef MENLO_SUPPORT
2588 case EMLXS_MENLO_TYPE:
2589 rval = emlxs_send_menlo(port, sbp);
2590 break;
2591 #endif /* MENLO_SUPPORT */
2592
2593 default:
2594 rval = emlxs_send_els_rsp(port, sbp);
2595 }
2596 break;
2597
2598 default:
2599 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2600 "Unsupported pkt_tran_type. type=%x", pkt->pkt_tran_type);
2601 rval = FC_TRANSPORT_ERROR;
2602 break;
2603 }
2604
2605 /* Check if send was not successful */
2606 if (rval != FC_SUCCESS) {
2607 /* Return packet to ULP */
2608 mutex_enter(&sbp->mtx);
2609 sbp->pkt_flags |= PACKET_ULP_OWNED;
2610 mutex_exit(&sbp->mtx);
2611
2612 return (rval);
2613 }
2614
2615 /* Check if this packet should be polled for completion before */
2616 /* returning. This check must be done with a saved copy of the */
2617 /* pkt_flags because the packet itself could already be freed from */
2618 /* memory if it was not polled. */
2619 if (pkt_flags & PACKET_POLLED) {
2620 emlxs_poll(port, sbp);
2621 }
2622
2623 return (FC_SUCCESS);
2624
2625 } /* emlxs_fca_transport() */
2626
2627
2628
2629 static void
2630 emlxs_poll(emlxs_port_t *port, emlxs_buf_t *sbp)
2631 {
2632 emlxs_hba_t *hba = HBA;
2633 fc_packet_t *pkt = PRIV2PKT(sbp);
2634 clock_t timeout;
2635 clock_t time;
2636 CHANNEL *cp;
2637 int in_panic = 0;
2638
2639 mutex_enter(&EMLXS_PORT_LOCK);
2640 hba->io_poll_count++;
2641 mutex_exit(&EMLXS_PORT_LOCK);
2642
2643 /* Check for panic situation */
2644 cp = (CHANNEL *)sbp->channel;
2645
2646 if (ddi_in_panic()) {
2647 in_panic = 1;
2648 /*
2649 * In panic situations there will be one thread with
2650 		 * no interrupts (hard or soft) and no timers
2651 */
2652
2653 /*
2654 * We must manually poll everything in this thread
2655 * to keep the driver going.
2656 */
2657
2658 /* Keep polling the chip until our IO is completed */
2659 /* Driver's timer will not function during panics. */
2660 /* Therefore, timer checks must be performed manually. */
2661 (void) drv_getparm(LBOLT, &time);
2662 timeout = time + drv_usectohz(1000000);
2663 while (!(sbp->pkt_flags & PACKET_COMPLETED)) {
2664 EMLXS_SLI_POLL_INTR(hba);
2665 (void) drv_getparm(LBOLT, &time);
2666
2667 /* Trigger timer checks periodically */
2668 if (time >= timeout) {
2669 emlxs_timer_checks(hba);
2670 timeout = time + drv_usectohz(1000000);
2671 }
2672 }
2673 } else {
2674 /* Wait for IO completion */
2675 /* The driver's timer will detect */
2676 /* any timeout and abort the I/O. */
2677 mutex_enter(&EMLXS_PKT_LOCK);
2678 while (!(sbp->pkt_flags & PACKET_COMPLETED)) {
2679 cv_wait(&EMLXS_PKT_CV, &EMLXS_PKT_LOCK);
2680 }
2681 mutex_exit(&EMLXS_PKT_LOCK);
2682 }
2683
2684 /* Check for fcp reset pkt */
2685 if (sbp->pkt_flags & PACKET_FCP_RESET) {
2686 if (sbp->pkt_flags & PACKET_FCP_TGT_RESET) {
2687 /* Flush the IO's on the chipq */
2688 (void) emlxs_chipq_node_flush(port,
2689 &hba->chan[hba->channel_fcp],
2690 sbp->node, sbp);
2691 } else {
2692 /* Flush the IO's on the chipq for this lun */
2693 (void) emlxs_chipq_lun_flush(port,
2694 sbp->node, sbp->lun, sbp);
2695 }
2696
2697 if (sbp->flush_count == 0) {
2698 emlxs_node_open(port, sbp->node, hba->channel_fcp);
2699 goto done;
2700 }
2701
2702 /* Set the timeout so the flush has time to complete */
2703 timeout = emlxs_timeout(hba, 60);
2704 (void) drv_getparm(LBOLT, &time);
2705 while ((time < timeout) && sbp->flush_count > 0) {
2706 delay(drv_usectohz(500000));
2707 (void) drv_getparm(LBOLT, &time);
2708 }
2709
2710 if (sbp->flush_count == 0) {
2711 emlxs_node_open(port, sbp->node, hba->channel_fcp);
2712 goto done;
2713 }
2714
2715 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2716 "sbp=%p flush_count=%d. Waiting...", sbp,
2717 sbp->flush_count);
2718
2719 /* Let's try this one more time */
2720
2721 if (sbp->pkt_flags & PACKET_FCP_TGT_RESET) {
2722 /* Flush the IO's on the chipq */
2723 (void) emlxs_chipq_node_flush(port,
2724 &hba->chan[hba->channel_fcp],
2725 sbp->node, sbp);
2726 } else {
2727 /* Flush the IO's on the chipq for this lun */
2728 (void) emlxs_chipq_lun_flush(port,
2729 sbp->node, sbp->lun, sbp);
2730 }
2731
2732 /* Reset the timeout so the flush has time to complete */
2733 timeout = emlxs_timeout(hba, 60);
2734 (void) drv_getparm(LBOLT, &time);
2735 while ((time < timeout) && sbp->flush_count > 0) {
2736 delay(drv_usectohz(500000));
2737 (void) drv_getparm(LBOLT, &time);
2738 }
2739
2740 if (sbp->flush_count == 0) {
2741 emlxs_node_open(port, sbp->node, hba->channel_fcp);
2742 goto done;
2743 }
2744
2745 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2746 "sbp=%p flush_count=%d. Resetting link.", sbp,
2747 sbp->flush_count);
2748
2749 /* Let's first try to reset the link */
2750 (void) emlxs_reset(port, FC_FCA_LINK_RESET);
2751
2752 if (sbp->flush_count == 0) {
2753 goto done;
2754 }
2755
2756 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2757 "sbp=%p flush_count=%d. Resetting HBA.", sbp,
2758 sbp->flush_count);
2759
2760 /* If that doesn't work, reset the adapter */
2761 (void) emlxs_reset(port, FC_FCA_RESET);
2762
2763 if (sbp->flush_count != 0) {
2764 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2765 "sbp=%p flush_count=%d. Giving up.", sbp,
2766 sbp->flush_count);
2767 }
2768
2769 }
2770 /* PACKET_FCP_RESET */
2771 done:
2772
2773 /* Packet has been declared completed and is now ready to be returned */
2774
2775 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
2776 emlxs_unswap_pkt(sbp);
2777 #endif /* EMLXS_MODREV2X */
2778
2779 mutex_enter(&sbp->mtx);
2780 sbp->pkt_flags |= PACKET_ULP_OWNED;
2781 mutex_exit(&sbp->mtx);
2782
2783 mutex_enter(&EMLXS_PORT_LOCK);
2784 hba->io_poll_count--;
2785 mutex_exit(&EMLXS_PORT_LOCK);
2786
2787 #ifdef FMA_SUPPORT
2788 if (!in_panic) {
2789 emlxs_check_dma(hba, sbp);
2790 }
2791 #endif
2792
2793 /* Make ULP completion callback if required */
2794 if (pkt->pkt_comp) {
2795 cp->ulpCmplCmd++;
2796 (*pkt->pkt_comp) (pkt);
2797 }
2798
2799 #ifdef FMA_SUPPORT
2800 if (hba->flag & FC_DMA_CHECK_ERROR) {
2801 emlxs_thread_spawn(hba, emlxs_restart_thread,
2802 NULL, NULL);
2803 }
2804 #endif
2805
2806 return;
2807
2808 } /* emlxs_poll() */
2809
2810
2811 static int
2812 emlxs_fca_ub_alloc(opaque_t fca_port_handle, uint64_t tokens[], uint32_t size,
2813 uint32_t *count, uint32_t type)
2814 {
2815 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2816 emlxs_hba_t *hba = HBA;
2817 char *err = NULL;
2818 emlxs_unsol_buf_t *pool = NULL;
2819 emlxs_unsol_buf_t *new_pool = NULL;
2820 emlxs_config_t *cfg = &CFG;
2821 int32_t i;
2822 int result;
2823 uint32_t free_resv;
2824 uint32_t free;
2825 fc_unsol_buf_t *ubp;
2826 emlxs_ub_priv_t *ub_priv;
2827 int rc;
2828
2829 if (!(port->flag & EMLXS_INI_ENABLED)) {
2830 if (tokens && count) {
2831 bzero(tokens, (sizeof (uint64_t) * (*count)));
2832 }
2833 return (FC_SUCCESS);
2834 }
2835
2836 if (!(port->flag & EMLXS_INI_BOUND)) {
2837 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2838 "fca_ub_alloc failed: Port not bound! size=%x count=%d "
2839 "type=%x", size, *count, type);
2840
2841 return (FC_FAILURE);
2842 }
2843
2844 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2845 "fca_ub_alloc: size=%x count=%d type=%x", size, *count, type);
2846
2847 if (count && (*count > EMLXS_MAX_UBUFS)) {
2848 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2849 "fca_ub_alloc failed: Too many unsolicted buffers "
2850 "requested. count=%x", *count);
2851
2852 return (FC_FAILURE);
2853
2854 }
2855
2856 if (tokens == NULL) {
2857 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2858 "fca_ub_alloc failed: Token array is NULL.");
2859
2860 return (FC_FAILURE);
2861 }
2862
2863 /* Clear the token array */
2864 bzero(tokens, (sizeof (uint64_t) * (*count)));
2865
2866 free_resv = 0;
2867 free = *count;
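	/* Map the FC-4 type to a printable name and compute the */
	/* free/reserved buffer split for the new pool */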
2868 switch (type) {
2869 case FC_TYPE_BASIC_LS:
2870 err = "BASIC_LS";
2871 break;
2872 case FC_TYPE_EXTENDED_LS:
2873 err = "EXTENDED_LS";
2874 free = *count / 2; /* Hold 50% for normal use */
2875 free_resv = *count - free; /* Reserve 50% for RSCN use */
2876 break;
2877 case FC_TYPE_IS8802:
2878 err = "IS8802";
2879 break;
2880 case FC_TYPE_IS8802_SNAP:
2881 err = "IS8802_SNAP";
2882
2883 if (cfg[CFG_NETWORK_ON].current == 0) {
2884 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2885 "fca_ub_alloc failed: IP support is disabled.");
2886
2887 return (FC_FAILURE);
2888 }
2889 break;
2890 case FC_TYPE_SCSI_FCP:
2891 err = "SCSI_FCP";
2892 break;
2893 case FC_TYPE_SCSI_GPP:
2894 err = "SCSI_GPP";
2895 break;
2896 case FC_TYPE_HIPP_FP:
2897 err = "HIPP_FP";
2898 break;
2899 case FC_TYPE_IPI3_MASTER:
2900 err = "IPI3_MASTER";
2901 break;
2902 case FC_TYPE_IPI3_SLAVE:
2903 err = "IPI3_SLAVE";
2904 break;
2905 case FC_TYPE_IPI3_PEER:
2906 err = "IPI3_PEER";
2907 break;
2908 case FC_TYPE_FC_SERVICES:
2909 err = "FC_SERVICES";
2910 break;
2911 }
2912
2913 mutex_enter(&EMLXS_UB_LOCK);
2914
2915 /*
2916 * Walk through the list of the unsolicited buffers
2917 * for this ddiinst of emlx.
2918 */
2919
2920 pool = port->ub_pool;
2921
2922 /*
2923 	 * emlxs_fca_ub_alloc() can be called more than once with different
2924 	 * sizes. We reject the call if a pool of the same size
2925 	 * already exists for the same FC-4 type.
2926 */
2927 while (pool) {
2928 if ((pool->pool_type == type) &&
2929 (pool->pool_buf_size == size)) {
2930 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2931 "fca_ub_alloc failed: Unsolicited buffer pool "
2932 "for %s of size 0x%x bytes already exists.",
2933 err, size);
2934
2935 result = FC_FAILURE;
2936 goto fail;
2937 }
2938
2939 pool = pool->pool_next;
2940 }
2941
2942 mutex_exit(&EMLXS_UB_LOCK);
2943
2944 new_pool = (emlxs_unsol_buf_t *)kmem_zalloc(sizeof (emlxs_unsol_buf_t),
2945 KM_SLEEP);
2946
2947 new_pool->pool_next = NULL;
2948 new_pool->pool_type = type;
2949 new_pool->pool_buf_size = size;
2950 new_pool->pool_nentries = *count;
2951 new_pool->pool_available = new_pool->pool_nentries;
2952 new_pool->pool_free = free;
2953 new_pool->pool_free_resv = free_resv;
2954 new_pool->fc_ubufs =
2955 kmem_zalloc((sizeof (fc_unsol_buf_t) * (*count)), KM_SLEEP);
2956
2957 new_pool->pool_first_token = port->ub_count;
2958 new_pool->pool_last_token = port->ub_count + new_pool->pool_nentries;
2959
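	/* Initialize each unsolicited buffer object, assign its token, */
	/* and allocate its data buffer */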
2960 for (i = 0; i < new_pool->pool_nentries; i++) {
2961 ubp = (fc_unsol_buf_t *)&new_pool->fc_ubufs[i];
2962 ubp->ub_port_handle = port->ulp_handle;
2963 ubp->ub_token = (uint64_t)((unsigned long)ubp);
2964 ubp->ub_bufsize = size;
2965 ubp->ub_class = FC_TRAN_CLASS3;
2966 ubp->ub_port_private = NULL;
2967 ubp->ub_fca_private =
2968 (emlxs_ub_priv_t *)kmem_zalloc(sizeof (emlxs_ub_priv_t),
2969 KM_SLEEP);
2970
2971 /*
2972 * Initialize emlxs_ub_priv_t
2973 */
2974 ub_priv = ubp->ub_fca_private;
2975 ub_priv->ubp = ubp;
2976 ub_priv->port = port;
2977 ub_priv->flags = EMLXS_UB_FREE;
2978 ub_priv->available = 1;
2979 ub_priv->pool = new_pool;
2980 ub_priv->time = 0;
2981 ub_priv->timeout = 0;
2982 ub_priv->token = port->ub_count;
2983 ub_priv->cmd = 0;
2984
2985 /* Allocate the actual buffer */
2986 ubp->ub_buffer = (caddr_t)kmem_zalloc(size, KM_SLEEP);
2987
2988
2989 tokens[i] = (uint64_t)((unsigned long)ubp);
2990 port->ub_count++;
2991 }
2992
2993 mutex_enter(&EMLXS_UB_LOCK);
2994
2995 /* Add the pool to the top of the pool list */
2996 new_pool->pool_prev = NULL;
2997 new_pool->pool_next = port->ub_pool;
2998
2999 if (port->ub_pool) {
3000 port->ub_pool->pool_prev = new_pool;
3001 }
3002 port->ub_pool = new_pool;
3003
3004 /* Set the post counts */
3005 if (type == FC_TYPE_IS8802_SNAP) {
3006 MAILBOXQ *mbox;
3007
3008 port->ub_post[hba->channel_ip] += new_pool->pool_nentries;
3009
3010 if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
3011 MEM_MBOX))) {
3012 emlxs_mb_config_farp(hba, mbox);
3013 rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba,
3014 mbox, MBX_NOWAIT, 0);
3015 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
3016 emlxs_mem_put(hba, MEM_MBOX, (void *)mbox);
3017 }
3018 }
3019 port->flag |= EMLXS_PORT_IP_UP;
3020 } else if (type == FC_TYPE_EXTENDED_LS) {
3021 port->ub_post[hba->channel_els] += new_pool->pool_nentries;
3022 } else if (type == FC_TYPE_FC_SERVICES) {
3023 port->ub_post[hba->channel_ct] += new_pool->pool_nentries;
3024 }
3025
3026 mutex_exit(&EMLXS_UB_LOCK);
3027
3028 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
3029 "%d unsolicited buffers allocated for %s of size 0x%x bytes.",
3030 *count, err, size);
3031
3032 return (FC_SUCCESS);
3033
3034 fail:
3035
3036 /* Clean the pool */
3037 for (i = 0; tokens[i] != 0; i++) {
3038 /* Get the buffer object */
3039 ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]);
3040 ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3041
3042 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
3043 "fca_ub_alloc failed: Freed buffer=%p token=%x size=%x "
3044 "type=%x ", ubp, ub_priv->token, ubp->ub_bufsize, type);
3045
3046 /* Free the actual buffer */
3047 kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
3048
3049 /* Free the private area of the buffer object */
3050 kmem_free(ubp->ub_fca_private, sizeof (emlxs_ub_priv_t));
3051
3052 tokens[i] = 0;
3053 port->ub_count--;
3054 }
3055
3056 if (new_pool) {
3057 /* Free the array of buffer objects in the pool */
3058 kmem_free((caddr_t)new_pool->fc_ubufs,
3059 (sizeof (fc_unsol_buf_t) * new_pool->pool_nentries));
3060
3061 /* Free the pool object */
3062 kmem_free((caddr_t)new_pool, sizeof (emlxs_unsol_buf_t));
3063 }
3064
3065 mutex_exit(&EMLXS_UB_LOCK);
3066
3067 return (result);
3068
3069 } /* emlxs_fca_ub_alloc() */
3070
3071
3072 static void
3073 emlxs_ub_els_reject(emlxs_port_t *port, fc_unsol_buf_t *ubp)
3074 {
3075 emlxs_hba_t *hba = HBA;
3076 emlxs_ub_priv_t *ub_priv;
3077 fc_packet_t *pkt;
3078 ELS_PKT *els;
3079 uint32_t sid;
3080
3081 ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3082
3083 if (hba->state <= FC_LINK_DOWN) {
3084 emlxs_abort_els_exchange(hba, port, ubp->ub_frame.rx_id);
3085 return;
3086 }
3087
3088 if (!(pkt = emlxs_pkt_alloc(port, sizeof (uint32_t) +
3089 sizeof (LS_RJT), 0, 0, KM_NOSLEEP))) {
3090 emlxs_abort_els_exchange(hba, port, ubp->ub_frame.rx_id);
3091 return;
3092 }
3093
3094 sid = LE_SWAP24_LO(ubp->ub_frame.s_id);
3095
3096 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
3097 "%s dropped: sid=%x. Rejecting.",
3098 emlxs_elscmd_xlate(ub_priv->cmd), sid);
3099
3100 pkt->pkt_tran_type = FC_PKT_OUTBOUND;
3101 pkt->pkt_timeout = (2 * hba->fc_ratov);
3102
3103 if ((uint32_t)ubp->ub_class == FC_TRAN_CLASS2) {
3104 pkt->pkt_tran_flags &= ~FC_TRAN_CLASS3;
3105 pkt->pkt_tran_flags |= FC_TRAN_CLASS2;
3106 }
3107
3108 /* Build the fc header */
3109 pkt->pkt_cmd_fhdr.d_id = ubp->ub_frame.s_id;
3110 pkt->pkt_cmd_fhdr.r_ctl =
3111 R_CTL_EXTENDED_SVC | R_CTL_SOLICITED_CONTROL;
3112 pkt->pkt_cmd_fhdr.s_id = LE_SWAP24_LO(port->did);
3113 pkt->pkt_cmd_fhdr.type = FC_TYPE_EXTENDED_LS;
3114 pkt->pkt_cmd_fhdr.f_ctl =
3115 F_CTL_XCHG_CONTEXT | F_CTL_LAST_SEQ | F_CTL_END_SEQ;
3116 pkt->pkt_cmd_fhdr.seq_id = 0;
3117 pkt->pkt_cmd_fhdr.df_ctl = 0;
3118 pkt->pkt_cmd_fhdr.seq_cnt = 0;
3119 pkt->pkt_cmd_fhdr.ox_id = (ub_priv->cmd >> ELS_CMD_SHIFT) & 0xff;
3120 pkt->pkt_cmd_fhdr.rx_id = ubp->ub_frame.rx_id;
3121 pkt->pkt_cmd_fhdr.ro = 0;
3122
3123 /* Build the command */
3124 els = (ELS_PKT *) pkt->pkt_cmd;
3125 els->elsCode = 0x01;
3126 els->un.lsRjt.un.b.lsRjtRsvd0 = 0;
3127 els->un.lsRjt.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
3128 els->un.lsRjt.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
3129 els->un.lsRjt.un.b.vendorUnique = 0x02;
3130
3131 /* Send the pkt later in another thread */
3132 (void) emlxs_pkt_send(pkt, 0);
3133
3134 return;
3135
3136 } /* emlxs_ub_els_reject() */
3137
3138 extern int
3139 emlxs_fca_ub_release(opaque_t fca_port_handle, uint32_t count,
3140 uint64_t tokens[])
3141 {
3142 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
3143 emlxs_hba_t *hba = HBA;
3144 fc_unsol_buf_t *ubp;
3145 emlxs_ub_priv_t *ub_priv;
3146 uint32_t i;
3147 uint32_t time;
3148 emlxs_unsol_buf_t *pool;
3149
3150 if (count == 0) {
3151 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3152 "fca_ub_release: Nothing to do. count=%d", count);
3153
3154 return (FC_SUCCESS);
3155 }
3156
3157 if (!(port->flag & EMLXS_INI_BOUND)) {
3158 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3159 "fca_ub_release failed: Port not bound. count=%d "
3160 "token[0]=%p",
3161 count, tokens[0]);
3162
3163 return (FC_UNBOUND);
3164 }
3165
3166 mutex_enter(&EMLXS_UB_LOCK);
3167
3168 if (!port->ub_pool) {
3169 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3170 "fca_ub_release failed: No pools! count=%d token[0]=%p",
3171 count, tokens[0]);
3172
3173 mutex_exit(&EMLXS_UB_LOCK);
3174 return (FC_UB_BADTOKEN);
3175 }
3176
3177 for (i = 0; i < count; i++) {
3178 ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]);
3179
3180 if (!ubp) {
3181 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3182 "fca_ub_release failed: count=%d tokens[%d]=0",
3183 count, i);
3184
3185 mutex_exit(&EMLXS_UB_LOCK);
3186 return (FC_UB_BADTOKEN);
3187 }
3188
3189 ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3190
3191 if (!ub_priv || (ub_priv == (emlxs_ub_priv_t *)DEAD_PTR)) {
3192 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3193 "fca_ub_release failed: Dead buffer found. ubp=%p",
3194 ubp);
3195
3196 mutex_exit(&EMLXS_UB_LOCK);
3197 return (FC_UB_BADTOKEN);
3198 }
3199
3200 if (ub_priv->flags == EMLXS_UB_FREE) {
3201 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3202 "fca_ub_release: Buffer already free! ubp=%p "
3203 "token=%x",
3204 ubp, ub_priv->token);
3205
3206 continue;
3207 }
3208
3209 /* Check for dropped els buffer */
3210 /* ULP will do this sometimes without sending a reply */
3211 if ((ubp->ub_frame.r_ctl == FC_ELS_REQ) &&
3212 !(ub_priv->flags & EMLXS_UB_REPLY)) {
3213 emlxs_ub_els_reject(port, ubp);
3214 }
3215
3216 /* Mark the buffer free */
3217 ub_priv->flags = EMLXS_UB_FREE;
3218 bzero(ubp->ub_buffer, ubp->ub_bufsize);
3219
3220 time = hba->timer_tics - ub_priv->time;
3221 ub_priv->time = 0;
3222 ub_priv->timeout = 0;
3223
3224 pool = ub_priv->pool;
3225
3226 if (ub_priv->flags & EMLXS_UB_RESV) {
3227 pool->pool_free_resv++;
3228 } else {
3229 pool->pool_free++;
3230 }
3231
3232 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
3233 "fca_ub_release: ubp=%p token=%x time=%d av=%d "
3234 "(%d,%d,%d,%d)",
3235 ubp, ub_priv->token, time, ub_priv->available,
3236 pool->pool_nentries, pool->pool_available,
3237 pool->pool_free, pool->pool_free_resv);
3238
3239 /* Check if pool can be destroyed now */
3240 if ((pool->pool_available == 0) &&
3241 (pool->pool_free + pool->pool_free_resv ==
3242 pool->pool_nentries)) {
3243 emlxs_ub_destroy(port, pool);
3244 }
3245 }
3246
3247 mutex_exit(&EMLXS_UB_LOCK);
3248
3249 return (FC_SUCCESS);
3250
3251 } /* emlxs_fca_ub_release() */
3252
3253
3254 static int
3255 emlxs_fca_ub_free(opaque_t fca_port_handle, uint32_t count, uint64_t tokens[])
3256 {
3257 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
3258 emlxs_unsol_buf_t *pool;
3259 fc_unsol_buf_t *ubp;
3260 emlxs_ub_priv_t *ub_priv;
3261 uint32_t i;
3262
3263 if (!(port->flag & EMLXS_INI_ENABLED)) {
3264 return (FC_SUCCESS);
3265 }
3266
3267 if (count == 0) {
3268 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3269 "fca_ub_free: Nothing to do. count=%d token[0]=%p", count,
3270 tokens[0]);
3271
3272 return (FC_SUCCESS);
3273 }
3274
3275 if (!(port->flag & EMLXS_INI_BOUND)) {
3276 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3277 "fca_ub_free: Port not bound. count=%d token[0]=%p", count,
3278 tokens[0]);
3279
3280 return (FC_SUCCESS);
3281 }
3282
3283 mutex_enter(&EMLXS_UB_LOCK);
3284
3285 if (!port->ub_pool) {
3286 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3287 "fca_ub_free failed: No pools! count=%d token[0]=%p", count,
3288 tokens[0]);
3289
3290 mutex_exit(&EMLXS_UB_LOCK);
3291 return (FC_UB_BADTOKEN);
3292 }
3293
3294 /* Process buffer list */
3295 for (i = 0; i < count; i++) {
3296 ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]);
3297
3298 if (!ubp) {
3299 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3300 "fca_ub_free failed: count=%d tokens[%d]=0", count,
3301 i);
3302
3303 mutex_exit(&EMLXS_UB_LOCK);
3304 return (FC_UB_BADTOKEN);
3305 }
3306
3307 /* Mark buffer unavailable */
3308 ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3309
3310 if (!ub_priv || (ub_priv == (emlxs_ub_priv_t *)DEAD_PTR)) {
3311 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3312 "fca_ub_free failed: Dead buffer found. ubp=%p",
3313 ubp);
3314
3315 mutex_exit(&EMLXS_UB_LOCK);
3316 return (FC_UB_BADTOKEN);
3317 }
3318
3319 ub_priv->available = 0;
3320
3321 /* Mark one less buffer available in the parent pool */
3322 pool = ub_priv->pool;
3323
3324 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
3325 "fca_ub_free: ubp=%p token=%x (%d,%d,%d,%d)", ubp,
3326 ub_priv->token, pool->pool_nentries,
3327 pool->pool_available - 1, pool->pool_free,
3328 pool->pool_free_resv);
3329
3330 if (pool->pool_available) {
3331 pool->pool_available--;
3332
3333 /* Check if pool can be destroyed */
3334 if ((pool->pool_available == 0) &&
3335 (pool->pool_free + pool->pool_free_resv ==
3336 pool->pool_nentries)) {
3337 emlxs_ub_destroy(port, pool);
3338 }
3339 }
3340 }
3341
3342 mutex_exit(&EMLXS_UB_LOCK);
3343
3344 return (FC_SUCCESS);
3345
3346 } /* emlxs_fca_ub_free() */
3347
3348
3349 /* EMLXS_UB_LOCK must be held when calling this routine */
3350 extern void
3351 emlxs_ub_destroy(emlxs_port_t *port, emlxs_unsol_buf_t *pool)
3352 {
3353 emlxs_hba_t *hba = HBA;
3354 emlxs_unsol_buf_t *next;
3355 emlxs_unsol_buf_t *prev;
3356 fc_unsol_buf_t *ubp;
3357 uint32_t i;
3358
3359 /* Remove the pool object from the pool list */
3360 next = pool->pool_next;
3361 prev = pool->pool_prev;
3362
3363 if (port->ub_pool == pool) {
3364 port->ub_pool = next;
3365 }
3366
3367 if (prev) {
3368 prev->pool_next = next;
3369 }
3370
3371 if (next) {
3372 next->pool_prev = prev;
3373 }
3374
3375 pool->pool_prev = NULL;
3376 pool->pool_next = NULL;
3377
3378 /* Clear the post counts */
3379 switch (pool->pool_type) {
3380 case FC_TYPE_IS8802_SNAP:
3381 port->ub_post[hba->channel_ip] -= pool->pool_nentries;
3382 break;
3383
3384 case FC_TYPE_EXTENDED_LS:
3385 port->ub_post[hba->channel_els] -= pool->pool_nentries;
3386 break;
3387
3388 case FC_TYPE_FC_SERVICES:
3389 port->ub_post[hba->channel_ct] -= pool->pool_nentries;
3390 break;
3391 }
3392
3393 /* Now free the pool memory */
3394 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3395 "ub_destroy: pool=%p type=%d size=%d count=%d", pool,
3396 pool->pool_type, pool->pool_buf_size, pool->pool_nentries);
3397
3398 /* Process the array of buffer objects in the pool */
3399 for (i = 0; i < pool->pool_nentries; i++) {
3400 /* Get the buffer object */
3401 ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[i];
3402
3403 /* Free the memory the buffer object represents */
3404 kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
3405
3406 /* Free the private area of the buffer object */
3407 kmem_free(ubp->ub_fca_private, sizeof (emlxs_ub_priv_t));
3408 }
3409
3410 /* Free the array of buffer objects in the pool */
3411 kmem_free((caddr_t)pool->fc_ubufs,
3412 (sizeof (fc_unsol_buf_t)*pool->pool_nentries));
3413
3414 /* Free the pool object */
3415 kmem_free((caddr_t)pool, sizeof (emlxs_unsol_buf_t));
3416
3417 return;
3418
3419 } /* emlxs_ub_destroy() */
3420
3421
3422 /*ARGSUSED*/
3423 extern int
3424 emlxs_fca_pkt_abort(opaque_t fca_port_handle, fc_packet_t *pkt, int32_t sleep)
3425 {
3426 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
3427 emlxs_hba_t *hba = HBA;
3428 emlxs_config_t *cfg = &CFG;
3429
3430 emlxs_buf_t *sbp;
3431 NODELIST *nlp;
3432 NODELIST *prev_nlp;
3433 uint8_t channelno;
3434 CHANNEL *cp;
3435 clock_t pkt_timeout;
3436 clock_t timer;
3437 clock_t time;
3438 int32_t pkt_ret;
3439 IOCBQ *iocbq;
3440 IOCBQ *next;
3441 IOCBQ *prev;
3442 uint32_t found;
3443 uint32_t pass = 0;
3444
3445 sbp = (emlxs_buf_t *)pkt->pkt_fca_private;
3446 iocbq = &sbp->iocbq;
3447 nlp = (NODELIST *)sbp->node;
3448 cp = (CHANNEL *)sbp->channel;
3449 channelno = (cp) ? cp->channelno : 0;
3450
3451 if (!(port->flag & EMLXS_INI_BOUND)) {
3452 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3453 "Port not bound.");
3454 return (FC_UNBOUND);
3455 }
3456
3457 if (!(hba->flag & FC_ONLINE_MODE)) {
3458 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3459 "Adapter offline.");
3460 return (FC_OFFLINE);
3461 }
3462
3463 /* ULP requires the aborted pkt to be completed */
3464 /* back to ULP before returning from this call. */
3465 /* SUN knows of problems with this call so they suggested that we */
3466 /* always return a FC_FAILURE for this call, until it is worked out. */
3467
3468 /* Check if pkt is no good */
3469 if (!(sbp->pkt_flags & PACKET_VALID) ||
3470 (sbp->pkt_flags & PACKET_ULP_OWNED)) {
3471 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3472 "Bad sbp. flags=%x", sbp->pkt_flags);
3473 return (FC_FAILURE);
3474 }
3475
3476 /* Tag this now */
3477 /* This will prevent any thread except ours from completing it */
3478 mutex_enter(&sbp->mtx);
3479
3480 /* Check again if we still own this */
3481 if (!(sbp->pkt_flags & PACKET_VALID) ||
3482 (sbp->pkt_flags & PACKET_ULP_OWNED)) {
3483 mutex_exit(&sbp->mtx);
3484 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3485 "Bad sbp. flags=%x", sbp->pkt_flags);
3486 return (FC_FAILURE);
3487 }
3488
3489 /* Check if pkt is a real polled command */
3490 if (!(sbp->pkt_flags & PACKET_IN_ABORT) &&
3491 (sbp->pkt_flags & PACKET_POLLED)) {
3492 mutex_exit(&sbp->mtx);
3493
3494 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3495 "Attempting to abort a polled I/O. sbp=%p flags=%x", sbp,
3496 sbp->pkt_flags);
3497 return (FC_FAILURE);
3498 }
3499
3500 sbp->pkt_flags |= PACKET_POLLED;
3501 sbp->pkt_flags |= PACKET_IN_ABORT;
3502
3503 if (sbp->pkt_flags & (PACKET_IN_COMPLETION | PACKET_IN_FLUSH |
3504 PACKET_IN_TIMEOUT)) {
3505 mutex_exit(&sbp->mtx);
3506
3507 /* Do nothing, pkt already on its way out */
3508 goto done;
3509 }
3510
3511 mutex_exit(&sbp->mtx);
3512
3513 begin:
3514 pass++;
3515
3516 mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
3517
3518 if (sbp->pkt_flags & PACKET_IN_TXQ) {
3519 /* Find it on the queue */
3520 found = 0;
3521 if (iocbq->flag & IOCB_PRIORITY) {
3522 /* Search the priority queue */
3523 prev = NULL;
3524 next = (IOCBQ *) nlp->nlp_ptx[channelno].q_first;
3525
3526 while (next) {
3527 if (next == iocbq) {
3528 /* Remove it */
3529 if (prev) {
3530 prev->next = iocbq->next;
3531 }
3532
3533 if (nlp->nlp_ptx[channelno].q_last ==
3534 (void *)iocbq) {
3535 nlp->nlp_ptx[channelno].q_last =
3536 (void *)prev;
3537 }
3538
3539 if (nlp->nlp_ptx[channelno].q_first ==
3540 (void *)iocbq) {
3541 nlp->nlp_ptx[channelno].
3542 q_first =
3543 (void *)iocbq->next;
3544 }
3545
3546 nlp->nlp_ptx[channelno].q_cnt--;
3547 iocbq->next = NULL;
3548 found = 1;
3549 break;
3550 }
3551
3552 prev = next;
3553 next = next->next;
3554 }
3555 } else {
3556 /* Search the normal queue */
3557 prev = NULL;
3558 next = (IOCBQ *) nlp->nlp_tx[channelno].q_first;
3559
3560 while (next) {
3561 if (next == iocbq) {
3562 /* Remove it */
3563 if (prev) {
3564 prev->next = iocbq->next;
3565 }
3566
3567 if (nlp->nlp_tx[channelno].q_last ==
3568 (void *)iocbq) {
3569 nlp->nlp_tx[channelno].q_last =
3570 (void *)prev;
3571 }
3572
3573 if (nlp->nlp_tx[channelno].q_first ==
3574 (void *)iocbq) {
3575 nlp->nlp_tx[channelno].q_first =
3576 (void *)iocbq->next;
3577 }
3578
3579 nlp->nlp_tx[channelno].q_cnt--;
3580 iocbq->next = NULL;
3581 found = 1;
3582 break;
3583 }
3584
3585 prev = next;
3586 next = (IOCBQ *) next->next;
3587 }
3588 }
3589
3590 if (!found) {
3591 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3592 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3593 "I/O not found in driver. sbp=%p flags=%x", sbp,
3594 sbp->pkt_flags);
3595 goto done;
3596 }
3597
3598 /* Check if node still needs servicing */
3599 if ((nlp->nlp_ptx[channelno].q_first) ||
3600 (nlp->nlp_tx[channelno].q_first &&
3601 !(nlp->nlp_flag[channelno] & NLP_CLOSED))) {
3602
3603 /*
3604 * If this is the base node,
3605 * then don't shift the pointers
3606 */
3607 /* We want to drain the base node before moving on */
3608 if (!nlp->nlp_base) {
3609 /* Just shift channel queue */
3610 /* pointers to next node */
3611 cp->nodeq.q_last = (void *) nlp;
3612 cp->nodeq.q_first = nlp->nlp_next[channelno];
3613 }
3614 } else {
3615 /* Remove node from channel queue */
3616
3617 /* If this is the only node on list */
3618 if (cp->nodeq.q_first == (void *)nlp &&
3619 cp->nodeq.q_last == (void *)nlp) {
3620 cp->nodeq.q_last = NULL;
3621 cp->nodeq.q_first = NULL;
3622 cp->nodeq.q_cnt = 0;
3623 } else if (cp->nodeq.q_first == (void *)nlp) {
3624 cp->nodeq.q_first = nlp->nlp_next[channelno];
3625 ((NODELIST *) cp->nodeq.q_last)->
3626 nlp_next[channelno] = cp->nodeq.q_first;
3627 cp->nodeq.q_cnt--;
3628 } else {
3629 /*
3630 				 * This is a little more difficult: find the
3631 * previous node in the circular channel queue
3632 */
3633 prev_nlp = nlp;
3634 while (prev_nlp->nlp_next[channelno] != nlp) {
3635 prev_nlp = prev_nlp->
3636 nlp_next[channelno];
3637 }
3638
3639 prev_nlp->nlp_next[channelno] =
3640 nlp->nlp_next[channelno];
3641
3642 if (cp->nodeq.q_last == (void *)nlp) {
3643 cp->nodeq.q_last = (void *)prev_nlp;
3644 }
3645 cp->nodeq.q_cnt--;
3646
3647 }
3648
3649 /* Clear node */
3650 nlp->nlp_next[channelno] = NULL;
3651 }
3652
3653 /* Free the ULPIOTAG and the bmp */
3654 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3655 emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
3656 } else {
3657 (void) emlxs_unregister_pkt(cp, sbp->iotag, 1);
3658 }
3659
3660
3661 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3662
3663 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
3664 IOERR_ABORT_REQUESTED, 1);
3665
3666 goto done;
3667 }
3668
3669 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3670
3671
3672 /* Check the chip queue */
3673 mutex_enter(&EMLXS_FCTAB_LOCK);
3674
3675 if ((sbp->pkt_flags & PACKET_IN_CHIPQ) &&
3676 !(sbp->pkt_flags & PACKET_XRI_CLOSED) &&
3677 (sbp == hba->fc_table[sbp->iotag])) {
3678
3679 /* Create the abort IOCB */
3680 if (hba->state >= FC_LINK_UP) {
3681 iocbq =
3682 emlxs_create_abort_xri_cn(port, sbp->node,
3683 sbp->iotag, cp, sbp->class, ABORT_TYPE_ABTS);
3684
3685 mutex_enter(&sbp->mtx);
3686 sbp->pkt_flags |= PACKET_XRI_CLOSED;
3687 sbp->ticks =
3688 hba->timer_tics + (4 * hba->fc_ratov) + 10;
3689 sbp->abort_attempts++;
3690 mutex_exit(&sbp->mtx);
3691 } else {
3692 iocbq =
3693 emlxs_create_close_xri_cn(port, sbp->node,
3694 sbp->iotag, cp);
3695
3696 mutex_enter(&sbp->mtx);
3697 sbp->pkt_flags |= PACKET_XRI_CLOSED;
3698 sbp->ticks = hba->timer_tics + 30;
3699 sbp->abort_attempts++;
3700 mutex_exit(&sbp->mtx);
3701 }
3702
3703 mutex_exit(&EMLXS_FCTAB_LOCK);
3704
3705 /* Send this iocbq */
3706 if (iocbq) {
3707 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
3708 iocbq = NULL;
3709 }
3710
3711 goto done;
3712 }
3713
3714 mutex_exit(&EMLXS_FCTAB_LOCK);
3715
3716 /* Pkt was not on any queues */
3717
3718 /* Check again if we still own this */
3719 if (!(sbp->pkt_flags & PACKET_VALID) ||
3720 (sbp->pkt_flags &
3721 (PACKET_ULP_OWNED | PACKET_IN_COMPLETION |
3722 PACKET_IN_FLUSH | PACKET_IN_TIMEOUT))) {
3723 goto done;
3724 }
3725
3726 if (!sleep) {
3727 return (FC_FAILURE);
3728 }
3729
3730 /* Apparently the pkt was not found. Let's delay and try again */
3731 if (pass < 5) {
3732 delay(drv_usectohz(5000000)); /* 5 seconds */
3733
3734 /* Check again if we still own this */
3735 if (!(sbp->pkt_flags & PACKET_VALID) ||
3736 (sbp->pkt_flags &
3737 (PACKET_ULP_OWNED | PACKET_IN_COMPLETION |
3738 PACKET_IN_FLUSH | PACKET_IN_TIMEOUT))) {
3739 goto done;
3740 }
3741
3742 goto begin;
3743 }
3744
3745 force_it:
3746
3747 /* Force the completion now */
3748 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3749 "Abort: Completing an IO thats not outstanding: %x", sbp->iotag);
3750
3751 /* Now complete it */
3752 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, IOERR_ABORT_REQUESTED,
3753 1);
3754
3755 done:
3756
3757 /* Now wait for the pkt to complete */
3758 if (!(sbp->pkt_flags & PACKET_COMPLETED)) {
3759 /* Set thread timeout */
3760 pkt_timeout = emlxs_timeout(hba, 30);
3761
3762 /* Check for panic situation */
3763 if (ddi_in_panic()) {
3764
3765 /*
3766 * In panic situations there will be one thread with no
3767 			 * interrupts (hard or soft) and no timers
3768 */
3769
3770 /*
3771 * We must manually poll everything in this thread
3772 * to keep the driver going.
3773 */
3774
3775 /* Keep polling the chip until our IO is completed */
3776 (void) drv_getparm(LBOLT, &time);
3777 timer = time + drv_usectohz(1000000);
3778 while ((time < pkt_timeout) &&
3779 !(sbp->pkt_flags & PACKET_COMPLETED)) {
3780 EMLXS_SLI_POLL_INTR(hba);
3781 (void) drv_getparm(LBOLT, &time);
3782
3783 /* Trigger timer checks periodically */
3784 if (time >= timer) {
3785 emlxs_timer_checks(hba);
3786 timer = time + drv_usectohz(1000000);
3787 }
3788 }
3789 } else {
3790 /* Wait for IO completion or pkt_timeout */
3791 mutex_enter(&EMLXS_PKT_LOCK);
3792 pkt_ret = 0;
3793 while ((pkt_ret != -1) &&
3794 !(sbp->pkt_flags & PACKET_COMPLETED)) {
3795 pkt_ret =
3796 cv_timedwait(&EMLXS_PKT_CV,
3797 &EMLXS_PKT_LOCK, pkt_timeout);
3798 }
3799 mutex_exit(&EMLXS_PKT_LOCK);
3800 }
3801
3802 		/* Check if pkt_timeout occurred. This is not good. */
3803 /* Something happened to our IO. */
3804 if (!(sbp->pkt_flags & PACKET_COMPLETED)) {
3805 /* Force the completion now */
3806 goto force_it;
3807 }
3808 }
3809 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
3810 emlxs_unswap_pkt(sbp);
3811 #endif /* EMLXS_MODREV2X */
3812
3813 /* Check again if we still own this */
3814 if ((sbp->pkt_flags & PACKET_VALID) &&
3815 !(sbp->pkt_flags & PACKET_ULP_OWNED)) {
3816 mutex_enter(&sbp->mtx);
3817 if ((sbp->pkt_flags & PACKET_VALID) &&
3818 !(sbp->pkt_flags & PACKET_ULP_OWNED)) {
3819 sbp->pkt_flags |= PACKET_ULP_OWNED;
3820 }
3821 mutex_exit(&sbp->mtx);
3822 }
3823
3824 #ifdef ULP_PATCH5
3825 if (cfg[CFG_ENABLE_PATCH].current & ULP_PATCH5) {
3826 return (FC_FAILURE);
3827 }
3828 #endif /* ULP_PATCH5 */
3829
3830 return (FC_SUCCESS);
3831
3832 } /* emlxs_fca_pkt_abort() */
3833
3834
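/*
 * Walk the adapter's fc_table and abort every active I/O through
 * emlxs_fca_pkt_abort(). On return, *tx holds the count of I/Os that
 * were aborted while still on the transmit queues and *chip the count
 * aborted from the chip queue (PACKET_IN_CHIPQ).
 */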
3835 static void
3836 emlxs_abort_all(emlxs_hba_t *hba, uint32_t *tx, uint32_t *chip)
3837 {
3838 emlxs_port_t *port = &PPORT;
3839 fc_packet_t *pkt;
3840 emlxs_buf_t *sbp;
3841 uint32_t i;
3842 uint32_t flg;
3843 uint32_t rc;
3844 uint32_t txcnt;
3845 uint32_t chipcnt;
3846
3847 txcnt = 0;
3848 chipcnt = 0;
3849
3850 mutex_enter(&EMLXS_FCTAB_LOCK);
3851 for (i = 0; i < hba->max_iotag; i++) {
3852 sbp = hba->fc_table[i];
3853 if (sbp == NULL || sbp == STALE_PACKET) {
3854 continue;
3855 }
3856 flg = (sbp->pkt_flags & PACKET_IN_CHIPQ);
3857 pkt = PRIV2PKT(sbp);
3858 mutex_exit(&EMLXS_FCTAB_LOCK);
3859 rc = emlxs_fca_pkt_abort(port, pkt, 0);
3860 if (rc == FC_SUCCESS) {
3861 if (flg) {
3862 chipcnt++;
3863 } else {
3864 txcnt++;
3865 }
3866 }
3867 mutex_enter(&EMLXS_FCTAB_LOCK);
3868 }
3869 mutex_exit(&EMLXS_FCTAB_LOCK);
3870 *tx = txcnt;
3871 *chip = chipcnt;
3872 } /* emlxs_abort_all() */
3873
3874
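/*
 * Reset handler shared by the FCA and DFC interfaces. Supports link
 * resets (FC_FCA_LINK_RESET), user-initiated core dumps (FC_FCA_CORE),
 * full adapter resets (FC_FCA_RESET / FC_FCA_RESET_CORE) and SLI4
 * firmware resets (EMLXS_DFC_RESET_ALL*). Reset state transitions are
 * serialized through EMLXS_PORT_LOCK.
 */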
3875 extern int32_t
3876 emlxs_reset(emlxs_port_t *port, uint32_t cmd)
3877 {
3878 emlxs_hba_t *hba = HBA;
3879 int rval;
3880 int i = 0;
3881 int ret;
3882 clock_t timeout;
3883
3884 switch (cmd) {
3885 case FC_FCA_LINK_RESET:
3886
3887 mutex_enter(&EMLXS_PORT_LOCK);
3888 if (!(hba->flag & FC_ONLINE_MODE) ||
3889 (hba->state <= FC_LINK_DOWN)) {
3890 mutex_exit(&EMLXS_PORT_LOCK);
3891 return (FC_SUCCESS);
3892 }
3893
3894 if (hba->reset_state &
3895 (FC_LINK_RESET_INP | FC_PORT_RESET_INP)) {
3896 mutex_exit(&EMLXS_PORT_LOCK);
3897 return (FC_FAILURE);
3898 }
3899
3900 hba->reset_state |= FC_LINK_RESET_INP;
3901 hba->reset_request |= FC_LINK_RESET;
3902 mutex_exit(&EMLXS_PORT_LOCK);
3903
3904 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3905 "Resetting Link.");
3906
3907 mutex_enter(&EMLXS_LINKUP_LOCK);
3908 hba->linkup_wait_flag = TRUE;
3909 mutex_exit(&EMLXS_LINKUP_LOCK);
3910
3911 if (emlxs_reset_link(hba, 1, 1)) {
3912 mutex_enter(&EMLXS_LINKUP_LOCK);
3913 hba->linkup_wait_flag = FALSE;
3914 mutex_exit(&EMLXS_LINKUP_LOCK);
3915
3916 mutex_enter(&EMLXS_PORT_LOCK);
3917 hba->reset_state &= ~FC_LINK_RESET_INP;
3918 hba->reset_request &= ~FC_LINK_RESET;
3919 mutex_exit(&EMLXS_PORT_LOCK);
3920
3921 return (FC_FAILURE);
3922 }
3923
3924 mutex_enter(&EMLXS_LINKUP_LOCK);
3925 timeout = emlxs_timeout(hba, 60);
3926 ret = 0;
3927 while ((ret != -1) && (hba->linkup_wait_flag == TRUE)) {
3928 ret =
3929 cv_timedwait(&EMLXS_LINKUP_CV, &EMLXS_LINKUP_LOCK,
3930 timeout);
3931 }
3932
3933 hba->linkup_wait_flag = FALSE;
3934 mutex_exit(&EMLXS_LINKUP_LOCK);
3935
3936 mutex_enter(&EMLXS_PORT_LOCK);
3937 hba->reset_state &= ~FC_LINK_RESET_INP;
3938 hba->reset_request &= ~FC_LINK_RESET;
3939 mutex_exit(&EMLXS_PORT_LOCK);
3940
3941 if (ret == -1) {
3942 return (FC_FAILURE);
3943 }
3944
3945 return (FC_SUCCESS);
3946
3947 case FC_FCA_CORE:
3948 #ifdef DUMP_SUPPORT
3949 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3950 "Dumping Core.");
3951
3952 /* Schedule a USER dump */
3953 emlxs_dump(hba, EMLXS_USER_DUMP, 0, 0);
3954
3955 /* Wait for dump to complete */
3956 emlxs_dump_wait(hba);
3957
3958 return (FC_SUCCESS);
3959 #endif /* DUMP_SUPPORT */
3960
3961 case FC_FCA_RESET:
3962 case FC_FCA_RESET_CORE:
3963
3964 mutex_enter(&EMLXS_PORT_LOCK);
3965 if (hba->reset_state & FC_PORT_RESET_INP) {
3966 mutex_exit(&EMLXS_PORT_LOCK);
3967 return (FC_FAILURE);
3968 }
3969
3970 hba->reset_state |= FC_PORT_RESET_INP;
3971 hba->reset_request |= (FC_PORT_RESET | FC_LINK_RESET);
3972
3973 /* wait for any pending link resets to complete */
3974 while ((hba->reset_state & FC_LINK_RESET_INP) &&
3975 (i++ < 1000)) {
3976 mutex_exit(&EMLXS_PORT_LOCK);
3977 delay(drv_usectohz(1000));
3978 mutex_enter(&EMLXS_PORT_LOCK);
3979 }
3980
3981 if (hba->reset_state & FC_LINK_RESET_INP) {
3982 hba->reset_state &= ~FC_PORT_RESET_INP;
3983 hba->reset_request &= ~(FC_PORT_RESET | FC_LINK_RESET);
3984 mutex_exit(&EMLXS_PORT_LOCK);
3985 return (FC_FAILURE);
3986 }
3987 mutex_exit(&EMLXS_PORT_LOCK);
3988
3989 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3990 "Resetting Adapter.");
3991
3992 rval = FC_SUCCESS;
3993
3994 if (emlxs_offline(hba, 0) == 0) {
3995 (void) emlxs_online(hba);
3996 } else {
3997 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3998 "Adapter reset failed. Device busy.");
3999
4000 rval = FC_DEVICE_BUSY;
4001 }
4002
4003 mutex_enter(&EMLXS_PORT_LOCK);
4004 hba->reset_state &= ~FC_PORT_RESET_INP;
4005 hba->reset_request &= ~(FC_PORT_RESET | FC_LINK_RESET);
4006 mutex_exit(&EMLXS_PORT_LOCK);
4007
4008 return (rval);
4009
4010 case EMLXS_DFC_RESET_ALL:
4011 case EMLXS_DFC_RESET_ALL_FORCE_DUMP:
4012
4013 mutex_enter(&EMLXS_PORT_LOCK);
4014 if (hba->reset_state & FC_PORT_RESET_INP) {
4015 mutex_exit(&EMLXS_PORT_LOCK);
4016 return (FC_FAILURE);
4017 }
4018
4019 hba->reset_state |= FC_PORT_RESET_INP;
4020 hba->reset_request |= (FC_PORT_RESET | FC_LINK_RESET);
4021
4022 /* wait for any pending link resets to complete */
4023 while ((hba->reset_state & FC_LINK_RESET_INP) &&
4024 (i++ < 1000)) {
4025 mutex_exit(&EMLXS_PORT_LOCK);
4026 delay(drv_usectohz(1000));
4027 mutex_enter(&EMLXS_PORT_LOCK);
4028 }
4029
4030 if (hba->reset_state & FC_LINK_RESET_INP) {
4031 hba->reset_state &= ~FC_PORT_RESET_INP;
4032 hba->reset_request &= ~(FC_PORT_RESET | FC_LINK_RESET);
4033 mutex_exit(&EMLXS_PORT_LOCK);
4034 return (FC_FAILURE);
4035 }
4036 mutex_exit(&EMLXS_PORT_LOCK);
4037
4038 rval = FC_SUCCESS;
4039
4040 if (cmd == EMLXS_DFC_RESET_ALL) {
4041 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4042 "Resetting Adapter (All Firmware Reset).");
4043
4044 emlxs_sli4_hba_reset_all(hba, 0);
4045 } else {
4046 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4047 "Resetting Adapter "
4048 "(All Firmware Reset, Force Dump).");
4049
4050 emlxs_sli4_hba_reset_all(hba, 1);
4051 }
4052
4053 mutex_enter(&EMLXS_PORT_LOCK);
4054 hba->reset_state &= ~FC_PORT_RESET_INP;
4055 hba->reset_request &= ~(FC_PORT_RESET | FC_LINK_RESET);
4056 mutex_exit(&EMLXS_PORT_LOCK);
4057
4058 /* Wait for the timer thread to detect the error condition */
4059 delay(drv_usectohz(1000000));
4060
4061 /* Wait for the HBA to re-initialize */
4062 i = 0;
4063 mutex_enter(&EMLXS_PORT_LOCK);
4064 while (!(hba->flag & FC_ONLINE_MODE) && (i++ < 30)) {
4065 mutex_exit(&EMLXS_PORT_LOCK);
4066 delay(drv_usectohz(1000000));
4067 mutex_enter(&EMLXS_PORT_LOCK);
4068 }
4069
4070 if (!(hba->flag & FC_ONLINE_MODE)) {
4071 rval = FC_FAILURE;
4072 }
4073
4074 mutex_exit(&EMLXS_PORT_LOCK);
4075
4076 return (rval);
4077
4078 default:
4079 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4080 "reset: Unknown command. cmd=%x", cmd);
4081
4082 break;
4083 }
4084
4085 return (FC_FAILURE);
4086
4087 } /* emlxs_reset() */
4088
4089
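/*
 * FCA reset entry point. Verifies that the port is bound in initiator
 * mode, promotes a link reset to a full adapter reset when a firmware
 * update is pending, and then calls emlxs_reset() to do the work.
 */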
4090 extern int32_t
4091 emlxs_fca_reset(opaque_t fca_port_handle, uint32_t cmd)
4092 {
4093 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
4094 emlxs_hba_t *hba = HBA;
4095 int32_t rval;
4096
4097 if (port->mode != MODE_INITIATOR) {
4098 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4099 "fca_reset failed. Port is not in initiator mode.");
4100
4101 return (FC_FAILURE);
4102 }
4103
4104 if (!(port->flag & EMLXS_INI_BOUND)) {
4105 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4106 "fca_reset: Port not bound.");
4107
4108 return (FC_UNBOUND);
4109 }
4110
4111 switch (cmd) {
4112 case FC_FCA_LINK_RESET:
4113 if (hba->fw_flag & FW_UPDATE_NEEDED) {
4114 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4115 "fca_reset: FC_FCA_LINK_RESET -> FC_FCA_RESET");
4116 cmd = FC_FCA_RESET;
4117 } else {
4118 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4119 "fca_reset: FC_FCA_LINK_RESET");
4120 }
4121 break;
4122
4123 case FC_FCA_CORE:
4124 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4125 "fca_reset: FC_FCA_CORE");
4126 break;
4127
4128 case FC_FCA_RESET:
4129 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4130 "fca_reset: FC_FCA_RESET");
4131 break;
4132
4133 case FC_FCA_RESET_CORE:
4134 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4135 "fca_reset: FC_FCA_RESET_CORE");
4136 break;
4137
4138 default:
4139 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4140 "fca_reset: Unknown command. cmd=%x", cmd);
4141 return (FC_FAILURE);
4142 }
4143
4144 if (hba->fw_flag & FW_UPDATE_NEEDED) {
4145 hba->fw_flag |= FW_UPDATE_KERNEL;
4146 }
4147
4148 rval = emlxs_reset(port, cmd);
4149
4150 return (rval);
4151
4152 } /* emlxs_fca_reset() */
4153
4154
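/*
 * FCA port management entry point. Dispatches on pm->pm_cmd_code to
 * service firmware/fcode queries and downloads, dump retrieval, link
 * state queries, diagnostics, driver parameter get/set and various
 * EMLXS-private commands, returning an FC_* status code.
 *
 * Illustration only: for FC_PORT_GET_FW_REV the caller is expected to
 * supply an output buffer through pm_data_buf/pm_data_len, which the
 * handler fills with "<model> <firmware version>".
 */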
4155 extern int
4156 emlxs_fca_port_manage(opaque_t fca_port_handle, fc_fca_pm_t *pm)
4157 {
4158 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
4159 emlxs_hba_t *hba = HBA;
4160 int32_t ret;
4161 emlxs_vpd_t *vpd = &VPD;
4162
4163 ret = FC_SUCCESS;
4164
4165 #ifdef IDLE_TIMER
4166 emlxs_pm_busy_component(hba);
4167 #endif /* IDLE_TIMER */
4168
4169 switch (pm->pm_cmd_code) {
4170
4171 case FC_PORT_GET_FW_REV:
4172 {
4173 char buffer[128];
4174
4175 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4176 "fca_port_manage: FC_PORT_GET_FW_REV");
4177
4178 (void) snprintf(buffer, (sizeof (buffer)-1),
4179 "%s %s", hba->model_info.model,
4180 vpd->fw_version);
4181 bzero(pm->pm_data_buf, pm->pm_data_len);
4182
4183 if (pm->pm_data_len < strlen(buffer) + 1) {
4184 ret = FC_NOMEM;
4185
4186 break;
4187 }
4188
4189 (void) strncpy(pm->pm_data_buf, buffer,
4190 (pm->pm_data_len-1));
4191 break;
4192 }
4193
4194 case FC_PORT_GET_FCODE_REV:
4195 {
4196 char buffer[128];
4197
4198 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4199 "fca_port_manage: FC_PORT_GET_FCODE_REV");
4200
4201 /* Force update here just to be sure */
4202 emlxs_get_fcode_version(hba);
4203
4204 (void) snprintf(buffer, (sizeof (buffer)-1),
4205 "%s %s", hba->model_info.model,
4206 vpd->fcode_version);
4207 bzero(pm->pm_data_buf, pm->pm_data_len);
4208
4209 if (pm->pm_data_len < strlen(buffer) + 1) {
4210 ret = FC_NOMEM;
4211 break;
4212 }
4213
4214 (void) strncpy(pm->pm_data_buf, buffer,
4215 (pm->pm_data_len-1));
4216 break;
4217 }
4218
4219 case FC_PORT_GET_DUMP_SIZE:
4220 {
4221 #ifdef DUMP_SUPPORT
4222 uint32_t dump_size = 0;
4223
4224 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4225 "fca_port_manage: FC_PORT_GET_DUMP_SIZE");
4226
4227 if (pm->pm_data_len < sizeof (uint32_t)) {
4228 ret = FC_NOMEM;
4229 break;
4230 }
4231
4232 (void) emlxs_get_dump(hba, NULL, &dump_size);
4233
4234 *((uint32_t *)pm->pm_data_buf) = dump_size;
4235
4236 #else
4237 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4238 "fca_port_manage: FC_PORT_GET_DUMP_SIZE unsupported.");
4239
4240 #endif /* DUMP_SUPPORT */
4241
4242 break;
4243 }
4244
4245 case FC_PORT_GET_DUMP:
4246 {
4247 #ifdef DUMP_SUPPORT
4248 uint32_t dump_size = 0;
4249
4250 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4251 "fca_port_manage: FC_PORT_GET_DUMP");
4252
4253 (void) emlxs_get_dump(hba, NULL, &dump_size);
4254
4255 if (pm->pm_data_len < dump_size) {
4256 ret = FC_NOMEM;
4257 break;
4258 }
4259
4260 (void) emlxs_get_dump(hba, (uint8_t *)pm->pm_data_buf,
4261 (uint32_t *)&dump_size);
4262 #else
4263 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4264 "fca_port_manage: FC_PORT_GET_DUMP unsupported.");
4265
4266 #endif /* DUMP_SUPPORT */
4267
4268 break;
4269 }
4270
4271 case FC_PORT_FORCE_DUMP:
4272 {
4273 #ifdef DUMP_SUPPORT
4274 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4275 "fca_port_manage: FC_PORT_FORCE_DUMP");
4276
4277 /* Schedule a USER dump */
4278 emlxs_dump(hba, EMLXS_USER_DUMP, 0, 0);
4279
4280 /* Wait for dump to complete */
4281 emlxs_dump_wait(hba);
4282 #else
4283 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4284 "fca_port_manage: FC_PORT_FORCE_DUMP unsupported.");
4285
4286 #endif /* DUMP_SUPPORT */
4287 break;
4288 }
4289
4290 case FC_PORT_LINK_STATE:
4291 {
4292 uint32_t *link_state;
4293
4294 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4295 "fca_port_manage: FC_PORT_LINK_STATE");
4296
4297 if (pm->pm_stat_len != sizeof (*link_state)) {
4298 ret = FC_NOMEM;
4299 break;
4300 }
4301
4302 if (pm->pm_cmd_buf != NULL) {
4303 /*
4304 * Can't look beyond the FCA port.
4305 */
4306 ret = FC_INVALID_REQUEST;
4307 break;
4308 }
4309
4310 link_state = (uint32_t *)pm->pm_stat_buf;
4311
4312 /* Set the state */
4313 if (hba->state >= FC_LINK_UP) {
4314 /* Check for loop topology */
4315 if (hba->topology == TOPOLOGY_LOOP) {
4316 *link_state = FC_STATE_LOOP;
4317 } else {
4318 *link_state = FC_STATE_ONLINE;
4319 }
4320
4321 /* Set the link speed */
4322 switch (hba->linkspeed) {
4323 case LA_2GHZ_LINK:
4324 *link_state |= FC_STATE_2GBIT_SPEED;
4325 break;
4326 case LA_4GHZ_LINK:
4327 *link_state |= FC_STATE_4GBIT_SPEED;
4328 break;
4329 case LA_8GHZ_LINK:
4330 *link_state |= FC_STATE_8GBIT_SPEED;
4331 break;
4332 case LA_10GHZ_LINK:
4333 *link_state |= FC_STATE_10GBIT_SPEED;
4334 break;
4335 case LA_16GHZ_LINK:
4336 *link_state |= FC_STATE_16GBIT_SPEED;
4337 break;
4338 case LA_32GHZ_LINK:
4339 *link_state |= FC_STATE_32GBIT_SPEED;
4340 break;
4341 case LA_1GHZ_LINK:
4342 default:
4343 *link_state |= FC_STATE_1GBIT_SPEED;
4344 break;
4345 }
4346 } else {
4347 *link_state = FC_STATE_OFFLINE;
4348 }
4349
4350 break;
4351 }
4352
4353
4354 case FC_PORT_ERR_STATS:
4355 case FC_PORT_RLS:
4356 {
4357 MAILBOXQ *mbq;
4358 MAILBOX *mb;
4359 fc_rls_acc_t *bp;
4360
4361 if (!(hba->flag & FC_ONLINE_MODE)) {
4362 return (FC_OFFLINE);
4363 }
4364 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4365 "fca_port_manage: FC_PORT_RLS / FC_PORT_ERR_STATS");
4366
4367 if (pm->pm_data_len < sizeof (fc_rls_acc_t)) {
4368 ret = FC_NOMEM;
4369 break;
4370 }
4371
4372 if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba,
4373 MEM_MBOX)) == 0) {
4374 ret = FC_NOMEM;
4375 break;
4376 }
4377 mb = (MAILBOX *)mbq;
4378
4379 emlxs_mb_read_lnk_stat(hba, mbq);
4380 if (EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_WAIT, 0)
4381 != MBX_SUCCESS) {
4382 ret = FC_PBUSY;
4383 } else {
4384 bp = (fc_rls_acc_t *)pm->pm_data_buf;
4385
4386 bp->rls_link_fail = mb->un.varRdLnk.linkFailureCnt;
4387 bp->rls_sync_loss = mb->un.varRdLnk.lossSyncCnt;
4388 bp->rls_sig_loss = mb->un.varRdLnk.lossSignalCnt;
4389 bp->rls_prim_seq_err = mb->un.varRdLnk.primSeqErrCnt;
4390 bp->rls_invalid_word =
4391 mb->un.varRdLnk.invalidXmitWord;
4392 bp->rls_invalid_crc = mb->un.varRdLnk.crcCnt;
4393 }
4394
4395 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
4396 break;
4397 }
4398
4399 case FC_PORT_DOWNLOAD_FW:
4400 if (!(hba->flag & FC_ONLINE_MODE)) {
4401 return (FC_OFFLINE);
4402 }
4403 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4404 "fca_port_manage: FC_PORT_DOWNLOAD_FW");
4405 ret = emlxs_fw_download(hba, pm->pm_data_buf,
4406 pm->pm_data_len, 1);
4407 break;
4408
4409 case FC_PORT_DOWNLOAD_FCODE:
4410 if (!(hba->flag & FC_ONLINE_MODE)) {
4411 return (FC_OFFLINE);
4412 }
4413 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4414 "fca_port_manage: FC_PORT_DOWNLOAD_FCODE");
4415 ret = emlxs_fw_download(hba, pm->pm_data_buf,
4416 pm->pm_data_len, 1);
4417 break;
4418
4419 case FC_PORT_DIAG:
4420 {
4421 uint32_t errno = 0;
4422 uint32_t did = 0;
4423 uint32_t pattern = 0;
4424
4425 switch (pm->pm_cmd_flags) {
4426 case EMLXS_DIAG_BIU:
4427
4428 if (!(hba->flag & FC_ONLINE_MODE)) {
4429 return (FC_OFFLINE);
4430 }
4431 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4432 "fca_port_manage: DIAG_BIU");
4433
4434 if (pm->pm_data_len) {
4435 pattern = *((uint32_t *)pm->pm_data_buf);
4436 }
4437
4438 errno = emlxs_diag_biu_run(hba, pattern);
4439
4440 if (pm->pm_stat_len == sizeof (errno)) {
4441 *(int *)pm->pm_stat_buf = errno;
4442 }
4443
4444 break;
4445
4446
4447 case EMLXS_DIAG_POST:
4448
4449 if (!(hba->flag & FC_ONLINE_MODE)) {
4450 return (FC_OFFLINE);
4451 }
4452 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4453 "fca_port_manage: DIAG_POST");
4454
4455 errno = emlxs_diag_post_run(hba);
4456
4457 if (pm->pm_stat_len == sizeof (errno)) {
4458 *(int *)pm->pm_stat_buf = errno;
4459 }
4460
4461 break;
4462
4463
4464 case EMLXS_DIAG_ECHO:
4465
4466 if (!(hba->flag & FC_ONLINE_MODE)) {
4467 return (FC_OFFLINE);
4468 }
4469 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4470 "fca_port_manage: DIAG_ECHO");
4471
4472 if (pm->pm_cmd_len != sizeof (uint32_t)) {
4473 ret = FC_INVALID_REQUEST;
4474 break;
4475 }
4476
4477 did = *((uint32_t *)pm->pm_cmd_buf);
4478
4479 if (pm->pm_data_len) {
4480 pattern = *((uint32_t *)pm->pm_data_buf);
4481 }
4482
4483 errno = emlxs_diag_echo_run(port, did, pattern);
4484
4485 if (pm->pm_stat_len == sizeof (errno)) {
4486 *(int *)pm->pm_stat_buf = errno;
4487 }
4488
4489 break;
4490
4491
4492 case EMLXS_PARM_GET_NUM:
4493 {
4494 uint32_t *num;
4495 emlxs_config_t *cfg;
4496 uint32_t i;
4497 uint32_t count;
4498 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4499 "fca_port_manage: PARM_GET_NUM");
4500
4501 if (pm->pm_stat_len < sizeof (uint32_t)) {
4502 ret = FC_NOMEM;
4503 break;
4504 }
4505
4506 num = (uint32_t *)pm->pm_stat_buf;
4507 count = 0;
4508 cfg = &CFG;
4509 for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
4510 if (!(cfg->flags & PARM_HIDDEN)) {
4511 count++;
4512 }
4513
4514 }
4515
4516 *num = count;
4517
4518 break;
4519 }
4520
4521 case EMLXS_PARM_GET_LIST:
4522 {
4523 emlxs_parm_t *parm;
4524 emlxs_config_t *cfg;
4525 uint32_t i;
4526 uint32_t max_count;
4527
4528 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4529 "fca_port_manage: PARM_GET_LIST");
4530
4531 if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
4532 ret = FC_NOMEM;
4533 break;
4534 }
4535
4536 max_count = pm->pm_stat_len / sizeof (emlxs_parm_t);
4537
4538 parm = (emlxs_parm_t *)pm->pm_stat_buf;
4539 cfg = &CFG;
4540 for (i = 0; i < NUM_CFG_PARAM && max_count; i++,
4541 cfg++) {
4542 if (!(cfg->flags & PARM_HIDDEN)) {
4543 (void) strncpy(parm->label, cfg->string,
4544 (sizeof (parm->label)-1));
4545 parm->min = cfg->low;
4546 parm->max = cfg->hi;
4547 parm->def = cfg->def;
4548 parm->current = cfg->current;
4549 parm->flags = cfg->flags;
4550 (void) strncpy(parm->help, cfg->help,
4551 (sizeof (parm->help)-1));
4552 parm++;
4553 max_count--;
4554 }
4555 }
4556
4557 break;
4558 }
4559
4560 case EMLXS_PARM_GET:
4561 {
4562 emlxs_parm_t *parm_in;
4563 emlxs_parm_t *parm_out;
4564 emlxs_config_t *cfg;
4565 uint32_t i;
4566 uint32_t len;
4567
4568 if (pm->pm_cmd_len < sizeof (emlxs_parm_t)) {
4569 EMLXS_MSGF(EMLXS_CONTEXT,
4570 &emlxs_sfs_debug_msg,
4571 "fca_port_manage: PARM_GET. "
4572 "inbuf too small.");
4573
4574 ret = FC_BADCMD;
4575 break;
4576 }
4577
4578 if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
4579 EMLXS_MSGF(EMLXS_CONTEXT,
4580 &emlxs_sfs_debug_msg,
4581 "fca_port_manage: PARM_GET. "
4582 "outbuf too small");
4583
4584 ret = FC_BADCMD;
4585 break;
4586 }
4587
4588 parm_in = (emlxs_parm_t *)pm->pm_cmd_buf;
4589 parm_out = (emlxs_parm_t *)pm->pm_stat_buf;
4590 len = strlen(parm_in->label);
4591 cfg = &CFG;
4592 ret = FC_BADOBJECT;
4593
4594 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4595 "fca_port_manage: PARM_GET: %s=0x%x,%d",
4596 parm_in->label, parm_in->current,
4597 parm_in->current);
4598
4599 for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
4600 if (len == strlen(cfg->string) &&
4601 (strcmp(parm_in->label,
4602 cfg->string) == 0)) {
4603 (void) strncpy(parm_out->label,
4604 cfg->string,
4605 (sizeof (parm_out->label)-1));
4606 parm_out->min = cfg->low;
4607 parm_out->max = cfg->hi;
4608 parm_out->def = cfg->def;
4609 parm_out->current = cfg->current;
4610 parm_out->flags = cfg->flags;
4611 (void) strncpy(parm_out->help,
4612 cfg->help,
4613 (sizeof (parm_out->help)-1));
4614
4615 ret = FC_SUCCESS;
4616 break;
4617 }
4618 }
4619
4620 break;
4621 }
4622
4623 case EMLXS_PARM_SET:
4624 {
4625 emlxs_parm_t *parm_in;
4626 emlxs_parm_t *parm_out;
4627 emlxs_config_t *cfg;
4628 uint32_t i;
4629 uint32_t len;
4630
4631 if (pm->pm_cmd_len < sizeof (emlxs_parm_t)) {
4632 EMLXS_MSGF(EMLXS_CONTEXT,
4633 &emlxs_sfs_debug_msg,
4634 "fca_port_manage: PARM_GET. "
4635 "inbuf too small.");
4636
4637 ret = FC_BADCMD;
4638 break;
4639 }
4640
4641 if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
4642 EMLXS_MSGF(EMLXS_CONTEXT,
4643 &emlxs_sfs_debug_msg,
4644 "fca_port_manage: PARM_GET. "
4645 "outbuf too small");
4646 ret = FC_BADCMD;
4647 break;
4648 }
4649
4650 parm_in = (emlxs_parm_t *)pm->pm_cmd_buf;
4651 parm_out = (emlxs_parm_t *)pm->pm_stat_buf;
4652 len = strlen(parm_in->label);
4653 cfg = &CFG;
4654 ret = FC_BADOBJECT;
4655
4656 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4657 "fca_port_manage: PARM_SET: %s=0x%x,%d",
4658 parm_in->label, parm_in->current,
4659 parm_in->current);
4660
4661 for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
4662 /* Find matching parameter string */
4663 if (len == strlen(cfg->string) &&
4664 (strcmp(parm_in->label,
4665 cfg->string) == 0)) {
4666 /* Attempt to update parameter */
4667 if (emlxs_set_parm(hba, i,
4668 parm_in->current) == FC_SUCCESS) {
4669 (void) strncpy(parm_out->label,
4670 cfg->string,
4671 (sizeof (parm_out->label)-
4672 1));
4673 parm_out->min = cfg->low;
4674 parm_out->max = cfg->hi;
4675 parm_out->def = cfg->def;
4676 parm_out->current =
4677 cfg->current;
4678 parm_out->flags = cfg->flags;
4679 (void) strncpy(parm_out->help,
4680 cfg->help,
4681 (sizeof (parm_out->help)-
4682 1));
4683
4684 ret = FC_SUCCESS;
4685 }
4686
4687 break;
4688 }
4689 }
4690
4691 break;
4692 }
4693
4694 case EMLXS_LOG_GET:
4695 {
4696 emlxs_log_req_t *req;
4697 emlxs_log_resp_t *resp;
4698 uint32_t len;
4699
4700 /* Check command size */
4701 if (pm->pm_cmd_len < sizeof (emlxs_log_req_t)) {
4702 ret = FC_BADCMD;
4703 break;
4704 }
4705
4706 /* Get the request */
4707 req = (emlxs_log_req_t *)pm->pm_cmd_buf;
4708
4709 /* Calculate the response length from the request */
4710 len = sizeof (emlxs_log_resp_t) +
4711 (req->count * MAX_LOG_MSG_LENGTH);
4712
4713 /* Check the response buffer length */
4714 if (pm->pm_stat_len < len) {
4715 ret = FC_BADCMD;
4716 break;
4717 }
4718
4719 /* Get the response pointer */
4720 resp = (emlxs_log_resp_t *)pm->pm_stat_buf;
4721
4722 			/* Get the requested log entries */
4723 (void) emlxs_msg_log_get(hba, req, resp);
4724
4725 ret = FC_SUCCESS;
4726 break;
4727 }
4728
4729 case EMLXS_GET_BOOT_REV:
4730 {
4731 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4732 "fca_port_manage: GET_BOOT_REV");
4733
4734 if (pm->pm_stat_len < strlen(vpd->boot_version)) {
4735 ret = FC_NOMEM;
4736 break;
4737 }
4738
4739 bzero(pm->pm_stat_buf, pm->pm_stat_len);
4740 (void) snprintf(pm->pm_stat_buf, pm->pm_stat_len,
4741 "%s %s", hba->model_info.model, vpd->boot_version);
4742
4743 break;
4744 }
4745
4746 case EMLXS_DOWNLOAD_BOOT:
4747 if (!(hba->flag & FC_ONLINE_MODE)) {
4748 return (FC_OFFLINE);
4749 }
4750 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4751 "fca_port_manage: DOWNLOAD_BOOT");
4752
4753 ret = emlxs_fw_download(hba, pm->pm_data_buf,
4754 pm->pm_data_len, 1);
4755 break;
4756
4757 case EMLXS_DOWNLOAD_CFL:
4758 {
4759 uint32_t *buffer;
4760 uint32_t region;
4761 uint32_t length;
4762
4763 if (!(hba->flag & FC_ONLINE_MODE)) {
4764 return (FC_OFFLINE);
4765 }
4766
4767 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4768 "fca_port_manage: DOWNLOAD_CFL");
4769
4770 /* Extract the region number from the first word. */
4771 buffer = (uint32_t *)pm->pm_data_buf;
4772 region = *buffer++;
4773
4774 /* Adjust the image length for the header word */
4775 length = pm->pm_data_len - 4;
4776
4777 ret =
4778 emlxs_cfl_download(hba, region, (caddr_t)buffer,
4779 length);
4780 break;
4781 }
4782
4783 case EMLXS_VPD_GET:
4784 {
4785 emlxs_vpd_desc_t *vpd_out;
4786
4787 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4788 "fca_port_manage: VPD_GET");
4789
4790 if (pm->pm_stat_len < sizeof (emlxs_vpd_desc_t)) {
4791 ret = FC_BADCMD;
4792 break;
4793 }
4794
4795 vpd_out = (emlxs_vpd_desc_t *)pm->pm_stat_buf;
4796 bzero(vpd_out, pm->pm_stat_len);
4797
4798 (void) strncpy(vpd_out->id, vpd->id,
4799 (sizeof (vpd_out->id)-1));
4800 (void) strncpy(vpd_out->part_num, vpd->part_num,
4801 (sizeof (vpd_out->part_num)-1));
4802 (void) strncpy(vpd_out->eng_change, vpd->eng_change,
4803 (sizeof (vpd_out->eng_change)-1));
4804 (void) strncpy(vpd_out->manufacturer, vpd->manufacturer,
4805 (sizeof (vpd_out->manufacturer)-1));
4806 (void) strncpy(vpd_out->serial_num, vpd->serial_num,
4807 (sizeof (vpd_out->serial_num)-1));
4808 (void) strncpy(vpd_out->model, vpd->model,
4809 (sizeof (vpd_out->model)-1));
4810 (void) strncpy(vpd_out->model_desc, vpd->model_desc,
4811 (sizeof (vpd_out->model_desc)-1));
4812 (void) strncpy(vpd_out->port_num, vpd->port_num,
4813 (sizeof (vpd_out->port_num)-1));
4814 (void) strncpy(vpd_out->prog_types, vpd->prog_types,
4815 (sizeof (vpd_out->prog_types)-1));
4816
4817 ret = FC_SUCCESS;
4818
4819 break;
4820 }
4821
4822 case EMLXS_VPD_GET_V2:
4823 {
4824 emlxs_vpd_desc_v2_t *vpd_out;
4825
4826 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4827 "fca_port_manage: VPD_GET_V2");
4828
4829 if (pm->pm_stat_len < sizeof (emlxs_vpd_desc_v2_t)) {
4830 ret = FC_BADCMD;
4831 break;
4832 }
4833
4834 vpd_out = (emlxs_vpd_desc_v2_t *)pm->pm_stat_buf;
4835 bzero(vpd_out, pm->pm_stat_len);
4836
4837 (void) strncpy(vpd_out->id, vpd->id,
4838 (sizeof (vpd_out->id)-1));
4839 (void) strncpy(vpd_out->part_num, vpd->part_num,
4840 (sizeof (vpd_out->part_num)-1));
4841 (void) strncpy(vpd_out->eng_change, vpd->eng_change,
4842 (sizeof (vpd_out->eng_change)-1));
4843 (void) strncpy(vpd_out->manufacturer, vpd->manufacturer,
4844 (sizeof (vpd_out->manufacturer)-1));
4845 (void) strncpy(vpd_out->serial_num, vpd->serial_num,
4846 (sizeof (vpd_out->serial_num)-1));
4847 (void) strncpy(vpd_out->model, vpd->model,
4848 (sizeof (vpd_out->model)-1));
4849 (void) strncpy(vpd_out->model_desc, vpd->model_desc,
4850 (sizeof (vpd_out->model_desc)-1));
4851 (void) strncpy(vpd_out->port_num, vpd->port_num,
4852 (sizeof (vpd_out->port_num)-1));
4853 (void) strncpy(vpd_out->prog_types, vpd->prog_types,
4854 (sizeof (vpd_out->prog_types)-1));
4855
4856 ret = FC_SUCCESS;
4857
4858 break;
4859 }
4860
4861 case EMLXS_PHY_GET:
4862 {
4863 emlxs_phy_desc_t *phy_out;
4864 MAILBOXQ *mbq;
4865 MAILBOX4 *mb;
4866 IOCTL_COMMON_GET_PHY_DETAILS *phy;
4867 mbox_req_hdr_t *hdr_req;
4868
4869 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4870 "fca_port_manage: EMLXS_PHY_GET");
4871
4872 if (pm->pm_stat_len < sizeof (emlxs_phy_desc_t)) {
4873 ret = FC_BADCMD;
4874 break;
4875 }
4876
4877 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
4878 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4879 "Invalid sli_mode. mode=%d", hba->sli_mode);
4880 ret = FC_BADCMD;
4881 break;
4882 }
4883
4884 phy_out = (emlxs_phy_desc_t *)pm->pm_stat_buf;
4885 bzero(phy_out, sizeof (emlxs_phy_desc_t));
4886
4887 if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba,
4888 MEM_MBOX)) == 0) {
4889 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4890 "Unable to allocate mailbox buffer.");
4891 ret = FC_NOMEM;
4892 break;
4893 }
4894
4895 mb = (MAILBOX4*)mbq;
4896
4897 bzero((void *) mb, MAILBOX_CMD_SLI4_BSIZE);
4898
4899 mb->un.varSLIConfig.be.embedded = 1;
4900 mbq->mbox_cmpl = NULL;
4901
4902 mb->mbxCommand = MBX_SLI_CONFIG;
4903 mb->mbxOwner = OWN_HOST;
4904
4905 hdr_req = (mbox_req_hdr_t *)
4906 &mb->un.varSLIConfig.be.un_hdr.hdr_req;
4907 hdr_req->subsystem = IOCTL_SUBSYSTEM_COMMON;
4908 hdr_req->opcode = COMMON_OPCODE_GET_PHY_DETAILS;
4909 hdr_req->timeout = 0;
4910 hdr_req->req_length =
4911 sizeof (IOCTL_COMMON_GET_PHY_DETAILS);
4912
4913 phy = (IOCTL_COMMON_GET_PHY_DETAILS *)(hdr_req + 1);
4914
4915 /* Send read request */
4916 if (EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_WAIT, 0) !=
4917 MBX_SUCCESS) {
4918 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4919 "Unable to get PHY details. status=%x",
4920 mb->mbxStatus);
4921
4922 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
4923
4924 ret = FC_FAILURE;
4925 break;
4926 }
4927
4928 phy_out->phy_type = phy->params.response.phy_type;
4929 phy_out->interface_type =
4930 phy->params.response.interface_type;
4931 phy_out->misc_params = phy->params.response.misc_params;
4932 phy_out->rsvd[0] = phy->params.response.rsvd[0];
4933 phy_out->rsvd[1] = phy->params.response.rsvd[1];
4934 phy_out->rsvd[2] = phy->params.response.rsvd[2];
4935 phy_out->rsvd[3] = phy->params.response.rsvd[3];
4936
4937 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
4938
4939 ret = FC_SUCCESS;
4940 break;
4941 }
4942
4943 #ifdef NODE_THROTTLE_SUPPORT
4944 case EMLXS_SET_THROTTLE:
4945 {
4946 emlxs_node_t *node;
4947 uint32_t scope = 0;
4948 uint32_t i;
4949 char buf1[32];
4950 emlxs_throttle_desc_t *desc;
4951
4952 if ((pm->pm_data_buf == NULL) ||
4953 (pm->pm_data_len !=
4954 sizeof (emlxs_throttle_desc_t))) {
4955 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4956 "fca_port_manage: EMLXS_SET_THROTTLE: "
4957 "Descriptor buffer not valid. %d",
4958 pm->pm_data_len);
4959 ret = FC_BADCMD;
4960 break;
4961 }
4962
4963 if ((pm->pm_cmd_buf != NULL) &&
4964 (pm->pm_cmd_len == sizeof (uint32_t))) {
4965 scope = *(uint32_t *)pm->pm_cmd_buf;
4966 }
4967
4968 desc = (emlxs_throttle_desc_t *)pm->pm_data_buf;
4969 desc->throttle = MIN(desc->throttle, MAX_NODE_THROTTLE);
4970
4971 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4972 "fca_port_manage: EMLXS_SET_THROTTLE: scope=%d "
4973 "depth=%d",
4974 scope, desc->throttle);
4975
4976 rw_enter(&port->node_rwlock, RW_WRITER);
4977 switch (scope) {
4978 case 1: /* all */
4979 for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
4980 node = port->node_table[i];
4981 while (node != NULL) {
4982 node->io_throttle = desc->throttle;
4983
4984 EMLXS_MSGF(EMLXS_CONTEXT,
4985 &emlxs_sfs_debug_msg,
4986 "EMLXS_SET_THROTTLE: wwpn=%s "
4987 "depth=%d",
4988 emlxs_wwn_xlate(buf1, sizeof (buf1),
4989 (uint8_t *)&node->nlp_portname),
4990 node->io_throttle);
4991
4992 node = (NODELIST *)node->nlp_list_next;
4993 }
4994 }
4995 break;
4996
4997 case 2: /* FCP */
4998 for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
4999 node = port->node_table[i];
5000 while (node != NULL) {
5001 if (!(node->nlp_fcp_info &
5002 NLP_FCP_TGT_DEVICE)) {
5003 node = (NODELIST *)
5004 node->nlp_list_next;
5005 continue;
5006 }
5007
5008 node->io_throttle = desc->throttle;
5009
5010 EMLXS_MSGF(EMLXS_CONTEXT,
5011 &emlxs_sfs_debug_msg,
5012 "EMLXS_SET_THROTTLE: wwpn=%s "
5013 "depth=%d",
5014 emlxs_wwn_xlate(buf1, sizeof (buf1),
5015 (uint8_t *)&node->nlp_portname),
5016 node->io_throttle);
5017
5018 node = (NODELIST *)node->nlp_list_next;
5019 }
5020 }
5021 break;
5022
5023 case 0: /* WWPN */
5024 default:
5025 for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
5026 node = port->node_table[i];
5027 while (node != NULL) {
5028 if (bcmp((caddr_t)&node->nlp_portname,
5029 desc->wwpn, 8)) {
5030 node = (NODELIST *)
5031 node->nlp_list_next;
5032 continue;
5033 }
5034
5035 node->io_throttle = desc->throttle;
5036
5037 EMLXS_MSGF(EMLXS_CONTEXT,
5038 &emlxs_sfs_debug_msg,
5039 "EMLXS_SET_THROTTLE: wwpn=%s "
5040 "depth=%d",
5041 emlxs_wwn_xlate(buf1, sizeof (buf1),
5042 (uint8_t *)&node->nlp_portname),
5043 node->io_throttle);
5044
5045 goto set_throttle_done;
5046 }
5047 }
5048 set_throttle_done:
5049 break;
5050 }
5051
5052 rw_exit(&port->node_rwlock);
5053 ret = FC_SUCCESS;
5054
5055 break;
5056 }
5057
5058 case EMLXS_GET_THROTTLE:
5059 {
5060 emlxs_node_t *node;
5061 uint32_t i;
5062 uint32_t j;
5063 char buf1[32];
5064 uint32_t count;
5065 emlxs_throttle_desc_t *desc;
5066
5067 if (pm->pm_stat_len == sizeof (uint32_t)) {
5068 count = emlxs_nport_count(port);
5069 *(uint32_t *)pm->pm_stat_buf = count;
5070
5071 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5072 "fca_port_manage: EMLXS_GET_THROTTLE: "
5073 "count=%d",
5074 count);
5075
5076 ret = FC_SUCCESS;
5077 break;
5078 }
5079
5080 if ((pm->pm_stat_buf == NULL) ||
5081 (pm->pm_stat_len <
5082 sizeof (emlxs_throttle_desc_t))) {
5083 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5084 "fca_port_manage: EMLXS_GET_THROTTLE: "
5085 "Descriptor buffer too small. %d",
5086 				    pm->pm_stat_len);
5087 ret = FC_BADCMD;
5088 break;
5089 }
5090
5091 count = pm->pm_stat_len /
5092 sizeof (emlxs_throttle_desc_t);
5093 desc = (emlxs_throttle_desc_t *)pm->pm_stat_buf;
5094
5095 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5096 "fca_port_manage: EMLXS_GET_THROTTLE: max=%d",
5097 count);
5098
5099 rw_enter(&port->node_rwlock, RW_READER);
5100 j = 0;
5101 for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
5102 node = port->node_table[i];
5103 while (node != NULL) {
5104 if ((node->nlp_DID & 0xFFF000) ==
5105 0xFFF000) {
5106 node = (NODELIST *)
5107 node->nlp_list_next;
5108 continue;
5109 }
5110
5111 bcopy((uint8_t *)&node->nlp_portname,
5112 desc[j].wwpn, 8);
5113 desc[j].throttle = node->io_throttle;
5114
5115 EMLXS_MSGF(EMLXS_CONTEXT,
5116 &emlxs_sfs_debug_msg,
5117 "EMLXS_GET_THROTTLE: wwpn=%s "
5118 "depth=%d",
5119 emlxs_wwn_xlate(buf1, sizeof (buf1),
5120 desc[j].wwpn),
5121 desc[j].throttle);
5122
5123 j++;
5124 if (j >= count) {
5125 goto get_throttle_done;
5126 }
5127
5128 node = (NODELIST *)node->nlp_list_next;
5129 }
5130 }
5131 get_throttle_done:
5132 rw_exit(&port->node_rwlock);
5133 ret = FC_SUCCESS;
5134
5135 break;
5136 }
5137 #endif /* NODE_THROTTLE_SUPPORT */
5138
5139 case EMLXS_GET_FCIO_REV:
5140 {
5141 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5142 "fca_port_manage: GET_FCIO_REV");
5143
5144 if (pm->pm_stat_len < sizeof (uint32_t)) {
5145 ret = FC_NOMEM;
5146 break;
5147 }
5148
5149 bzero(pm->pm_stat_buf, pm->pm_stat_len);
5150 *(uint32_t *)pm->pm_stat_buf = FCIO_REV;
5151
5152 break;
5153 }
5154
5155 case EMLXS_GET_DFC_REV:
5156 {
5157 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5158 "fca_port_manage: GET_DFC_REV");
5159
5160 if (pm->pm_stat_len < sizeof (uint32_t)) {
5161 ret = FC_NOMEM;
5162 break;
5163 }
5164
5165 bzero(pm->pm_stat_buf, pm->pm_stat_len);
5166 *(uint32_t *)pm->pm_stat_buf = DFC_REV;
5167
5168 break;
5169 }
5170
5171 case EMLXS_SET_BOOT_STATE:
5172 case EMLXS_SET_BOOT_STATE_old:
5173 {
5174 uint32_t state;
5175
5176 if (!(hba->flag & FC_ONLINE_MODE)) {
5177 return (FC_OFFLINE);
5178 }
5179 if (pm->pm_cmd_len < sizeof (uint32_t)) {
5180 EMLXS_MSGF(EMLXS_CONTEXT,
5181 &emlxs_sfs_debug_msg,
5182 "fca_port_manage: SET_BOOT_STATE");
5183 ret = FC_BADCMD;
5184 break;
5185 }
5186
5187 state = *(uint32_t *)pm->pm_cmd_buf;
5188
5189 if (state == 0) {
5190 EMLXS_MSGF(EMLXS_CONTEXT,
5191 &emlxs_sfs_debug_msg,
5192 "fca_port_manage: SET_BOOT_STATE: "
5193 "Disable");
5194 ret = emlxs_boot_code_disable(hba);
5195 } else {
5196 EMLXS_MSGF(EMLXS_CONTEXT,
5197 &emlxs_sfs_debug_msg,
5198 "fca_port_manage: SET_BOOT_STATE: "
5199 "Enable");
5200 ret = emlxs_boot_code_enable(hba);
5201 }
5202
5203 break;
5204 }
5205
5206 case EMLXS_GET_BOOT_STATE:
5207 case EMLXS_GET_BOOT_STATE_old:
5208 {
5209 if (!(hba->flag & FC_ONLINE_MODE)) {
5210 return (FC_OFFLINE);
5211 }
5212 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5213 "fca_port_manage: GET_BOOT_STATE");
5214
5215 if (pm->pm_stat_len < sizeof (uint32_t)) {
5216 ret = FC_NOMEM;
5217 break;
5218 }
5219 bzero(pm->pm_stat_buf, pm->pm_stat_len);
5220
5221 ret = emlxs_boot_code_state(hba);
5222
5223 if (ret == FC_SUCCESS) {
5224 *(uint32_t *)pm->pm_stat_buf = 1;
5225 ret = FC_SUCCESS;
5226 } else if (ret == FC_FAILURE) {
5227 ret = FC_SUCCESS;
5228 }
5229
5230 break;
5231 }
5232
5233 case EMLXS_HW_ERROR_TEST:
5234 {
5235 /*
5236 * This command is used for simulating HW ERROR
5237 * on SLI4 only.
5238 */
5239 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5240 ret = FC_INVALID_REQUEST;
5241 break;
5242 }
5243 hba->sli.sli4.flag |= EMLXS_SLI4_HW_ERROR;
5244 break;
5245 }
5246
5247 case EMLXS_MB_TIMEOUT_TEST:
5248 {
5249 if (!(hba->flag & FC_ONLINE_MODE)) {
5250 return (FC_OFFLINE);
5251 }
5252
5253 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5254 "fca_port_manage: HW_ERROR_TEST");
5255
5256 /* Trigger a mailbox timeout */
5257 hba->mbox_timer = hba->timer_tics;
5258
5259 break;
5260 }
5261
5262 case EMLXS_TEST_CODE:
5263 {
5264 uint32_t *cmd;
5265
5266 if (!(hba->flag & FC_ONLINE_MODE)) {
5267 return (FC_OFFLINE);
5268 }
5269
5270 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5271 "fca_port_manage: TEST_CODE");
5272
5273 if (pm->pm_cmd_len < sizeof (uint32_t)) {
5274 EMLXS_MSGF(EMLXS_CONTEXT,
5275 &emlxs_sfs_debug_msg,
5276 "fca_port_manage: TEST_CODE. "
5277 "inbuf to small.");
5278
5279 ret = FC_BADCMD;
5280 break;
5281 }
5282
5283 cmd = (uint32_t *)pm->pm_cmd_buf;
5284
5285 ret = emlxs_test(hba, cmd[0],
5286 (pm->pm_cmd_len/sizeof (uint32_t)) - 1, &cmd[1]);
5287
5288 break;
5289 }
5290
5291 case EMLXS_BAR_IO:
5292 {
5293 uint32_t *cmd;
5294 uint32_t *datap;
5295 FCIO_Q_STAT_t *qp;
5296 clock_t time;
5297 uint32_t offset;
5298 caddr_t addr;
5299 uint32_t i;
5300 uint32_t tx_cnt;
5301 uint32_t chip_cnt;
5302
5303 cmd = (uint32_t *)pm->pm_cmd_buf;
5304 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5305 "fca_port_manage: BAR_IO %x %x %x",
5306 cmd[0], cmd[1], cmd[2]);
5307
5308 offset = cmd[1];
5309
5310 ret = FC_SUCCESS;
5311
5312 switch (cmd[0]) {
5313 case 2: /* bar1read */
5314 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5315 return (FC_BADCMD);
5316 }
5317
5318 /* Registers in this range are invalid */
5319 if ((offset >= 0x4C00) && (offset < 0x5000)) {
5320 return (FC_BADCMD);
5321 }
5322 if ((offset >= 0x5800) || (offset & 0x3)) {
5323 return (FC_BADCMD);
5324 }
5325 datap = (uint32_t *)pm->pm_stat_buf;
5326
5327 for (i = 0; i < pm->pm_stat_len;
5328 i += sizeof (uint32_t)) {
5329 if ((offset >= 0x4C00) &&
5330 (offset < 0x5000)) {
5331 pm->pm_stat_len = i;
5332 break;
5333 }
5334 if (offset >= 0x5800) {
5335 pm->pm_stat_len = i;
5336 break;
5337 }
5338 addr = hba->sli.sli4.bar1_addr + offset;
5339 *datap = READ_BAR1_REG(hba, addr);
5340 datap++;
5341 offset += sizeof (uint32_t);
5342 }
5343 #ifdef FMA_SUPPORT
5344 /* Access handle validation */
5345 EMLXS_CHK_ACC_HANDLE(hba,
5346 hba->sli.sli4.bar1_acc_handle);
5347 #endif /* FMA_SUPPORT */
5348 break;
5349 case 3: /* bar2read */
5350 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5351 return (FC_BADCMD);
5352 }
5353 if ((offset >= 0x1000) || (offset & 0x3)) {
5354 return (FC_BADCMD);
5355 }
5356 datap = (uint32_t *)pm->pm_stat_buf;
5357
5358 for (i = 0; i < pm->pm_stat_len;
5359 i += sizeof (uint32_t)) {
5360 *datap = READ_BAR2_REG(hba,
5361 hba->sli.sli4.bar2_addr + offset);
5362 datap++;
5363 offset += sizeof (uint32_t);
5364 }
5365 #ifdef FMA_SUPPORT
5366 /* Access handle validation */
5367 EMLXS_CHK_ACC_HANDLE(hba,
5368 hba->sli.sli4.bar2_acc_handle);
5369 #endif /* FMA_SUPPORT */
5370 break;
5371 case 4: /* bar1write */
5372 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5373 return (FC_BADCMD);
5374 }
5375 WRITE_BAR1_REG(hba, hba->sli.sli4.bar1_addr +
5376 offset, cmd[2]);
5377 #ifdef FMA_SUPPORT
5378 /* Access handle validation */
5379 EMLXS_CHK_ACC_HANDLE(hba,
5380 hba->sli.sli4.bar1_acc_handle);
5381 #endif /* FMA_SUPPORT */
5382 break;
5383 case 5: /* bar2write */
5384 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5385 return (FC_BADCMD);
5386 }
5387 WRITE_BAR2_REG(hba, hba->sli.sli4.bar2_addr +
5388 offset, cmd[2]);
5389 #ifdef FMA_SUPPORT
5390 /* Access handle validation */
5391 EMLXS_CHK_ACC_HANDLE(hba,
5392 hba->sli.sli4.bar2_acc_handle);
5393 #endif /* FMA_SUPPORT */
5394 break;
5395 case 6: /* dumpbsmbox */
5396 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5397 return (FC_BADCMD);
5398 }
5399 if (offset != 0) {
5400 return (FC_BADCMD);
5401 }
5402
5403 bcopy((caddr_t)hba->sli.sli4.bootstrapmb.virt,
5404 (caddr_t)pm->pm_stat_buf, 256);
5405 break;
5406 case 7: /* pciread */
5407 if ((offset >= 0x200) || (offset & 0x3)) {
5408 return (FC_BADCMD);
5409 }
5410 datap = (uint32_t *)pm->pm_stat_buf;
5411 for (i = 0; i < pm->pm_stat_len;
5412 i += sizeof (uint32_t)) {
5413 *datap = ddi_get32(hba->pci_acc_handle,
5414 (uint32_t *)(hba->pci_addr +
5415 offset));
5416 datap++;
5417 offset += sizeof (uint32_t);
5418 }
5419 #ifdef FMA_SUPPORT
5420 /* Access handle validation */
5421 EMLXS_CHK_ACC_HANDLE(hba, hba->pci_acc_handle);
5422 #endif /* FMA_SUPPORT */
5423 break;
5424 case 8: /* abortall */
5425 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5426 return (FC_BADCMD);
5427 }
5428 emlxs_abort_all(hba, &tx_cnt, &chip_cnt);
5429 datap = (uint32_t *)pm->pm_stat_buf;
5430 *datap++ = tx_cnt;
5431 *datap = chip_cnt;
5432 break;
5433 case 9: /* get_q_info */
5434 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5435 return (FC_BADCMD);
5436 }
5437 qp = (FCIO_Q_STAT_t *)pm->pm_stat_buf;
5438 for (i = 0; i < FCIO_MAX_EQS; i++) {
5439 addr = hba->sli.sli4.eq[i].addr.virt;
5440 qp->eq[i].host_index =
5441 hba->sli.sli4.eq[i].host_index;
5442 qp->eq[i].max_index =
5443 hba->sli.sli4.eq[i].max_index;
5444 qp->eq[i].qid =
5445 hba->sli.sli4.eq[i].qid;
5446 qp->eq[i].msix_vector =
5447 hba->sli.sli4.eq[i].msix_vector;
5448 qp->eq[i].phys =
5449 hba->sli.sli4.eq[i].addr.phys;
5450 qp->eq[i].virt = PADDR_LO(
5451 (uintptr_t)addr);
5452 qp->eq[i].virt_hi = PADDR_HI(
5453 (uintptr_t)addr);
5454 qp->eq[i].max_proc =
5455 hba->sli.sli4.eq[i].max_proc;
5456 qp->eq[i].isr_count =
5457 hba->sli.sli4.eq[i].isr_count;
5458 qp->eq[i].num_proc =
5459 hba->sli.sli4.eq[i].num_proc;
5460 }
5461 for (i = 0; i < FCIO_MAX_CQS; i++) {
5462 addr = hba->sli.sli4.cq[i].addr.virt;
5463 qp->cq[i].host_index =
5464 hba->sli.sli4.cq[i].host_index;
5465 qp->cq[i].max_index =
5466 hba->sli.sli4.cq[i].max_index;
5467 qp->cq[i].qid =
5468 hba->sli.sli4.cq[i].qid;
5469 qp->cq[i].eqid =
5470 hba->sli.sli4.cq[i].eqid;
5471 qp->cq[i].type =
5472 hba->sli.sli4.cq[i].type;
5473 qp->cq[i].phys =
5474 hba->sli.sli4.cq[i].addr.phys;
5475 qp->cq[i].virt = PADDR_LO(
5476 (uintptr_t)addr);
5477 qp->cq[i].virt_hi = PADDR_HI(
5478 (uintptr_t)addr);
5479 qp->cq[i].max_proc =
5480 hba->sli.sli4.cq[i].max_proc;
5481 qp->cq[i].isr_count =
5482 hba->sli.sli4.cq[i].isr_count;
5483 qp->cq[i].num_proc =
5484 hba->sli.sli4.cq[i].num_proc;
5485 }
5486 for (i = 0; i < FCIO_MAX_WQS; i++) {
5487 addr = hba->sli.sli4.wq[i].addr.virt;
5488 qp->wq[i].host_index =
5489 hba->sli.sli4.wq[i].host_index;
5490 qp->wq[i].max_index =
5491 hba->sli.sli4.wq[i].max_index;
5492 qp->wq[i].port_index =
5493 hba->sli.sli4.wq[i].port_index;
5494 qp->wq[i].release_depth =
5495 hba->sli.sli4.wq[i].release_depth;
5496 qp->wq[i].qid =
5497 hba->sli.sli4.wq[i].qid;
5498 qp->wq[i].cqid =
5499 hba->sli.sli4.wq[i].cqid;
5500 qp->wq[i].phys =
5501 hba->sli.sli4.wq[i].addr.phys;
5502 qp->wq[i].virt = PADDR_LO(
5503 (uintptr_t)addr);
5504 qp->wq[i].virt_hi = PADDR_HI(
5505 (uintptr_t)addr);
5506 qp->wq[i].num_proc =
5507 hba->sli.sli4.wq[i].num_proc;
5508 qp->wq[i].num_busy =
5509 hba->sli.sli4.wq[i].num_busy;
5510 }
5511 for (i = 0; i < FCIO_MAX_RQS; i++) {
5512 addr = hba->sli.sli4.rq[i].addr.virt;
5513 qp->rq[i].qid =
5514 hba->sli.sli4.rq[i].qid;
5515 qp->rq[i].cqid =
5516 hba->sli.sli4.rq[i].cqid;
5517 qp->rq[i].host_index =
5518 hba->sli.sli4.rq[i].host_index;
5519 qp->rq[i].max_index =
5520 hba->sli.sli4.rq[i].max_index;
5521 qp->rq[i].phys =
5522 hba->sli.sli4.rq[i].addr.phys;
5523 qp->rq[i].virt = PADDR_LO(
5524 (uintptr_t)addr);
5525 qp->rq[i].virt_hi = PADDR_HI(
5526 (uintptr_t)addr);
5527 qp->rq[i].num_proc =
5528 hba->sli.sli4.rq[i].num_proc;
5529 }
5530 qp->que_start_timer =
5531 hba->sli.sli4.que_stat_timer;
5532 (void) drv_getparm(LBOLT, &time);
5533 qp->que_current_timer = (uint32_t)time;
5534 qp->intr_count = hba->intr_count;
5535 break;
5536 case 10: /* zero_q_stat */
5537 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5538 return (FC_BADCMD);
5539 }
5540 emlxs_sli4_zero_queue_stat(hba);
5541 break;
5542 default:
5543 ret = FC_BADCMD;
5544 break;
5545 }
5546 break;
5547 }
5548
5549 default:
5550
5551 ret = FC_INVALID_REQUEST;
5552 break;
5553 }
5554
5555 break;
5556
5557 }
5558
5559 case FC_PORT_INITIALIZE:
5560 if (!(hba->flag & FC_ONLINE_MODE)) {
5561 return (FC_OFFLINE);
5562 }
5563 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5564 "fca_port_manage: FC_PORT_INITIALIZE");
5565 break;
5566
5567 case FC_PORT_LOOPBACK:
5568 if (!(hba->flag & FC_ONLINE_MODE)) {
5569 return (FC_OFFLINE);
5570 }
5571 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5572 "fca_port_manage: FC_PORT_LOOPBACK");
5573 break;
5574
5575 case FC_PORT_BYPASS:
5576 if (!(hba->flag & FC_ONLINE_MODE)) {
5577 return (FC_OFFLINE);
5578 }
5579 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5580 "fca_port_manage: FC_PORT_BYPASS");
5581 ret = FC_INVALID_REQUEST;
5582 break;
5583
5584 case FC_PORT_UNBYPASS:
5585 if (!(hba->flag & FC_ONLINE_MODE)) {
5586 return (FC_OFFLINE);
5587 }
5588 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5589 "fca_port_manage: FC_PORT_UNBYPASS");
5590 ret = FC_INVALID_REQUEST;
5591 break;
5592
5593 case FC_PORT_GET_NODE_ID:
5594 {
5595 fc_rnid_t *rnid;
5596
5597 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5598 "fca_port_manage: FC_PORT_GET_NODE_ID");
5599
5600 bzero(pm->pm_data_buf, pm->pm_data_len);
5601
5602 if (pm->pm_data_len < sizeof (fc_rnid_t)) {
5603 ret = FC_NOMEM;
5604 break;
5605 }
5606
5607 rnid = (fc_rnid_t *)pm->pm_data_buf;
5608
5609 (void) snprintf((char *)rnid->global_id,
5610 (sizeof (rnid->global_id)-1),
5611 "%01x%01x%02x%02x%02x%02x%02x%02x%02x",
5612 hba->wwpn.nameType, hba->wwpn.IEEEextMsn,
5613 hba->wwpn.IEEEextLsb, hba->wwpn.IEEE[0],
5614 hba->wwpn.IEEE[1], hba->wwpn.IEEE[2], hba->wwpn.IEEE[3],
5615 hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
5616
5617 rnid->unit_type = RNID_HBA;
5618 rnid->port_id = port->did;
5619 rnid->ip_version = RNID_IPV4;
5620
5621 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5622 "GET_NODE_ID: wwpn: %s", rnid->global_id);
5623 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5624 "GET_NODE_ID: unit_type: 0x%x", rnid->unit_type);
5625 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5626 "GET_NODE_ID: port_id: 0x%x", rnid->port_id);
5627 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5628 "GET_NODE_ID: num_attach: %d", rnid->num_attached);
5629 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5630 "GET_NODE_ID: ip_version: 0x%x", rnid->ip_version);
5631 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5632 "GET_NODE_ID: udp_port: 0x%x", rnid->udp_port);
5633 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5634 "GET_NODE_ID: ip_addr: %s", rnid->ip_addr);
5635 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5636 "GET_NODE_ID: resv: 0x%x", rnid->specific_id_resv);
5637 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5638 "GET_NODE_ID: topo_flags: 0x%x", rnid->topo_flags);
5639
5640 ret = FC_SUCCESS;
5641 break;
5642 }
5643
5644 case FC_PORT_SET_NODE_ID:
5645 {
5646 fc_rnid_t *rnid;
5647
5648 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5649 "fca_port_manage: FC_PORT_SET_NODE_ID");
5650
5651 if (pm->pm_data_len < sizeof (fc_rnid_t)) {
5652 ret = FC_NOMEM;
5653 break;
5654 }
5655
5656 rnid = (fc_rnid_t *)pm->pm_data_buf;
5657
5658 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5659 "SET_NODE_ID: wwpn: %s", rnid->global_id);
5660 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5661 "SET_NODE_ID: unit_type: 0x%x", rnid->unit_type);
5662 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5663 "SET_NODE_ID: port_id: 0x%x", rnid->port_id);
5664 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5665 "SET_NODE_ID: num_attach: %d", rnid->num_attached);
5666 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5667 "SET_NODE_ID: ip_version: 0x%x", rnid->ip_version);
5668 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5669 "SET_NODE_ID: udp_port: 0x%x", rnid->udp_port);
5670 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5671 "SET_NODE_ID: ip_addr: %s", rnid->ip_addr);
5672 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5673 "SET_NODE_ID: resv: 0x%x", rnid->specific_id_resv);
5674 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5675 "SET_NODE_ID: topo_flags: 0x%x", rnid->topo_flags);
5676
5677 ret = FC_SUCCESS;
5678 break;
5679 }
5680
5681 #ifdef S11
5682 case FC_PORT_GET_P2P_INFO:
5683 {
5684 fc_fca_p2p_info_t *p2p_info;
5685 NODELIST *ndlp;
5686
5687 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5688 "fca_port_manage: FC_PORT_GET_P2P_INFO");
5689
5690 bzero(pm->pm_data_buf, pm->pm_data_len);
5691
5692 if (pm->pm_data_len < sizeof (fc_fca_p2p_info_t)) {
5693 ret = FC_NOMEM;
5694 break;
5695 }
5696
5697 p2p_info = (fc_fca_p2p_info_t *)pm->pm_data_buf;
5698
5699 if (hba->state >= FC_LINK_UP) {
5700 if ((hba->topology == TOPOLOGY_PT_PT) &&
5701 (hba->flag & FC_PT_TO_PT)) {
5702 p2p_info->fca_d_id = port->did;
5703 p2p_info->d_id = port->rdid;
5704
5705 ndlp = emlxs_node_find_did(port,
5706 port->rdid, 1);
5707
5708 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5709 "FC_PORT_GET_P2P_INFO: fca_d_id: 0x%x, "
5710 "d_id: 0x%x, ndlp: 0x%p", port->did,
5711 port->rdid, ndlp);
5712 if (ndlp) {
5713 bcopy(&ndlp->nlp_portname,
5714 (caddr_t)&p2p_info->pwwn,
5715 sizeof (la_wwn_t));
5716 bcopy(&ndlp->nlp_nodename,
5717 (caddr_t)&p2p_info->nwwn,
5718 sizeof (la_wwn_t));
5719
5720 ret = FC_SUCCESS;
5721 break;
5722
5723 }
5724 }
5725 }
5726
5727 ret = FC_FAILURE;
5728 break;
5729 }
5730 #endif /* S11 */
5731
5732 default:
5733 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5734 "fca_port_manage: code=%x", pm->pm_cmd_code);
5735 ret = FC_INVALID_REQUEST;
5736 break;
5737
5738 }
5739
5740 return (ret);
5741
5742 } /* emlxs_fca_port_manage() */
5743
5744
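/*
 * Driver test hook invoked through EMLXS_TEST_CODE. Only test code 1
 * (simulated SCSI underrun) is implemented, and only when the driver
 * is built with TEST_SUPPORT.
 */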
5745 /*ARGSUSED*/
5746 static uint32_t
5747 emlxs_test(emlxs_hba_t *hba, uint32_t test_code, uint32_t args,
5748 uint32_t *arg)
5749 {
5750 uint32_t rval = 0;
5751 emlxs_port_t *port = &PPORT;
5752
5753 switch (test_code) {
5754 #ifdef TEST_SUPPORT
5755 case 1: /* SCSI underrun */
5756 {
5757 hba->underrun_counter = (args)? arg[0]:1;
5758 break;
5759 }
5760 #endif /* TEST_SUPPORT */
5761
5762 default:
5763 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5764 "test: Unsupported test code. (0x%x)", test_code);
5765 rval = FC_INVALID_REQUEST;
5766 }
5767
5768 return (rval);
5769
5770 } /* emlxs_test() */
5771
5772
5773 /*
5774 * Given the device number, return the devinfo pointer or the ddiinst number.
5775 * Note: this routine must be successful on DDI_INFO_DEVT2INSTANCE even
5776 * before attach.
5777 *
5778 * Translate "dev_t" to a pointer to the associated "dev_info_t".
5779 */
5780 /*ARGSUSED*/
5781 static int
5782 emlxs_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
5783 {
5784 emlxs_hba_t *hba;
5785 int32_t ddiinst;
5786
5787 ddiinst = getminor((dev_t)arg);
5788
5789 switch (infocmd) {
5790 case DDI_INFO_DEVT2DEVINFO:
5791 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5792 if (hba)
5793 *result = hba->dip;
5794 else
5795 *result = NULL;
5796 break;
5797
5798 case DDI_INFO_DEVT2INSTANCE:
5799 *result = (void *)((unsigned long)ddiinst);
5800 break;
5801
5802 default:
5803 return (DDI_FAILURE);
5804 }
5805
5806 return (DDI_SUCCESS);
5807
5808 } /* emlxs_info() */
5809
5810
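/*
 * Power management entry point for the adapter component. Raises or
 * lowers hba->pm_level, resuming or suspending the adapter as needed
 * unless an attach or detach is already handling the transition.
 */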
5811 static int32_t
5812 emlxs_power(dev_info_t *dip, int32_t comp, int32_t level)
5813 {
5814 emlxs_hba_t *hba;
5815 emlxs_port_t *port;
5816 int32_t ddiinst;
5817 int rval = DDI_SUCCESS;
5818
5819 ddiinst = ddi_get_instance(dip);
5820 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5821 port = &PPORT;
5822
5823 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5824 "fca_power: comp=%x level=%x", comp, level);
5825
5826 if (hba == NULL || comp != EMLXS_PM_ADAPTER) {
5827 return (DDI_FAILURE);
5828 }
5829
5830 mutex_enter(&EMLXS_PM_LOCK);
5831
5832 /* If we are already at the proper level then return success */
5833 if (hba->pm_level == level) {
5834 mutex_exit(&EMLXS_PM_LOCK);
5835 return (DDI_SUCCESS);
5836 }
5837
5838 switch (level) {
5839 case EMLXS_PM_ADAPTER_UP:
5840
5841 /*
5842 * If we are already in emlxs_attach,
5843 * let emlxs_hba_attach take care of things
5844 */
5845 if (hba->pm_state & EMLXS_PM_IN_ATTACH) {
5846 hba->pm_level = EMLXS_PM_ADAPTER_UP;
5847 break;
5848 }
5849
5850 /* Check if adapter is suspended */
5851 if (hba->pm_state & EMLXS_PM_SUSPENDED) {
5852 hba->pm_level = EMLXS_PM_ADAPTER_UP;
5853
5854 /* Try to resume the port */
5855 rval = emlxs_hba_resume(dip);
5856
5857 if (rval != DDI_SUCCESS) {
5858 hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
5859 }
5860 break;
5861 }
5862
5863 /* Set adapter up */
5864 hba->pm_level = EMLXS_PM_ADAPTER_UP;
5865 break;
5866
5867 case EMLXS_PM_ADAPTER_DOWN:
5868
5869
5870 /*
5871 * If we are already in emlxs_detach,
5872 * let emlxs_hba_detach take care of things
5873 */
5874 if (hba->pm_state & EMLXS_PM_IN_DETACH) {
5875 hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
5876 break;
5877 }
5878
5879 /* Check if adapter is not suspended */
5880 if (!(hba->pm_state & EMLXS_PM_SUSPENDED)) {
5881 hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
5882
5883 /* Try to suspend the port */
5884 rval = emlxs_hba_suspend(dip);
5885
5886 if (rval != DDI_SUCCESS) {
5887 hba->pm_level = EMLXS_PM_ADAPTER_UP;
5888 }
5889
5890 break;
5891 }
5892
5893 /* Set adapter down */
5894 hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
5895 break;
5896
5897 default:
5898 rval = DDI_FAILURE;
5899 break;
5900
5901 }
5902
5903 mutex_exit(&EMLXS_PM_LOCK);
5904
5905 return (rval);
5906
5907 } /* emlxs_power() */
5908
5909
5910 #ifdef EMLXS_I386
5911 #ifdef S11
5912 /*
5913 * quiesce(9E) entry point.
5914 *
5915  * This function is called when the system is single-threaded at high PIL
5916  * with preemption disabled. Therefore, this function must not block.
5917 *
5918 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
5919 * DDI_FAILURE indicates an error condition and should almost never happen.
5920 */
5921 static int
5922 emlxs_quiesce(dev_info_t *dip)
5923 {
5924 emlxs_hba_t *hba;
5925 emlxs_port_t *port;
5926 int32_t ddiinst;
5927 int rval = DDI_SUCCESS;
5928
5929 ddiinst = ddi_get_instance(dip);
5930 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5931 port = &PPORT;
5932
5933 if (hba == NULL || port == NULL) {
5934 return (DDI_FAILURE);
5935 }
5936
5937 /* The fourth arg 1 indicates the call is from quiesce */
5938 if (EMLXS_SLI_HBA_RESET(hba, 1, 1, 1) == 0) {
5939 return (rval);
5940 } else {
5941 return (DDI_FAILURE);
5942 }
5943
5944 } /* emlxs_quiesce */
5945 #endif /* S11 */
5946 #endif /* EMLXS_I386 */
5947
5948
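/*
 * Character device open entry point. Rejects opens while the driver is
 * suspended, requires a character open with sufficient privilege, and
 * enforces exclusive-open (FEXCL) semantics under EMLXS_IOCTL_LOCK.
 */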
5949 static int
5950 emlxs_open(dev_t *dev_p, int32_t flag, int32_t otype, cred_t *cred_p)
5951 {
5952 emlxs_hba_t *hba;
5953 emlxs_port_t *port;
5954 int ddiinst;
5955
5956 ddiinst = getminor(*dev_p);
5957 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5958
5959 if (hba == NULL) {
5960 return (ENXIO);
5961 }
5962
5963 port = &PPORT;
5964
5965 if (hba->pm_state & EMLXS_PM_SUSPENDED) {
5966 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
5967 "open failed: Driver suspended.");
5968 return (ENXIO);
5969 }
5970
5971 if (otype != OTYP_CHR) {
5972 return (EINVAL);
5973 }
5974
5975 if (drv_priv(cred_p)) {
5976 return (EPERM);
5977 }
5978
5979 mutex_enter(&EMLXS_IOCTL_LOCK);
5980
5981 if (hba->ioctl_flags & EMLXS_OPEN_EXCLUSIVE) {
5982 mutex_exit(&EMLXS_IOCTL_LOCK);
5983 return (EBUSY);
5984 }
5985
5986 if (flag & FEXCL) {
5987 if (hba->ioctl_flags & EMLXS_OPEN) {
5988 mutex_exit(&EMLXS_IOCTL_LOCK);
5989 return (EBUSY);
5990 }
5991
5992 hba->ioctl_flags |= EMLXS_OPEN_EXCLUSIVE;
5993 }
5994
5995 hba->ioctl_flags |= EMLXS_OPEN;
5996
5997 mutex_exit(&EMLXS_IOCTL_LOCK);
5998
5999 return (0);
6000
6001 } /* emlxs_open() */
6002
6003
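/*
 * Character device close entry point. Clears the open and
 * exclusive-open flags recorded at open time.
 */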
6004 /*ARGSUSED*/
6005 static int
6006 emlxs_close(dev_t dev, int32_t flag, int32_t otype, cred_t *cred_p)
6007 {
6008 emlxs_hba_t *hba;
6009 int ddiinst;
6010
6011 ddiinst = getminor(dev);
6012 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
6013
6014 if (hba == NULL) {
6015 return (ENXIO);
6016 }
6017
6018 if (otype != OTYP_CHR) {
6019 return (EINVAL);
6020 }
6021
6022 mutex_enter(&EMLXS_IOCTL_LOCK);
6023
6024 if (!(hba->ioctl_flags & EMLXS_OPEN)) {
6025 mutex_exit(&EMLXS_IOCTL_LOCK);
6026 return (ENODEV);
6027 }
6028
6029 hba->ioctl_flags &= ~EMLXS_OPEN;
6030 hba->ioctl_flags &= ~EMLXS_OPEN_EXCLUSIVE;
6031
6032 mutex_exit(&EMLXS_IOCTL_LOCK);
6033
6034 return (0);
6035
6036 } /* emlxs_close() */
6037
6038
6039 /*ARGSUSED*/
6040 static int
6041 emlxs_ioctl(dev_t dev, int32_t cmd, intptr_t arg, int32_t mode,
6042 cred_t *cred_p, int32_t *rval_p)
6043 {
6044 emlxs_hba_t *hba;
6045 emlxs_port_t *port;
6046 int rval = 0; /* return code */
6047 int ddiinst;
6048
6049 ddiinst = getminor(dev);
6050 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
6051
6052 if (hba == NULL) {
6053 return (ENXIO);
6054 }
6055
6056 port = &PPORT;
6057
6058 if (hba->pm_state & EMLXS_PM_SUSPENDED) {
6059 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
6060 "ioctl failed: Driver suspended.");
6061
6062 return (ENXIO);
6063 }
6064
6065 mutex_enter(&EMLXS_IOCTL_LOCK);
6066 if (!(hba->ioctl_flags & EMLXS_OPEN)) {
6067 mutex_exit(&EMLXS_IOCTL_LOCK);
6068 return (ENXIO);
6069 }
6070 mutex_exit(&EMLXS_IOCTL_LOCK);
6071
6072 #ifdef IDLE_TIMER
6073 emlxs_pm_busy_component(hba);
6074 #endif /* IDLE_TIMER */
6075
6076 switch (cmd) {
6077 case EMLXS_DFC_COMMAND:
6078 rval = emlxs_dfc_manage(hba, (void *)arg, mode);
6079 break;
6080
6081 default:
6082 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
6083 "ioctl: Invalid command received. cmd=%x", cmd);
6084 rval = EINVAL;
6085 }
6086
6087 return (rval);
6088
6089 } /* emlxs_ioctl() */
6090
6091
6092
6093 /*
6094 *
6095 * Device Driver Common Routines
6096 *
6097 */
6098
6099 /* EMLXS_PM_LOCK must be held for this call */
6100 static int
6101 emlxs_hba_resume(dev_info_t *dip)
6102 {
6103 emlxs_hba_t *hba;
6104 emlxs_port_t *port;
6105 int ddiinst;
6106
6107 ddiinst = ddi_get_instance(dip);
6108 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
6109 port = &PPORT;
6110
6111 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_resume_msg, NULL);
6112
6113 if (!(hba->pm_state & EMLXS_PM_SUSPENDED)) {
6114 return (DDI_SUCCESS);
6115 }
6116
6117 hba->pm_state &= ~EMLXS_PM_SUSPENDED;
6118
6119 /* Re-enable the physical port on this HBA */
6120 port->flag |= EMLXS_PORT_ENABLED;
6121
6122 /* Take the adapter online */
6123 if (emlxs_power_up(hba)) {
6124 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_resume_failed_msg,
6125 "Unable to take adapter online.");
6126
6127 hba->pm_state |= EMLXS_PM_SUSPENDED;
6128
6129 return (DDI_FAILURE);
6130 }
6131
6132 return (DDI_SUCCESS);
6133
6134 } /* emlxs_hba_resume() */
6135
6136
6137 /* EMLXS_PM_LOCK must be held for this call */
6138 static int
6139 emlxs_hba_suspend(dev_info_t *dip)
6140 {
6141 emlxs_hba_t *hba;
6142 emlxs_port_t *port;
6143 int ddiinst;
6144
6145 ddiinst = ddi_get_instance(dip);
6146 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
6147 port = &PPORT;
6148
6149 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_suspend_msg, NULL);
6150
6151 if (hba->pm_state & EMLXS_PM_SUSPENDED) {
6152 return (DDI_SUCCESS);
6153 }
6154
6155 hba->pm_state |= EMLXS_PM_SUSPENDED;
6156
6157 /* Take the adapter offline */
6158 if (emlxs_power_down(hba)) {
6159 hba->pm_state &= ~EMLXS_PM_SUSPENDED;
6160
6161 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_suspend_failed_msg,
6162 "Unable to take adapter offline.");
6163
6164 return (DDI_FAILURE);
6165 }
6166
6167 return (DDI_SUCCESS);
6168
6169 } /* emlxs_hba_suspend() */
6170
6171
6172
6173 static void
6174 emlxs_lock_init(emlxs_hba_t *hba)
6175 {
6176 emlxs_port_t *port = &PPORT;
6177 uint32_t i;
6178
6179 /* Initialize the power management */
6180 mutex_init(&EMLXS_PM_LOCK, NULL, MUTEX_DRIVER,
6181 DDI_INTR_PRI(hba->intr_arg));
6182
6183 mutex_init(&EMLXS_TIMER_LOCK, NULL, MUTEX_DRIVER,
6184 DDI_INTR_PRI(hba->intr_arg));
6185
6186 cv_init(&hba->timer_lock_cv, NULL, CV_DRIVER, NULL);
6187
6188 mutex_init(&EMLXS_PORT_LOCK, NULL, MUTEX_DRIVER,
6189 DDI_INTR_PRI(hba->intr_arg));
6190
6191 mutex_init(&EMLXS_MBOX_LOCK, NULL, MUTEX_DRIVER,
6192 DDI_INTR_PRI(hba->intr_arg));
6193
6194 cv_init(&EMLXS_MBOX_CV, NULL, CV_DRIVER, NULL);
6195
6196 mutex_init(&EMLXS_LINKUP_LOCK, NULL, MUTEX_DRIVER,
6197 DDI_INTR_PRI(hba->intr_arg));
6198
6199 cv_init(&EMLXS_LINKUP_CV, NULL, CV_DRIVER, NULL);
6200
6201 mutex_init(&EMLXS_TX_CHANNEL_LOCK, NULL, MUTEX_DRIVER,
6202 DDI_INTR_PRI(hba->intr_arg));
6203
6204 for (i = 0; i < MAX_RINGS; i++) {
6205 mutex_init(&EMLXS_CMD_RING_LOCK(i), NULL, MUTEX_DRIVER,
6206 DDI_INTR_PRI(hba->intr_arg));
6207 }
6208
6209
6210 for (i = 0; i < EMLXS_MAX_WQS; i++) {
6211 mutex_init(&EMLXS_QUE_LOCK(i), NULL, MUTEX_DRIVER,
6212 DDI_INTR_PRI(hba->intr_arg));
6213 }
6214
6215 mutex_init(&EMLXS_MSIID_LOCK, NULL, MUTEX_DRIVER,
6216 DDI_INTR_PRI(hba->intr_arg));
6217
6218 mutex_init(&EMLXS_FCTAB_LOCK, NULL, MUTEX_DRIVER,
6219 DDI_INTR_PRI(hba->intr_arg));
6220
6221 mutex_init(&EMLXS_MEMGET_LOCK, NULL, MUTEX_DRIVER,
6222 DDI_INTR_PRI(hba->intr_arg));
6223
6224 mutex_init(&EMLXS_MEMPUT_LOCK, NULL, MUTEX_DRIVER,
6225 DDI_INTR_PRI(hba->intr_arg));
6226
6227 mutex_init(&EMLXS_IOCTL_LOCK, NULL, MUTEX_DRIVER,
6228 DDI_INTR_PRI(hba->intr_arg));
6229
6230 #ifdef DUMP_SUPPORT
6231 mutex_init(&EMLXS_DUMP_LOCK, NULL, MUTEX_DRIVER,
6232 DDI_INTR_PRI(hba->intr_arg));
6233 #endif /* DUMP_SUPPORT */
6234
6235 mutex_init(&EMLXS_SPAWN_LOCK, NULL, MUTEX_DRIVER,
6236 DDI_INTR_PRI(hba->intr_arg));
6237
6238 /* Create per port locks */
6239 for (i = 0; i < MAX_VPORTS; i++) {
6240 port = &VPORT(i);
6241
6242 rw_init(&port->node_rwlock, NULL, RW_DRIVER, NULL);
6243
6244 if (i == 0) {
6245 mutex_init(&EMLXS_PKT_LOCK, NULL, MUTEX_DRIVER,
6246 DDI_INTR_PRI(hba->intr_arg));
6247
6248 cv_init(&EMLXS_PKT_CV, NULL, CV_DRIVER, NULL);
6249
6250 mutex_init(&EMLXS_UB_LOCK, NULL, MUTEX_DRIVER,
6251 DDI_INTR_PRI(hba->intr_arg));
6252 } else {
6253 mutex_init(&EMLXS_PKT_LOCK, NULL, MUTEX_DRIVER,
6254 DDI_INTR_PRI(hba->intr_arg));
6255
6256 cv_init(&EMLXS_PKT_CV, NULL, CV_DRIVER, NULL);
6257
6258 mutex_init(&EMLXS_UB_LOCK, NULL, MUTEX_DRIVER,
6259 DDI_INTR_PRI(hba->intr_arg));
6260 }
6261 }
6262
6263 return;
6264
6265 } /* emlxs_lock_init() */
6266
6267
6268
6269 static void
6270 emlxs_lock_destroy(emlxs_hba_t *hba)
6271 {
6272 emlxs_port_t *port = &PPORT;
6273 uint32_t i;
6274
6275 mutex_destroy(&EMLXS_TIMER_LOCK);
6276 cv_destroy(&hba->timer_lock_cv);
6277
6278 mutex_destroy(&EMLXS_PORT_LOCK);
6279
6280 cv_destroy(&EMLXS_MBOX_CV);
6281 cv_destroy(&EMLXS_LINKUP_CV);
6282
6283 mutex_destroy(&EMLXS_LINKUP_LOCK);
6284 mutex_destroy(&EMLXS_MBOX_LOCK);
6285
6286 mutex_destroy(&EMLXS_TX_CHANNEL_LOCK);
6287
6288 for (i = 0; i < MAX_RINGS; i++) {
6289 mutex_destroy(&EMLXS_CMD_RING_LOCK(i));
6290 }
6291
6292 for (i = 0; i < EMLXS_MAX_WQS; i++) {
6293 mutex_destroy(&EMLXS_QUE_LOCK(i));
6294 }
6295
6296 mutex_destroy(&EMLXS_MSIID_LOCK);
6297
6298 mutex_destroy(&EMLXS_FCTAB_LOCK);
6299 mutex_destroy(&EMLXS_MEMGET_LOCK);
6300 mutex_destroy(&EMLXS_MEMPUT_LOCK);
6301 mutex_destroy(&EMLXS_IOCTL_LOCK);
6302 mutex_destroy(&EMLXS_SPAWN_LOCK);
6303 mutex_destroy(&EMLXS_PM_LOCK);
6304
6305 #ifdef DUMP_SUPPORT
6306 mutex_destroy(&EMLXS_DUMP_LOCK);
6307 #endif /* DUMP_SUPPORT */
6308
6309 /* Destroy per port locks */
6310 for (i = 0; i < MAX_VPORTS; i++) {
6311 port = &VPORT(i);
6312 rw_destroy(&port->node_rwlock);
6313 mutex_destroy(&EMLXS_PKT_LOCK);
6314 cv_destroy(&EMLXS_PKT_CV);
6315 mutex_destroy(&EMLXS_UB_LOCK);
6316 }
6317
6318 return;
6319
6320 } /* emlxs_lock_destroy() */
6321
6322
6323 /* init_flag values */
6324 #define ATTACH_SOFT_STATE 0x00000001
6325 #define ATTACH_FCA_TRAN 0x00000002
6326 #define ATTACH_HBA 0x00000004
6327 #define ATTACH_LOG 0x00000008
6328 #define ATTACH_MAP_BUS 0x00000010
6329 #define ATTACH_INTR_INIT 0x00000020
6330 #define ATTACH_PROP 0x00000040
6331 #define ATTACH_LOCK 0x00000080
6332 #define ATTACH_THREAD 0x00000100
6333 #define ATTACH_INTR_ADD 0x00000200
6334 #define ATTACH_ONLINE 0x00000400
6335 #define ATTACH_NODE 0x00000800
6336 #define ATTACH_FCT 0x00001000
6337 #define ATTACH_FCA 0x00002000
6338 #define ATTACH_KSTAT 0x00004000
6339 #define ATTACH_DHCHAP 0x00008000
6340 #define ATTACH_FM 0x00010000
6341 #define ATTACH_MAP_SLI 0x00020000
6342 #define ATTACH_SPAWN 0x00040000
6343 #define ATTACH_EVENTS 0x00080000
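/*
 * Illustrative sketch (not part of the driver logic; the details are in
 * emlxs_hba_attach() further below): attach sets one of these bits as each
 * resource is acquired, and emlxs_driver_remove() releases only the
 * resources whose bits are set, so a partially completed attach can be
 * unwound safely:
 *
 *	if (ddi_soft_state_zalloc(emlxs_soft_state, ddiinst) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *	init_flag |= ATTACH_SOFT_STATE;
 *	...
 * failed:
 *	emlxs_driver_remove(dip, init_flag, 1);
 */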
6344
6345 static void
6346 emlxs_driver_remove(dev_info_t *dip, uint32_t init_flag, uint32_t failed)
6347 {
6348 emlxs_hba_t *hba = NULL;
6349 int ddiinst;
6350
6351 ddiinst = ddi_get_instance(dip);
6352
6353 if (init_flag & ATTACH_HBA) {
6354 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
6355
6356 if (init_flag & ATTACH_SPAWN) {
6357 emlxs_thread_spawn_destroy(hba);
6358 }
6359
6360 if (init_flag & ATTACH_EVENTS) {
6361 (void) emlxs_event_queue_destroy(hba);
6362 }
6363
6364 if (init_flag & ATTACH_ONLINE) {
6365 (void) emlxs_offline(hba, 1);
6366 }
6367
6368 if (init_flag & ATTACH_INTR_ADD) {
6369 (void) EMLXS_INTR_REMOVE(hba);
6370 }
6371 #ifdef SFCT_SUPPORT
6372 if (init_flag & ATTACH_FCT) {
6373 emlxs_fct_detach(hba);
6374 emlxs_fct_modclose();
6375 }
6376 #endif /* SFCT_SUPPORT */
6377
6378 #ifdef DHCHAP_SUPPORT
6379 if (init_flag & ATTACH_DHCHAP) {
6380 emlxs_dhc_detach(hba);
6381 }
6382 #endif /* DHCHAP_SUPPORT */
6383
6384 if (init_flag & ATTACH_KSTAT) {
6385 kstat_delete(hba->kstat);
6386 }
6387
6388 if (init_flag & ATTACH_FCA) {
6389 emlxs_fca_detach(hba);
6390 }
6391
6392 if (init_flag & ATTACH_NODE) {
6393 (void) ddi_remove_minor_node(hba->dip, "devctl");
6394 }
6395
6396 if (init_flag & ATTACH_THREAD) {
6397 emlxs_thread_destroy(&hba->iodone_thread);
6398 }
6399
6400 if (init_flag & ATTACH_PROP) {
6401 (void) ddi_prop_remove_all(hba->dip);
6402 }
6403
6404 if (init_flag & ATTACH_LOCK) {
6405 emlxs_lock_destroy(hba);
6406 }
6407
6408 if (init_flag & ATTACH_INTR_INIT) {
6409 (void) EMLXS_INTR_UNINIT(hba);
6410 }
6411
6412 if (init_flag & ATTACH_MAP_BUS) {
6413 emlxs_unmap_bus(hba);
6414 }
6415
6416 if (init_flag & ATTACH_MAP_SLI) {
6417 EMLXS_SLI_UNMAP_HDW(hba);
6418 }
6419
6420 #ifdef FMA_SUPPORT
6421 if (init_flag & ATTACH_FM) {
6422 emlxs_fm_fini(hba);
6423 }
6424 #endif /* FMA_SUPPORT */
6425
6426 if (init_flag & ATTACH_LOG) {
6427 emlxs_msg_log_destroy(hba);
6428 }
6429
6430 if (init_flag & ATTACH_FCA_TRAN) {
6431 (void) ddi_set_driver_private(hba->dip, NULL);
6432 kmem_free(hba->fca_tran, sizeof (fc_fca_tran_t));
6433 hba->fca_tran = NULL;
6434 }
6435
6436 if (init_flag & ATTACH_HBA) {
6437 emlxs_device.log[hba->emlxinst] = 0;
6438 emlxs_device.hba[hba->emlxinst] =
6439 (emlxs_hba_t *)((unsigned long)((failed) ? -1 : 0));
6440 #ifdef DUMP_SUPPORT
6441 emlxs_device.dump_txtfile[hba->emlxinst] = 0;
6442 emlxs_device.dump_dmpfile[hba->emlxinst] = 0;
6443 emlxs_device.dump_ceefile[hba->emlxinst] = 0;
6444 #endif /* DUMP_SUPPORT */
6445
6446 }
6447 }
6448
6449 if (init_flag & ATTACH_SOFT_STATE) {
6450 (void) ddi_soft_state_free(emlxs_soft_state, ddiinst);
6451 }
6452
6453 return;
6454
6455 } /* emlxs_driver_remove() */
6456
6457
6458 /* This determines which ports will be initiator mode */
6459 static uint32_t
6460 emlxs_fca_init(emlxs_hba_t *hba)
6461 {
6462 emlxs_port_t *port = &PPORT;
6463
6464 /* Check if SFS present */
6465 if (((void *)MODSYM(fc_fca_init) == NULL) ||
6466 ((void *)MODSYM(fc_fca_attach) == NULL)) {
6467 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6468 "SFS not present.");
6469 return (1);
6470 }
6471
6472 /* Check if our SFS driver interface matches the current SFS stack */
6473 if (MODSYM(fc_fca_attach) (hba->dip, hba->fca_tran) != DDI_SUCCESS) {
6474 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6475 "SFS/FCA version mismatch. FCA=0x%x",
6476 hba->fca_tran->fca_version);
6477 return (1);
6478 }
6479
6480 return (0);
6481
6482 } /* emlxs_fca_init() */
6483
6484
6485 /* This determines which ports will be initiator or target mode */
6486 static void
6487 emlxs_mode_init(emlxs_hba_t *hba)
6488 {
6489 emlxs_port_t *port = &PPORT;
6490 emlxs_config_t *cfg = &CFG;
6491 emlxs_port_t *vport;
6492 uint32_t i;
6493 uint32_t mode_mask;
6494
6495 /* Initialize mode masks */
6496 (void) emlxs_mode_init_masks(hba);
6497
6498 if (!(port->mode_mask & MODE_INITIATOR)) {
6499 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6500 "Initiator mode not enabled.");
6501
6502 #ifdef SFCT_SUPPORT
6503 /* Disable dynamic target mode */
6504 cfg[CFG_DTM_ENABLE].current = 0;
6505 #endif /* SFCT_SUPPORT */
6506
6507 goto done1;
6508 }
6509
6510 /* Try to initialize fca interface */
6511 if (emlxs_fca_init(hba) != 0) {
6512 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6513 "Initiator mode disabled.");
6514
6515 /* Disable initiator mode */
6516 port->mode_mask &= ~MODE_INITIATOR;
6517
6518 #ifdef SFCT_SUPPORT
6519 /* Disable dynamic target mode */
6520 cfg[CFG_DTM_ENABLE].current = 0;
6521 #endif /* SFCT_SUPPORT */
6522
6523 goto done1;
6524 }
6525
6526 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6527 "Initiator mode enabled.");
6528
6529 done1:
6530
6531 #ifdef SFCT_SUPPORT
6532 if (!(port->mode_mask & MODE_TARGET)) {
6533 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6534 "Target mode not enabled.");
6535
6536 /* Disable target modes */
6537 cfg[CFG_DTM_ENABLE].current = 0;
6538 cfg[CFG_TARGET_MODE].current = 0;
6539
6540 goto done2;
6541 }
6542
6543 /* Try to open the COMSTAR module */
6544 if (emlxs_fct_modopen() != 0) {
6545 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6546 "Target mode disabled.");
6547
6548 /* Disable target modes */
6549 port->mode_mask &= ~MODE_TARGET;
6550 cfg[CFG_DTM_ENABLE].current = 0;
6551 cfg[CFG_TARGET_MODE].current = 0;
6552
6553 goto done2;
6554 }
6555
6556 /* Try to initialize fct interface */
6557 if (emlxs_fct_init(hba) != 0) {
6558 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6559 "Target mode disabled.");
6560
6561 /* Disable target modes */
6562 port->mode_mask &= ~MODE_TARGET;
6563 cfg[CFG_DTM_ENABLE].current = 0;
6564 cfg[CFG_TARGET_MODE].current = 0;
6565
6566 goto done2;
6567 }
6568
6569 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6570 "Target mode enabled.");
6571
6572 done2:
6573 /* Adjust target mode parameter flags */
6574 if (cfg[CFG_DTM_ENABLE].current) {
6575 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6576 "Dynamic target mode enabled.");
6577
6578 cfg[CFG_TARGET_MODE].flags |= PARM_DYNAMIC;
6579 } else {
6580 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6581 "Dynamic target mode disabled.");
6582
6583 cfg[CFG_TARGET_MODE].flags &= ~PARM_DYNAMIC;
6584 }
6585 #endif /* SFCT_SUPPORT */
6586
6587 /* Now set port flags */
6588 mutex_enter(&EMLXS_PORT_LOCK);
6589
6590 /* Set flags for physical port */
6591 if (port->mode_mask & MODE_INITIATOR) {
6592 port->flag |= EMLXS_INI_ENABLED;
6593 } else {
6594 port->flag &= ~EMLXS_INI_ENABLED;
6595 }
6596
6597 if (port->mode_mask & MODE_TARGET) {
6598 port->flag |= EMLXS_TGT_ENABLED;
6599 } else {
6600 port->flag &= ~EMLXS_TGT_ENABLED;
6601 }
6602
6603 for (i = 1; i < MAX_VPORTS; i++) {
6604 vport = &VPORT(i);
6605
6606 /* Physical port mask has only allowable bits */
6607 mode_mask = vport->mode_mask & port->mode_mask;
6608
6609 /* Set flags for this virtual port */
6610 if (mode_mask & MODE_INITIATOR) {
6611 vport->flag |= EMLXS_INI_ENABLED;
6612 } else {
6613 vport->flag &= ~EMLXS_INI_ENABLED;
6614 }
6615
6616 if (mode_mask & MODE_TARGET) {
6617 vport->flag |= EMLXS_TGT_ENABLED;
6618 } else {
6619 vport->flag &= ~EMLXS_TGT_ENABLED;
6620 }
6621 }
6622
6623 /* Set initial driver mode */
6624 emlxs_mode_set(hba);
6625
6626 mutex_exit(&EMLXS_PORT_LOCK);
6627
6628 /* Recheck possible mode dependent parameters */
6629 /* in case conditions have changed. */
6630 if (port->mode != MODE_NONE) {
6631 for (i = 0; i < NUM_CFG_PARAM; i++) {
6632 cfg = &hba->config[i];
6633 cfg->current = emlxs_check_parm(hba, i, cfg->current);
6634 }
6635 }
6636
6637 return;
6638
6639 } /* emlxs_mode_init() */
6640
6641
6642 /* This must be called while holding the EMLXS_PORT_LOCK */
6643 extern void
6644 emlxs_mode_set(emlxs_hba_t *hba)
6645 {
6646 emlxs_port_t *port = &PPORT;
6647 #ifdef SFCT_SUPPORT
6648 emlxs_config_t *cfg = &CFG;
6649 #endif /* SFCT_SUPPORT */
6650 emlxs_port_t *vport;
6651 uint32_t i;
6652 uint32_t cfg_tgt_mode = 0;
6653
6654 /* mutex_enter(&EMLXS_PORT_LOCK); */
6655
6656 #ifdef SFCT_SUPPORT
6657 cfg_tgt_mode = cfg[CFG_TARGET_MODE].current;
6658 #endif /* SFCT_SUPPORT */
6659
6660 /* Initiator mode requested */
6661 if (!cfg_tgt_mode) {
6662 for (i = 0; i < MAX_VPORTS; i++) {
6663 vport = &VPORT(i);
6664 vport->mode = (vport->flag & EMLXS_INI_ENABLED)?
6665 MODE_INITIATOR:MODE_NONE;
6666 }
6667 #ifdef SFCT_SUPPORT
6668 /* Target mode requested */
6669 } else {
6670 for (i = 0; i < MAX_VPORTS; i++) {
6671 vport = &VPORT(i);
6672 vport->mode = (vport->flag & EMLXS_TGT_ENABLED)?
6673 MODE_TARGET:MODE_NONE;
6674 }
6675 #endif /* SFCT_SUPPORT */
6676 }
6677
6678 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
6679 "MODE: %s", emlxs_mode_xlate(port->mode));
6680
6681 /* mutex_exit(&EMLXS_PORT_LOCK); */
6682
6683 return;
6684
6685 } /* emlxs_mode_set() */
6686
6687
6688 static void
6689 emlxs_mode_init_masks(emlxs_hba_t *hba)
6690 {
6691 emlxs_port_t *port = &PPORT;
6692 emlxs_port_t *vport;
6693 uint32_t i;
6694
6695 #ifdef SFCT_SUPPORT
6696 emlxs_config_t *cfg = &CFG;
6697 uint32_t vport_mode_mask;
6698 uint32_t cfg_vport_mode_mask;
6699 uint32_t mode_mask;
6700 char string[256];
6701
6702 port->mode_mask = 0;
6703
6704 if (!cfg[CFG_TARGET_MODE].current ||
6705 cfg[CFG_DTM_ENABLE].current) {
6706 port->mode_mask |= MODE_INITIATOR;
6707 }
6708
6709 if (cfg[CFG_TARGET_MODE].current ||
6710 cfg[CFG_DTM_ENABLE].current) {
6711 port->mode_mask |= MODE_TARGET;
6712 }
6713
6714 /* Physical port mask has only allowable bits */
6715 vport_mode_mask = port->mode_mask;
6716 cfg_vport_mode_mask = cfg[CFG_VPORT_MODE_MASK].current;
6717
6718 /* Check dynamic target mode value for virtual ports */
6719 if (cfg[CFG_DTM_ENABLE].current == 0) {
6720 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6721 "%s = 0: Virtual target ports are not supported.",
6722 cfg[CFG_DTM_ENABLE].string);
6723
6724 vport_mode_mask &= ~MODE_TARGET;
6725 }
6726
6727 cfg_vport_mode_mask &= vport_mode_mask;
6728
6729 if (cfg[CFG_VPORT_MODE_MASK].current != cfg_vport_mode_mask) {
6730 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6731 "%s: Changing 0x%x --> 0x%x",
6732 cfg[CFG_VPORT_MODE_MASK].string,
6733 cfg[CFG_VPORT_MODE_MASK].current,
6734 cfg_vport_mode_mask);
6735
6736 cfg[CFG_VPORT_MODE_MASK].current = cfg_vport_mode_mask;
6737 }
6738
6739 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6740 "pport-mode-mask: %s", emlxs_mode_xlate(port->mode_mask));
6741
6742 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6743 "vport-mode-mask: %s", emlxs_mode_xlate(cfg_vport_mode_mask));
6744
6745 for (i = 1; i < MAX_VPORTS; i++) {
6746 vport = &VPORT(i);
6747
6748 (void) snprintf(string, sizeof (string),
6749 "%s%d-vport%d-mode-mask", DRIVER_NAME, hba->ddiinst, i);
6750
6751 mode_mask = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
6752 (void *)hba->dip, DDI_PROP_DONTPASS, string,
6753 cfg_vport_mode_mask);
6754
6755 vport->mode_mask = mode_mask & vport_mode_mask;
6756
6757 if (vport->mode_mask != cfg_vport_mode_mask) {
6758 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6759 "vport%d-mode-mask: %s",
6760 i, emlxs_mode_xlate(vport->mode_mask));
6761 }
6762 }
6763 #else
6764 port->mode_mask = MODE_INITIATOR;
6765 for (i = 1; i < MAX_VPORTS; i++) {
6766 vport = &VPORT(i);
6767 vport->mode_mask = MODE_INITIATOR;
6768 }
6769 #endif /* SFCT_SUPPORT */
6770
6771 return;
6772
6773 } /* emlxs_mode_init_masks() */
6774
6775
6776 static void
6777 emlxs_fca_attach(emlxs_hba_t *hba)
6778 {
6779 emlxs_port_t *port;
6780 uint32_t i;
6781
6782 /* Update our transport structure */
6783 hba->fca_tran->fca_iblock = (ddi_iblock_cookie_t *)&hba->intr_arg;
6784 hba->fca_tran->fca_cmd_max = hba->io_throttle;
6785
6786 for (i = 0; i < MAX_VPORTS; i++) {
6787 port = &VPORT(i);
6788 port->ub_count = EMLXS_UB_TOKEN_OFFSET;
6789 port->ub_pool = NULL;
6790 }
6791
6792 #if (EMLXS_MODREV >= EMLXS_MODREV5)
6793 bcopy((caddr_t)&hba->wwpn, (caddr_t)&hba->fca_tran->fca_perm_pwwn,
6794 sizeof (NAME_TYPE));
6795 #endif /* >= EMLXS_MODREV5 */
6796
6797 return;
6798
6799 } /* emlxs_fca_attach() */
6800
6801
6802 static void
6803 emlxs_fca_detach(emlxs_hba_t *hba)
6804 {
6805 emlxs_port_t *port = &PPORT;
6806 uint32_t i;
6807 emlxs_port_t *vport;
6808
6809 if (!(port->flag & EMLXS_INI_ENABLED)) {
6810 return;
6811 }
6812
6813 if ((void *)MODSYM(fc_fca_detach) != NULL) {
6814 MODSYM(fc_fca_detach)(hba->dip);
6815 }
6816
6817 /* Disable INI mode for all ports */
6818 for (i = 0; i < MAX_VPORTS; i++) {
6819 vport = &VPORT(i);
6820 vport->flag &= ~EMLXS_INI_ENABLED;
6821 }
6822
6823 return;
6824
6825 } /* emlxs_fca_detach() */
6826
6827
6828 static void
6829 emlxs_drv_banner(emlxs_hba_t *hba)
6830 {
6831 emlxs_port_t *port = &PPORT;
6832 uint32_t i;
6833 char sli_mode[16];
6834 char msi_mode[16];
6835 char npiv_mode[16];
6836 emlxs_vpd_t *vpd = &VPD;
6837 uint8_t *wwpn;
6838 uint8_t *wwnn;
6839 uint32_t fw_show = 0;
6840
6841 /* Display firmware library one time for all driver instances */
6842 mutex_enter(&emlxs_device.lock);
6843 if (!(emlxs_instance_flag & EMLXS_FW_SHOW)) {
6844 emlxs_instance_flag |= EMLXS_FW_SHOW;
6845 fw_show = 1;
6846 }
6847 mutex_exit(&emlxs_device.lock);
6848
6849 if (fw_show) {
6850 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s",
6851 emlxs_copyright);
6852 emlxs_fw_show(hba);
6853 }
6854
6855 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s (%s)", emlxs_label,
6856 emlxs_revision);
6857
6858 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
6859 "%s Ven_id:%x Dev_id:%x Sub_id:%x Id:%d", hba->model_info.model,
6860 hba->model_info.vendor_id, hba->model_info.device_id,
6861 hba->model_info.ssdid, hba->model_info.id);
6862
6863 #ifdef EMLXS_I386
6864
6865 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
6866 "Firmware:%s (%s) Boot:%s", vpd->fw_version, vpd->fw_label,
6867 vpd->boot_version);
6868
6869 #else /* EMLXS_SPARC */
6870
6871 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
6872 "Firmware:%s (%s) Boot:%s Fcode:%s", vpd->fw_version,
6873 vpd->fw_label, vpd->boot_version, vpd->fcode_version);
6874
6875 #endif /* EMLXS_I386 */
6876
6877 if (hba->sli_mode > 3) {
6878 (void) snprintf(sli_mode, sizeof (sli_mode), "SLI:%d(%s)",
6879 hba->sli_mode,
6880 ((hba->flag & FC_FIP_SUPPORTED) ? "FIP" : "nonFIP"));
6881 } else {
6882 (void) snprintf(sli_mode, sizeof (sli_mode), "SLI:%d",
6883 hba->sli_mode);
6884 }
6885
6886 (void) strlcpy(msi_mode, " INTX:1", sizeof (msi_mode));
6887
6888 #ifdef MSI_SUPPORT
6889 if (hba->intr_flags & EMLXS_MSI_ENABLED) {
6890 switch (hba->intr_type) {
6891 case DDI_INTR_TYPE_FIXED:
6892 (void) strlcpy(msi_mode, " MSI:0", sizeof (msi_mode));
6893 break;
6894
6895 case DDI_INTR_TYPE_MSI:
6896 (void) snprintf(msi_mode, sizeof (msi_mode), " MSI:%d",
6897 hba->intr_count);
6898 break;
6899
6900 case DDI_INTR_TYPE_MSIX:
6901 (void) snprintf(msi_mode, sizeof (msi_mode), " MSIX:%d",
6902 hba->intr_count);
6903 break;
6904 }
6905 }
6906 #endif /* MSI_SUPPORT */
6907
6908 (void) strlcpy(npiv_mode, "", sizeof (npiv_mode));
6909
6910 if (hba->flag & FC_NPIV_ENABLED) {
6911 (void) snprintf(npiv_mode, sizeof (npiv_mode), " NPIV:%d",
6912 hba->vpi_max+1);
6913 } else {
6914 (void) strlcpy(npiv_mode, " NPIV:0", sizeof (npiv_mode));
6915 }
6916
6917 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
6918 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s%s%s%s%s%s",
6919 sli_mode, msi_mode, npiv_mode,
6920 ((port->flag & EMLXS_INI_ENABLED)? " FCA":""),
6921 ((port->flag & EMLXS_TGT_ENABLED)? " FCT":""),
6922 ((SLI4_FCOE_MODE)? " FCoE":" FC"));
6923 } else {
6924 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s%s%s%s%s",
6925 sli_mode, msi_mode, npiv_mode,
6926 ((port->flag & EMLXS_INI_ENABLED)? " FCA":""),
6927 ((port->flag & EMLXS_TGT_ENABLED)? " FCT":""));
6928 }
6929
6930 wwpn = (uint8_t *)&hba->wwpn;
6931 wwnn = (uint8_t *)&hba->wwnn;
6932 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
6933 "WWPN:%02X%02X%02X%02X%02X%02X%02X%02X "
6934 "WWNN:%02X%02X%02X%02X%02X%02X%02X%02X",
6935 wwpn[0], wwpn[1], wwpn[2], wwpn[3], wwpn[4], wwpn[5], wwpn[6],
6936 wwpn[7], wwnn[0], wwnn[1], wwnn[2], wwnn[3], wwnn[4], wwnn[5],
6937 wwnn[6], wwnn[7]);
6938
6939 for (i = 0; i < MAX_VPORTS; i++) {
6940 port = &VPORT(i);
6941
6942 if (!(port->flag & EMLXS_PORT_CONFIG)) {
6943 continue;
6944 }
6945
6946 wwpn = (uint8_t *)&port->wwpn;
6947 wwnn = (uint8_t *)&port->wwnn;
6948
6949 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
6950 "WWPN:%02X%02X%02X%02X%02X%02X%02X%02X "
6951 "WWNN:%02X%02X%02X%02X%02X%02X%02X%02X",
6952 wwpn[0], wwpn[1], wwpn[2], wwpn[3], wwpn[4], wwpn[5],
6953 wwpn[6], wwpn[7], wwnn[0], wwnn[1], wwnn[2], wwnn[3],
6954 wwnn[4], wwnn[5], wwnn[6], wwnn[7]);
6955 }
6956
6957 /*
6958 * Announce the device: ddi_report_dev() prints a banner at boot time,
6959 * announcing the device pointed to by dip.
6960 */
6961 (void) ddi_report_dev(hba->dip);
6962
6963 return;
6964
6965 } /* emlxs_drv_banner() */
6966
6967
6968 extern void
6969 emlxs_get_fcode_version(emlxs_hba_t *hba)
6970 {
6971 emlxs_vpd_t *vpd = &VPD;
6972 char *prop_str;
6973 int status;
6974
6975 /* Setup fcode version property */
6976 prop_str = NULL;
6977 status =
6978 ddi_prop_lookup_string(DDI_DEV_T_ANY, (dev_info_t *)hba->dip, 0,
6979 "fcode-version", (char **)&prop_str);
6980
6981 if (status == DDI_PROP_SUCCESS) {
6982 bcopy(prop_str, vpd->fcode_version, strlen(prop_str));
6983 (void) ddi_prop_free((void *)prop_str);
6984 } else {
6985 (void) strncpy(vpd->fcode_version, "none",
6986 (sizeof (vpd->fcode_version)-1));
6987 }
6988
6989 return;
6990
6991 } /* emlxs_get_fcode_version() */
6992
6993
6994 static int
6995 emlxs_hba_attach(dev_info_t *dip)
6996 {
6997 emlxs_hba_t *hba;
6998 emlxs_port_t *port;
6999 emlxs_config_t *cfg;
7000 char *prop_str;
7001 int ddiinst;
7002 int32_t emlxinst;
7003 int status;
7004 uint32_t rval;
7005 uint32_t init_flag = 0;
7006 char local_pm_components[32];
7007 uint32_t i;
7008
7009 ddiinst = ddi_get_instance(dip);
7010 emlxinst = emlxs_add_instance(ddiinst);
7011
7012 if (emlxinst >= MAX_FC_BRDS) {
7013 cmn_err(CE_WARN,
7014 "?%s: fca_hba_attach failed. Too many driver ddiinsts. "
7015 "inst=%x", DRIVER_NAME, ddiinst);
7016 return (DDI_FAILURE);
7017 }
7018
7019 if (emlxs_device.hba[emlxinst] == (emlxs_hba_t *)-1) {
7020 return (DDI_FAILURE);
7021 }
7022
7023 if (emlxs_device.hba[emlxinst]) {
7024 return (DDI_SUCCESS);
7025 }
7026
7027 /* An adapter can accidentally be plugged into a slave-only PCI slot */
7028 if (ddi_slaveonly(dip) == DDI_SUCCESS) {
7029 cmn_err(CE_WARN,
7030 "?%s%d: fca_hba_attach failed. Device in slave-only slot.",
7031 DRIVER_NAME, ddiinst);
7032 return (DDI_FAILURE);
7033 }
7034
7035 /* Allocate emlxs_dev_ctl structure. */
7036 if (ddi_soft_state_zalloc(emlxs_soft_state, ddiinst) != DDI_SUCCESS) {
7037 cmn_err(CE_WARN,
7038 "?%s%d: fca_hba_attach failed. Unable to allocate soft "
7039 "state.", DRIVER_NAME, ddiinst);
7040 return (DDI_FAILURE);
7041 }
7042 init_flag |= ATTACH_SOFT_STATE;
7043
7044 if ((hba = (emlxs_hba_t *)ddi_get_soft_state(emlxs_soft_state,
7045 ddiinst)) == NULL) {
7046 cmn_err(CE_WARN,
7047 "?%s%d: fca_hba_attach failed. Unable to get soft state.",
7048 DRIVER_NAME, ddiinst);
7049 goto failed;
7050 }
7051 bzero((char *)hba, sizeof (emlxs_hba_t));
7052
7053 emlxs_device.hba[emlxinst] = hba;
7054 emlxs_device.log[emlxinst] = &hba->log;
7055
7056 #ifdef DUMP_SUPPORT
7057 emlxs_device.dump_txtfile[emlxinst] = &hba->dump_txtfile;
7058 emlxs_device.dump_dmpfile[emlxinst] = &hba->dump_dmpfile;
7059 emlxs_device.dump_ceefile[emlxinst] = &hba->dump_ceefile;
7060 #endif /* DUMP_SUPPORT */
7061
7062 hba->dip = dip;
7063 hba->emlxinst = emlxinst;
7064 hba->ddiinst = ddiinst;
7065
7066 init_flag |= ATTACH_HBA;
7067
7068 /* Enable the physical port on this HBA */
7069 port = &PPORT;
7070 port->hba = hba;
7071 port->vpi = 0;
7072 port->flag |= EMLXS_PORT_ENABLED;
7073
7074 /* Allocate a transport structure */
7075 hba->fca_tran =
7076 (fc_fca_tran_t *)kmem_zalloc(sizeof (fc_fca_tran_t), KM_NOSLEEP);
7077 if (hba->fca_tran == NULL) {
7078 cmn_err(CE_WARN,
7079 "?%s%d: fca_hba_attach failed. Unable to allocate fca_tran "
7080 "memory.", DRIVER_NAME, ddiinst);
7081 goto failed;
7082 }
7083 bcopy((caddr_t)&emlxs_fca_tran, (caddr_t)hba->fca_tran,
7084 sizeof (fc_fca_tran_t));
7085
7086 /*
7087 * Copy the global ddi_dma_attr to the local hba fields
7088 */
7089 bcopy((caddr_t)&emlxs_dma_attr, (caddr_t)&hba->dma_attr,
7090 sizeof (ddi_dma_attr_t));
7091 bcopy((caddr_t)&emlxs_dma_attr_ro, (caddr_t)&hba->dma_attr_ro,
7092 sizeof (ddi_dma_attr_t));
7093 bcopy((caddr_t)&emlxs_dma_attr_1sg, (caddr_t)&hba->dma_attr_1sg,
7094 sizeof (ddi_dma_attr_t));
7095 bcopy((caddr_t)&emlxs_dma_attr_fcip_rsp,
7096 (caddr_t)&hba->dma_attr_fcip_rsp, sizeof (ddi_dma_attr_t));
7097
7098 /* Reset the fca_tran dma_attr fields to the per-hba copies */
7099 hba->fca_tran->fca_dma_attr = &hba->dma_attr;
7100 hba->fca_tran->fca_dma_fcp_cmd_attr = &hba->dma_attr_1sg;
7101 hba->fca_tran->fca_dma_fcp_rsp_attr = &hba->dma_attr_1sg;
7102 hba->fca_tran->fca_dma_fcp_data_attr = &hba->dma_attr_ro;
7103 hba->fca_tran->fca_dma_fcip_cmd_attr = &hba->dma_attr_1sg;
7104 hba->fca_tran->fca_dma_fcip_rsp_attr = &hba->dma_attr_fcip_rsp;
7105 hba->fca_tran->fca_dma_fcsm_cmd_attr = &hba->dma_attr_1sg;
7106 hba->fca_tran->fca_dma_fcsm_rsp_attr = &hba->dma_attr;
7107
7108 /* Set the transport structure pointer in our dip */
7109 /* SFS may panic if we are in target only mode */
7110 /* We will update the transport structure later */
7111 (void) ddi_set_driver_private(dip, (caddr_t)&emlxs_fca_tran);
7112 init_flag |= ATTACH_FCA_TRAN;
7113
7114 /* Perform driver integrity check */
7115 rval = emlxs_integrity_check(hba);
7116 if (rval) {
7117 cmn_err(CE_WARN,
7118 "?%s%d: fca_hba_attach failed. Driver integrity check "
7119 "failed. %d error(s) found.", DRIVER_NAME, ddiinst, rval);
7120 goto failed;
7121 }
7122
7123 cfg = &CFG;
7124
7125 bcopy((uint8_t *)&emlxs_cfg, (uint8_t *)cfg, sizeof (emlxs_cfg));
7126 /*
7127 * Gen7 chips respond to the heartbeat with an unknown-command status,
7128 * so we disable the heartbeat by default. It can be re-enabled in emlxs.conf.
7129 */
7130 if ((hba->sli_intf & SLI_INTF_IF_TYPE_MASK) == SLI_INTF_IF_TYPE_6)
7131 cfg[CFG_HEARTBEAT_ENABLE].current = 0;
7132
7133 #ifdef MSI_SUPPORT
7134 if ((void *)&ddi_intr_get_supported_types != NULL) {
7135 hba->intr_flags |= EMLXS_MSI_ENABLED;
7136 }
7137 #endif /* MSI_SUPPORT */
7138
7139
7140 /* Create the msg log file */
7141 if (emlxs_msg_log_create(hba) == 0) {
7142 cmn_err(CE_WARN,
7143 "?%s%d: fca_hba_attach failed. Unable to create message "
7144 "log", DRIVER_NAME, ddiinst);
7145 goto failed;
7146
7147 }
7148 init_flag |= ATTACH_LOG;
7149
7150 /* We can begin to use EMLXS_MSGF from this point on */
7151
7152 /*
7153 * Find the I/O bus type. If it is not an SBUS card,
7154 * then it is a PCI card. Default is PCI_FC (0).
7155 */
7156 prop_str = NULL;
7157 status = ddi_prop_lookup_string(DDI_DEV_T_ANY,
7158 (dev_info_t *)dip, 0, "name", (char **)&prop_str);
7159
7160 if (status == DDI_PROP_SUCCESS) {
7161 if (strncmp(prop_str, "lpfs", 4) == 0) {
7162 hba->bus_type = SBUS_FC;
7163 }
7164
7165 (void) ddi_prop_free((void *)prop_str);
7166 }
7167
7168 /*
7169 * Copy DDS from the config method and update configuration parameters
7170 */
7171 (void) emlxs_get_props(hba);
7172
7173 #ifdef FMA_SUPPORT
7174 hba->fm_caps = cfg[CFG_FM_CAPS].current;
7175
7176 emlxs_fm_init(hba);
7177
7178 init_flag |= ATTACH_FM;
7179 #endif /* FMA_SUPPORT */
7180
7181 if (emlxs_map_bus(hba)) {
7182 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7183 "Unable to map memory");
7184 goto failed;
7185
7186 }
7187 init_flag |= ATTACH_MAP_BUS;
7188
7189 /* Attempt to identify the adapter */
7190 rval = emlxs_init_adapter_info(hba);
7191
7192 if (rval == 0) {
7193 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7194 "Unable to get adapter info. Id:%d Vendor id:0x%x "
7195 "Device id:0x%x Model:%s", hba->model_info.id,
7196 hba->model_info.vendor_id, hba->model_info.device_id,
7197 hba->model_info.model);
7198 goto failed;
7199 }
7200
7201 /* Check if adapter is not supported */
7202 if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) {
7203 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7204 "Unsupported adapter found. Id:%d Vendor id:0x%x "
7205 "Device id:0x%x SSDID:0x%x Model:%s", hba->model_info.id,
7206 hba->model_info.vendor_id, hba->model_info.device_id,
7207 hba->model_info.ssdid, hba->model_info.model);
7208 goto failed;
7209 }
7210
7211 if (hba->model_info.sli_mask & EMLXS_SLI4_MASK) {
7212 hba->sli.sli4.mem_sgl_size = MEM_SGL_SIZE;
7213
7214 #ifdef EMLXS_I386
7215 /*
7216 * TigerShark has 64K limit for SG element size
7217 * Do this for x86 alone. For SPARC, the driver
7218 * breaks up the single SGE later on.
7219 */
7220 hba->dma_attr_ro.dma_attr_count_max = 0xffff;
7221
7222 i = cfg[CFG_MAX_XFER_SIZE].current;
7223 /* Update SGL size based on max_xfer_size */
7224 if (i > 516096) {
7225 /* 516096 = (((2048 / 16) - 2) * 4096) */
7226 hba->sli.sli4.mem_sgl_size = 4096;
7227 } else if (i > 253952) {
7228 /* 253952 = (((1024 / 16) - 2) * 4096) */
7229 hba->sli.sli4.mem_sgl_size = 2048;
7230 } else {
7231 hba->sli.sli4.mem_sgl_size = 1024;
7232 }
7233 #endif /* EMLXS_I386 */
7234
7235 i = SGL_TO_SGLLEN(hba->sli.sli4.mem_sgl_size);
7236 } else {
7237 hba->sli.sli3.mem_bpl_size = MEM_BPL_SIZE;
7238
7239 #ifdef EMLXS_I386
7240 i = cfg[CFG_MAX_XFER_SIZE].current;
7241 /* Update BPL size based on max_xfer_size */
7242 if (i > 688128) {
7243 /* 688128 = (((2048 / 12) - 2) * 4096) */
7244 hba->sli.sli3.mem_bpl_size = 4096;
7245 } else if (i > 339968) {
7246 /* 339968 = (((1024 / 12) - 2) * 4096) */
7247 hba->sli.sli3.mem_bpl_size = 2048;
7248 } else {
7249 hba->sli.sli3.mem_bpl_size = 1024;
7250 }
7251 #endif /* EMLXS_I386 */
7252
7253 i = BPL_TO_SGLLEN(hba->sli.sli3.mem_bpl_size);
7254 }
7255
7256 /* Update dma_attr_sgllen based on true SGL length */
7257 hba->dma_attr.dma_attr_sgllen = i;
7258 hba->dma_attr_ro.dma_attr_sgllen = i;
7259 hba->dma_attr_fcip_rsp.dma_attr_sgllen = i;
7260
7261 if (EMLXS_SLI_MAP_HDW(hba)) {
7262 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7263 "Unable to map memory");
7264 goto failed;
7265
7266 }
7267 init_flag |= ATTACH_MAP_SLI;
7268
7269 /* Initialize the interrupts. But don't add them yet */
7270 status = EMLXS_INTR_INIT(hba, 0);
7271 if (status != DDI_SUCCESS) {
7272 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7273 "Unable to initalize interrupt(s).");
7274 goto failed;
7275
7276 }
7277 init_flag |= ATTACH_INTR_INIT;
7278
7279 /* Initialize LOCKs */
7280 emlxs_msg_lock_reinit(hba);
7281 emlxs_lock_init(hba);
7282 init_flag |= ATTACH_LOCK;
7283
7284 /* Create the event queue */
7285 if (emlxs_event_queue_create(hba) == 0) {
7286 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7287 "Unable to create event queue");
7288
7289 goto failed;
7290
7291 }
7292 init_flag |= ATTACH_EVENTS;
7293
7294 /* Initialize the power management */
7295 mutex_enter(&EMLXS_PM_LOCK);
7296 hba->pm_state = EMLXS_PM_IN_ATTACH;
7297 hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
7298 hba->pm_busy = 0;
7299 #ifdef IDLE_TIMER
7300 hba->pm_active = 1;
7301 hba->pm_idle_timer = 0;
7302 #endif /* IDLE_TIMER */
7303 mutex_exit(&EMLXS_PM_LOCK);
7304
7305 /* Set the pm component name */
7306 (void) snprintf(local_pm_components, sizeof (local_pm_components),
7307 "NAME=%s%d", DRIVER_NAME, ddiinst);
7308 emlxs_pm_components[0] = local_pm_components;
7309
7310 /* Check if power management support is enabled */
7311 if (cfg[CFG_PM_SUPPORT].current) {
7312 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
7313 "pm-components", emlxs_pm_components,
7314 sizeof (emlxs_pm_components) /
7315 sizeof (emlxs_pm_components[0])) !=
7316 DDI_PROP_SUCCESS) {
7317 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7318 "Unable to create pm components.");
7319 goto failed;
7320 }
7321 }
7322
7323 /* Needed for suspend and resume support */
7324 (void) ddi_prop_update_string(DDI_DEV_T_NONE, dip, "pm-hardware-state",
7325 "needs-suspend-resume");
7326 init_flag |= ATTACH_PROP;
7327
7328 emlxs_thread_spawn_create(hba);
7329 init_flag |= ATTACH_SPAWN;
7330
7331 emlxs_thread_create(hba, &hba->iodone_thread);
7332
7333 init_flag |= ATTACH_THREAD;
7334
7335 retry:
7336 /* Setup initiator / target ports */
7337 emlxs_mode_init(hba);
7338
7339 /* If driver did not attach to either stack, */
7340 /* then driver attach fails */
7341 if (port->mode == MODE_NONE) {
7342 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7343 "Driver interfaces not enabled.");
7344 goto failed;
7345 }
7346
7347 /*
7348 * Initialize HBA
7349 */
7350
7351 /* Set initial state */
7352 mutex_enter(&EMLXS_PORT_LOCK);
7353 hba->flag |= FC_OFFLINE_MODE;
7354 hba->flag &= ~(FC_ONLINE_MODE | FC_ONLINING_MODE | FC_OFFLINING_MODE);
7355 mutex_exit(&EMLXS_PORT_LOCK);
7356
7357 if (status = emlxs_online(hba)) {
7358 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7359 "Unable to initialize adapter.");
7360
7361 if (status == EAGAIN) {
7362 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7363 "Retrying adapter initialization ...");
7364 goto retry;
7365 }
7366 goto failed;
7367 }
7368 init_flag |= ATTACH_ONLINE;
7369
7370 /* This is to ensure that the model property is properly set */
7371 (void) ddi_prop_update_string(DDI_DEV_T_NONE, dip, "model",
7372 hba->model_info.model);
7373
7374 /* Create the device node. */
7375 if (ddi_create_minor_node(dip, "devctl", S_IFCHR, ddiinst, NULL, 0) ==
7376 DDI_FAILURE) {
7377 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7378 "Unable to create device node.");
7379 goto failed;
7380 }
7381 init_flag |= ATTACH_NODE;
7382
7383 /* Attach initiator now */
7384 /* This must come after emlxs_online() */
7385 emlxs_fca_attach(hba);
7386 init_flag |= ATTACH_FCA;
7387
7388 /* Initialize kstat information */
7389 hba->kstat = kstat_create(DRIVER_NAME,
7390 ddiinst, "statistics", "controller",
7391 KSTAT_TYPE_RAW, sizeof (emlxs_stats_t),
7392 KSTAT_FLAG_VIRTUAL);
7393
7394 if (hba->kstat == NULL) {
7395 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
7396 "kstat_create failed.");
7397 } else {
7398 hba->kstat->ks_data = (void *)&hba->stats;
7399 kstat_install(hba->kstat);
7400 init_flag |= ATTACH_KSTAT;
7401 }
7402
7403 #if (EMLXS_MODREV >= EMLXS_MODREV3) && (EMLXS_MODREV <= EMLXS_MODREV4)
7404 /* Setup virtual port properties */
7405 emlxs_read_vport_prop(hba);
7406 #endif /* EMLXS_MODREV3 || EMLXS_MODREV4 */
7407
7408
7409 #ifdef DHCHAP_SUPPORT
7410 emlxs_dhc_attach(hba);
7411 init_flag |= ATTACH_DHCHAP;
7412 #endif /* DHCHAP_SUPPORT */
7413
7414 /* Display the driver banner now */
7415 emlxs_drv_banner(hba);
7416
7417 /* Raise the power level */
7418
7419 /*
7420 * This will not execute emlxs_hba_resume because
7421 * EMLXS_PM_IN_ATTACH is set
7422 */
7423 if (emlxs_pm_raise_power(dip) != DDI_SUCCESS) {
7424 /* Set power up anyway. This should not happen! */
7425 mutex_enter(&EMLXS_PM_LOCK);
7426 hba->pm_level = EMLXS_PM_ADAPTER_UP;
7427 hba->pm_state &= ~EMLXS_PM_IN_ATTACH;
7428 mutex_exit(&EMLXS_PM_LOCK);
7429 } else {
7430 mutex_enter(&EMLXS_PM_LOCK);
7431 hba->pm_state &= ~EMLXS_PM_IN_ATTACH;
7432 mutex_exit(&EMLXS_PM_LOCK);
7433 }
7434
7435 #ifdef SFCT_SUPPORT
7436 if (port->flag & EMLXS_TGT_ENABLED) {
7437 /* Do this last */
7438 emlxs_fct_attach(hba);
7439 init_flag |= ATTACH_FCT;
7440 }
7441 #endif /* SFCT_SUPPORT */
7442
7443 return (DDI_SUCCESS);
7444
7445 failed:
7446
7447 emlxs_driver_remove(dip, init_flag, 1);
7448
7449 return (DDI_FAILURE);
7450
7451 } /* emlxs_hba_attach() */
7452
7453
7454 static int
7455 emlxs_hba_detach(dev_info_t *dip)
7456 {
7457 emlxs_hba_t *hba;
7458 emlxs_port_t *port;
7459 int ddiinst;
7460 int count;
7461 uint32_t init_flag = (uint32_t)-1;
7462
7463 ddiinst = ddi_get_instance(dip);
7464 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
7465 port = &PPORT;
7466
7467 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg, NULL);
7468
7469 mutex_enter(&EMLXS_PM_LOCK);
7470 hba->pm_state |= EMLXS_PM_IN_DETACH;
7471 mutex_exit(&EMLXS_PM_LOCK);
7472
7473 /* Lower the power level */
7474 /*
7475 * This will not suspend the driver since the
7476 * EMLXS_PM_IN_DETACH has been set
7477 */
7478 if (emlxs_pm_lower_power(dip) != DDI_SUCCESS) {
7479 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
7480 "Unable to lower power.");
7481
7482 mutex_enter(&EMLXS_PM_LOCK);
7483 hba->pm_state &= ~EMLXS_PM_IN_DETACH;
7484 mutex_exit(&EMLXS_PM_LOCK);
7485
7486 return (DDI_FAILURE);
7487 }
7488
7489 /* Take the adapter offline first, if not already */
7490 if (emlxs_offline(hba, 1) != 0) {
7491 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
7492 "Unable to take adapter offline.");
7493
7494 mutex_enter(&EMLXS_PM_LOCK);
7495 hba->pm_state &= ~EMLXS_PM_IN_DETACH;
7496 mutex_exit(&EMLXS_PM_LOCK);
7497
7498 (void) emlxs_pm_raise_power(dip);
7499
7500 return (DDI_FAILURE);
7501 }
7502 /* Check ub buffer pools */
7503 if (port->ub_pool) {
7504 mutex_enter(&EMLXS_UB_LOCK);
7505
7506 /* Wait up to 10 seconds for all ub pools to be freed */
7507 count = 10 * 2;
7508 while (port->ub_pool && count) {
7509 mutex_exit(&EMLXS_UB_LOCK);
7510 delay(drv_usectohz(500000)); /* half second wait */
7511 count--;
7512 mutex_enter(&EMLXS_UB_LOCK);
7513 }
7514
7515 if (port->ub_pool) {
7516 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
7517 "fca_unbind_port: Unsolicited buffers still "
7518 "active. port=%p. Destroying...", port);
7519
7520 /* Destroy all pools */
7521 while (port->ub_pool) {
7522 emlxs_ub_destroy(port, port->ub_pool);
7523 }
7524 }
7525
7526 mutex_exit(&EMLXS_UB_LOCK);
7527 }
7528 init_flag &= ~ATTACH_ONLINE;
7529
7530 /* Remove the driver instance */
7531 emlxs_driver_remove(dip, init_flag, 0);
7532
7533 return (DDI_SUCCESS);
7534
7535 } /* emlxs_hba_detach() */
7536
7537
7538 extern int
7539 emlxs_map_bus(emlxs_hba_t *hba)
7540 {
7541 emlxs_port_t *port = &PPORT;
7542 dev_info_t *dip;
7543 ddi_device_acc_attr_t dev_attr;
7544 int status;
7545
7546 dip = (dev_info_t *)hba->dip;
7547 dev_attr = emlxs_dev_acc_attr;
7548
7549 if (hba->bus_type == SBUS_FC) {
7550 if (hba->pci_acc_handle == 0) {
7551 status = ddi_regs_map_setup(dip,
7552 SBUS_DFLY_PCI_CFG_RINDEX,
7553 (caddr_t *)&hba->pci_addr,
7554 0, 0, &emlxs_dev_acc_attr, &hba->pci_acc_handle);
7555 if (status != DDI_SUCCESS) {
7556 EMLXS_MSGF(EMLXS_CONTEXT,
7557 &emlxs_attach_failed_msg,
7558 "(SBUS) ddi_regs_map_setup PCI failed. "
7559 "status=%x", status);
7560 goto failed;
7561 }
7562 }
7563
7564 if (hba->sbus_pci_handle == 0) {
7565 status = ddi_regs_map_setup(dip,
7566 SBUS_TITAN_PCI_CFG_RINDEX,
7567 (caddr_t *)&hba->sbus_pci_addr,
7568 0, 0, &dev_attr, &hba->sbus_pci_handle);
7569 if (status != DDI_SUCCESS) {
7570 EMLXS_MSGF(EMLXS_CONTEXT,
7571 &emlxs_attach_failed_msg,
7572 "(SBUS) ddi_regs_map_setup TITAN PCI "
7573 "failed. status=%x", status);
7574 goto failed;
7575 }
7576 }
7577
7578 } else { /* ****** PCI ****** */
7579
7580 if (hba->pci_acc_handle == 0) {
7581 status = ddi_regs_map_setup(dip,
7582 PCI_CFG_RINDEX,
7583 (caddr_t *)&hba->pci_addr,
7584 0, 0, &emlxs_dev_acc_attr, &hba->pci_acc_handle);
7585 if (status != DDI_SUCCESS) {
7586 EMLXS_MSGF(EMLXS_CONTEXT,
7587 &emlxs_attach_failed_msg,
7588 "(PCI) ddi_regs_map_setup PCI failed. "
7589 "status=%x", status);
7590 goto failed;
7591 }
7592 }
7593 #ifdef EMLXS_I386
7594 /* Setting up PCI configure space */
7595 (void) ddi_put16(hba->pci_acc_handle,
7596 (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER),
7597 CMD_CFG_VALUE | CMD_IO_ENBL);
7598
7599 #ifdef FMA_SUPPORT
7600 if (emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
7601 != DDI_FM_OK) {
7602 EMLXS_MSGF(EMLXS_CONTEXT,
7603 &emlxs_invalid_access_handle_msg, NULL);
7604 goto failed;
7605 }
7606 #endif /* FMA_SUPPORT */
7607
7608 #endif /* EMLXS_I386 */
7609
7610 }
7611 return (0);
7612
7613 failed:
7614
7615 emlxs_unmap_bus(hba);
7616 return (ENOMEM);
7617
7618 } /* emlxs_map_bus() */
7619
7620
7621 extern void
7622 emlxs_unmap_bus(emlxs_hba_t *hba)
7623 {
7624 if (hba->pci_acc_handle) {
7625 (void) ddi_regs_map_free(&hba->pci_acc_handle);
7626 hba->pci_acc_handle = 0;
7627 }
7628
7629 if (hba->sbus_pci_handle) {
7630 (void) ddi_regs_map_free(&hba->sbus_pci_handle);
7631 hba->sbus_pci_handle = 0;
7632 }
7633
7634 return;
7635
7636 } /* emlxs_unmap_bus() */
7637
7638
7639 static int
7640 emlxs_get_props(emlxs_hba_t *hba)
7641 {
7642 emlxs_config_t *cfg;
7643 uint32_t i;
7644 char string[256];
7645 uint32_t new_value;
7646
7647 /* Initialize each parameter */
7648 for (i = 0; i < NUM_CFG_PARAM; i++) {
7649 cfg = &hba->config[i];
7650
7651 /* Ensure strings are terminated */
7652 cfg->string[(EMLXS_CFG_STR_SIZE-1)] = 0;
7653 cfg->help[(EMLXS_CFG_HELP_SIZE-1)] = 0;
7654
7655 /* Set the current value to the default value */
7656 new_value = cfg->def;
7657
7658 /* First check for the global setting */
7659 new_value = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
7660 (void *)hba->dip, DDI_PROP_DONTPASS,
7661 cfg->string, new_value);
7662
7663 /* Now check for the per adapter ddiinst setting */
7664 (void) snprintf(string, sizeof (string), "%s%d-%s", DRIVER_NAME,
7665 hba->ddiinst, cfg->string);
7666
7667 new_value = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
7668 (void *)hba->dip, DDI_PROP_DONTPASS, string, new_value);
7669
7670 /* Now check the parameter */
7671 cfg->current = emlxs_check_parm(hba, i, new_value);
7672 }
7673
7674 return (0);
7675
7676 } /* emlxs_get_props() */
7677
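/*
 * Example (hypothetical values): each parameter in emlxs_get_props() above
 * may be set globally in emlxs.conf, or per adapter instance by prefixing
 * the property name as built by the snprintf() call. Assuming the driver
 * name is "emlxs", an instance-specific entry overrides the global one for
 * that HBA only:
 *
 *	link-speed=0;		# global: auto detect on every instance
 *	emlxs2-link-speed=8;	# instance 2 only: force 8Gb
 */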
7678
7679 extern uint32_t
7680 emlxs_check_parm(emlxs_hba_t *hba, uint32_t index, uint32_t new_value)
7681 {
7682 emlxs_port_t *port = &PPORT;
7683 uint32_t i;
7684 emlxs_config_t *cfg;
7685 emlxs_vpd_t *vpd = &VPD;
7686
7687 if (index >= NUM_CFG_PARAM) {
7688 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
7689 "check_parm failed. Invalid index = %d", index);
7690
7691 return (new_value);
7692 }
7693
7694 cfg = &hba->config[index];
7695
7696 if (new_value > cfg->hi) {
7697 new_value = cfg->def;
7698 } else if (new_value < cfg->low) {
7699 new_value = cfg->def;
7700 }
7701
7702 /* Perform additional checks */
7703 switch (index) {
7704 #ifdef SFCT_SUPPORT
7705 case CFG_NPIV_ENABLE:
7706 if (hba->config[CFG_TARGET_MODE].current &&
7707 hba->config[CFG_DTM_ENABLE].current == 0) {
7708 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
7709 "enable-npiv: Not supported in pure target mode. "
7710 "Disabling.");
7711
7712 new_value = 0;
7713 }
7714 break;
7715 #endif /* SFCT_SUPPORT */
7716
7717
7718 case CFG_NUM_NODES:
7719 switch (new_value) {
7720 case 1:
7721 case 2:
7722 /* Must have at least 3 if not 0 */
7723 return (3);
7724
7725 default:
7726 break;
7727 }
7728 break;
7729
7730 case CFG_FW_CHECK:
7731 /* The 0x2 bit implies the 0x1 bit will also be set */
7732 if (new_value & 0x2) {
7733 new_value |= 0x1;
7734 }
7735
7736 /* The 0x4 bit should not be set if 0x1 or 0x2 is not set */
7737 if (!(new_value & 0x3) && (new_value & 0x4)) {
7738 new_value &= ~0x4;
7739 }
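		/*
		 * Net effect of the normalization above (illustrative
		 * values): a configured value of 0x6 becomes 0x7 because
		 * 0x2 implies 0x1, while 0x4 alone is reduced to 0x0.
		 */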
7740 break;
7741
7742 case CFG_LINK_SPEED:
7743 if ((new_value > 8) &&
7744 (hba->config[CFG_TOPOLOGY].current == 4)) {
7745 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
7746 "link-speed: %dGb not supported in loop topology. "
7747 "Switching to auto detect.",
7748 new_value);
7749
7750 new_value = 0;
7751 break;
7752 }
7753
7754 if (vpd->link_speed) {
7755 switch (new_value) {
7756 case 0:
7757 break;
7758
7759 case 1:
7760 if (!(vpd->link_speed & LMT_1GB_CAPABLE)) {
7761 new_value = 0;
7762
7763 EMLXS_MSGF(EMLXS_CONTEXT,
7764 &emlxs_init_msg,
7765 "link-speed: 1Gb not supported "
7766 "by adapter. Switching to auto "
7767 "detect.");
7768 }
7769 break;
7770
7771 case 2:
7772 if (!(vpd->link_speed & LMT_2GB_CAPABLE)) {
7773 new_value = 0;
7774
7775 EMLXS_MSGF(EMLXS_CONTEXT,
7776 &emlxs_init_msg,
7777 "link-speed: 2Gb not supported "
7778 "by adapter. Switching to auto "
7779 "detect.");
7780 }
7781 break;
7782
7783 case 4:
7784 if (!(vpd->link_speed & LMT_4GB_CAPABLE)) {
7785 new_value = 0;
7786
7787 EMLXS_MSGF(EMLXS_CONTEXT,
7788 &emlxs_init_msg,
7789 "link-speed: 4Gb not supported "
7790 "by adapter. Switching to auto "
7791 "detect.");
7792 }
7793 break;
7794
7795 case 8:
7796 if (!(vpd->link_speed & LMT_8GB_CAPABLE)) {
7797 new_value = 0;
7798
7799 EMLXS_MSGF(EMLXS_CONTEXT,
7800 &emlxs_init_msg,
7801 "link-speed: 8Gb not supported "
7802 "by adapter. Switching to auto "
7803 "detect.");
7804 }
7805 break;
7806
7807 case 16:
7808 if (!(vpd->link_speed & LMT_16GB_CAPABLE)) {
7809 new_value = 0;
7810
7811 EMLXS_MSGF(EMLXS_CONTEXT,
7812 &emlxs_init_msg,
7813 "link-speed: 16Gb not supported "
7814 "by adapter. Switching to auto "
7815 "detect.");
7816 }
7817 break;
7818
7819 case 32:
7820 if (!(vpd->link_speed & LMT_32GB_CAPABLE)) {
7821 new_value = 0;
7822
7823 EMLXS_MSGF(EMLXS_CONTEXT,
7824 &emlxs_init_msg,
7825 "link-speed: 32Gb not supported "
7826 "by adapter. Switching to auto "
7827 "detect.");
7828 }
7829 break;
7830
7831 default:
7832 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
7833 "link-speed: Invalid value=%d provided. "
7834 "Switching to auto detect.",
7835 new_value);
7836
7837 new_value = 0;
7838 }
7839 } else { /* Perform basic validity check */
7840
7841 /* Perform additional check on link speed */
7842 switch (new_value) {
7843 case 0:
7844 case 1:
7845 case 2:
7846 case 4:
7847 case 8:
7848 case 16:
7849 /* link-speed is a valid choice */
7850 break;
7851
7852 default:
7853 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
7854 "link-speed: Invalid value=%d provided. "
7855 "Switching to auto detect.",
7856 new_value);
7857
7858 new_value = 0;
7859 }
7860 }
7861 break;
7862
7863 case CFG_TOPOLOGY:
7864 if ((new_value == 4) &&
7865 (hba->config[CFG_LINK_SPEED].current > 8)) {
7866 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
7867 "topology: Loop topology not supported "
7868 "with link speeds greater than 8Gb. "
7869 "Switching to auto detect.");
7870
7871 new_value = 0;
7872 break;
7873 }
7874
7875 /* Perform additional check on topology */
7876 switch (new_value) {
7877 case 0:
7878 case 2:
7879 case 4:
7880 case 6:
7881 /* topology is a valid choice */
7882 break;
7883
7884 default:
7885 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
7886 "topology: Invalid value=%d provided. "
7887 "Switching to auto detect.",
7888 new_value);
7889
7890 new_value = 0;
7891 break;
7892 }
7893 break;
7894
7895 #ifdef DHCHAP_SUPPORT
7896 case CFG_AUTH_TYPE:
7897 {
7898 uint32_t shift;
7899 uint32_t mask;
7900
7901 /* Perform additional check on auth type */
7902 shift = 12;
7903 mask = 0xF000;
7904 for (i = 0; i < 4; i++) {
7905 if (((new_value & mask) >> shift) > DFC_AUTH_TYPE_MAX) {
7906 return (cfg->def);
7907 }
7908
7909 shift -= 4;
7910 mask >>= 4;
7911 }
7912 break;
7913 }
7914
7915 case CFG_AUTH_HASH:
7916 {
7917 uint32_t shift;
7918 uint32_t mask;
7919
7920 /* Perform additional check on auth hash */
7921 shift = 12;
7922 mask = 0xF000;
7923 for (i = 0; i < 4; i++) {
7924 if (((new_value & mask) >> shift) > DFC_AUTH_HASH_MAX) {
7925 return (cfg->def);
7926 }
7927
7928 shift -= 4;
7929 mask >>= 4;
7930 }
7931 break;
7932 }
7933
7934 case CFG_AUTH_GROUP:
7935 {
7936 uint32_t shift;
7937 uint32_t mask;
7938
7939 /* Perform additional check on auth group */
7940 shift = 28;
7941 mask = 0xF0000000;
7942 for (i = 0; i < 8; i++) {
7943 if (((new_value & mask) >> shift) >
7944 DFC_AUTH_GROUP_MAX) {
7945 return (cfg->def);
7946 }
7947
7948 shift -= 4;
7949 mask >>= 4;
7950 }
7951 break;
7952 }
7953
7954 case CFG_AUTH_INTERVAL:
7955 if (new_value < 10) {
7956 return (10);
7957 }
7958 break;
7959
7960
7961 #endif /* DHCHAP_SUPPORT */
7962
7963 } /* switch */
7964
7965 return (new_value);
7966
7967 } /* emlxs_check_parm() */
7968
7969
7970 extern uint32_t
7971 emlxs_set_parm(emlxs_hba_t *hba, uint32_t index, uint32_t new_value)
7972 {
7973 emlxs_port_t *port = &PPORT;
7974 emlxs_port_t *vport;
7975 uint32_t vpi;
7976 emlxs_config_t *cfg;
7977 uint32_t old_value;
7978
7979 if (index >= NUM_CFG_PARAM) {
7980 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
7981 "set_parm failed. Invalid index = %d", index);
7982
7983 return ((uint32_t)FC_FAILURE);
7984 }
7985
7986 cfg = &hba->config[index];
7987
7988 if (!(cfg->flags & PARM_DYNAMIC)) {
7989 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
7990 "set_parm failed. %s is not dynamic.", cfg->string);
7991
7992 return ((uint32_t)FC_FAILURE);
7993 }
7994
7995 /* Check new value */
7996 old_value = new_value;
7997 new_value = emlxs_check_parm(hba, index, new_value);
7998
7999 if (old_value != new_value) {
8000 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
8001 "set_parm: %s invalid. 0x%x --> 0x%x",
8002 cfg->string, old_value, new_value);
8003 }
8004
8005 /* Return now if no actual change */
8006 if (new_value == cfg->current) {
8007 return (FC_SUCCESS);
8008 }
8009
8010 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
8011 "set_parm: %s changing. 0x%x --> 0x%x",
8012 cfg->string, cfg->current, new_value);
8013
8014 old_value = cfg->current;
8015 cfg->current = new_value;
8016
8017 /* React to change if needed */
8018 switch (index) {
8019
8020 case CFG_PCI_MAX_READ:
8021 /* Update MXR */
8022 emlxs_pcix_mxr_update(hba, 1);
8023 break;
8024
8025 #ifdef SFCT_SUPPORT
8026 case CFG_TARGET_MODE:
8027 (void) emlxs_reset(port, FC_FCA_LINK_RESET);
8028 break;
8029 #endif /* SFCT_SUPPORT */
8030
8031 case CFG_SLI_MODE:
8032 /* Check SLI mode */
8033 if ((hba->sli_mode == 3) && (new_value == 2)) {
8034 /* All vports must be disabled first */
8035 for (vpi = 1; vpi < MAX_VPORTS; vpi++) {
8036 vport = &VPORT(vpi);
8037
8038 if (vport->flag & EMLXS_PORT_ENABLED) {
8039 /* Reset current value */
8040 cfg->current = old_value;
8041
8042 EMLXS_MSGF(EMLXS_CONTEXT,
8043 &emlxs_sfs_debug_msg,
8044 "set_parm failed. %s: vpi=%d "
8045 "still enabled. Value restored to "
8046 "0x%x.", cfg->string, vpi,
8047 old_value);
8048
8049 return (2);
8050 }
8051 }
8052 }
8053
8054 if ((hba->sli_mode >= 4) && (new_value < 4)) {
8055 /*
8056 * Not allow to set to SLI 2 or 3 if HBA supports SLI4
8057 */
8058 cfg->current = old_value;
8059 return ((uint32_t)FC_FAILURE);
8060 }
8061
8062 break;
8063
8064 case CFG_NPIV_ENABLE:
8065 /* Check if NPIV is being disabled */
8066 if ((old_value == 1) && (new_value == 0)) {
8067 /* All vports must be disabled first */
8068 for (vpi = 1; vpi < MAX_VPORTS; vpi++) {
8069 vport = &VPORT(vpi);
8070
8071 if (vport->flag & EMLXS_PORT_ENABLED) {
8072 /* Reset current value */
8073 cfg->current = old_value;
8074
8075 EMLXS_MSGF(EMLXS_CONTEXT,
8076 &emlxs_sfs_debug_msg,
8077 "set_parm failed. %s: vpi=%d "
8078 "still enabled. Value restored to "
8079 "0x%x.", cfg->string, vpi,
8080 old_value);
8081
8082 return (2);
8083 }
8084 }
8085 }
8086
8087 /* Trigger adapter reset */
8088 /* (void) emlxs_reset(port, FC_FCA_RESET); */
8089
8090 break;
8091
8092
8093 case CFG_VPORT_RESTRICTED:
8094 for (vpi = 0; vpi < MAX_VPORTS; vpi++) {
8095 vport = &VPORT(vpi);
8096
8097 if (!(vport->flag & EMLXS_PORT_CONFIG)) {
8098 continue;
8099 }
8100
8101 if (vport->options & EMLXS_OPT_RESTRICT_MASK) {
8102 continue;
8103 }
8104
8105 if (new_value) {
8106 vport->flag |= EMLXS_PORT_RESTRICTED;
8107 } else {
8108 vport->flag &= ~EMLXS_PORT_RESTRICTED;
8109 }
8110 }
8111
8112 break;
8113
8114 #ifdef DHCHAP_SUPPORT
8115 case CFG_AUTH_ENABLE:
8116 (void) emlxs_reset(port, FC_FCA_LINK_RESET);
8117 break;
8118
8119 case CFG_AUTH_TMO:
8120 hba->auth_cfg.authentication_timeout = cfg->current;
8121 break;
8122
8123 case CFG_AUTH_MODE:
8124 hba->auth_cfg.authentication_mode = cfg->current;
8125 break;
8126
8127 case CFG_AUTH_BIDIR:
8128 hba->auth_cfg.bidirectional = cfg->current;
8129 break;
8130
8131 case CFG_AUTH_TYPE:
8132 hba->auth_cfg.authentication_type_priority[0] =
8133 (cfg->current & 0xF000) >> 12;
8134 hba->auth_cfg.authentication_type_priority[1] =
8135 (cfg->current & 0x0F00) >> 8;
8136 hba->auth_cfg.authentication_type_priority[2] =
8137 (cfg->current & 0x00F0) >> 4;
8138 hba->auth_cfg.authentication_type_priority[3] =
8139 (cfg->current & 0x000F);
8140 break;
8141
8142 case CFG_AUTH_HASH:
8143 hba->auth_cfg.hash_priority[0] =
8144 (cfg->current & 0xF000) >> 12;
8145 hba->auth_cfg.hash_priority[1] = (cfg->current & 0x0F00)>>8;
8146 hba->auth_cfg.hash_priority[2] = (cfg->current & 0x00F0)>>4;
8147 hba->auth_cfg.hash_priority[3] = (cfg->current & 0x000F);
8148 break;
8149
8150 case CFG_AUTH_GROUP:
8151 hba->auth_cfg.dh_group_priority[0] =
8152 (cfg->current & 0xF0000000) >> 28;
8153 hba->auth_cfg.dh_group_priority[1] =
8154 (cfg->current & 0x0F000000) >> 24;
8155 hba->auth_cfg.dh_group_priority[2] =
8156 (cfg->current & 0x00F00000) >> 20;
8157 hba->auth_cfg.dh_group_priority[3] =
8158 (cfg->current & 0x000F0000) >> 16;
8159 hba->auth_cfg.dh_group_priority[4] =
8160 (cfg->current & 0x0000F000) >> 12;
8161 hba->auth_cfg.dh_group_priority[5] =
8162 (cfg->current & 0x00000F00) >> 8;
8163 hba->auth_cfg.dh_group_priority[6] =
8164 (cfg->current & 0x000000F0) >> 4;
8165 hba->auth_cfg.dh_group_priority[7] =
8166 (cfg->current & 0x0000000F);
8167 break;
8168
8169 case CFG_AUTH_INTERVAL:
8170 hba->auth_cfg.reauthenticate_time_interval = cfg->current;
8171 break;
8172 #endif /* DHCHAP_SUPPORT */
8173
8174 }
8175
8176 return (FC_SUCCESS);
8177
8178 } /* emlxs_set_parm() */
8179
8180
8181 /*
8182 * emlxs_mem_alloc OS specific routine for memory allocation / mapping
8183 *
8184 * The buf_info->flags field describes the memory operation requested.
8185 *
8186  * FC_MBUF_PHYSONLY set requests that a supplied virtual address be mapped for DMA
8187  * Virtual address is supplied in buf_info->virt
8188  * DMA mapping flag is in buf_info->align
8189  * (DMA_READ_ONLY, DMA_WRITE_ONLY, DMA_READ_WRITE)
8190  * The mapped physical address is returned in buf_info->phys
8191  *
8192  * FC_MBUF_PHYSONLY cleared requests that memory be allocated for driver use and,
8193  * if FC_MBUF_DMA is set, the memory is also mapped for DMA
8194  * The byte alignment of the memory request is supplied in buf_info->align
8195  * The byte size of the memory request is supplied in buf_info->size
8196  * The virtual address is returned in buf_info->virt
8197  * The mapped physical address is returned in buf_info->phys (for FC_MBUF_DMA)
8198 */
8199 extern uint8_t *
8200 emlxs_mem_alloc(emlxs_hba_t *hba, MBUF_INFO *buf_info)
8201 {
8202 emlxs_port_t *port = &PPORT;
8203 ddi_dma_attr_t dma_attr;
8204 ddi_device_acc_attr_t dev_attr;
8205 uint_t cookie_count;
8206 size_t dma_reallen;
8207 ddi_dma_cookie_t dma_cookie;
8208 uint_t dma_flag;
8209 int status;
8210
8211 dma_attr = hba->dma_attr_1sg;
8212 dev_attr = emlxs_data_acc_attr;
8213
8214 if (buf_info->flags & FC_MBUF_SNGLSG) {
8215 dma_attr.dma_attr_sgllen = 1;
8216 }
8217
8218 if (buf_info->flags & FC_MBUF_PHYSONLY) {
8219
8220 if (buf_info->virt == NULL) {
8221 goto done;
8222 }
8223
8224 /*
8225 * Allocate the DMA handle for this DMA object
8226 */
8227 status = ddi_dma_alloc_handle((void *)hba->dip,
8228 &dma_attr, DDI_DMA_DONTWAIT,
8229 NULL, (ddi_dma_handle_t *)&buf_info->dma_handle);
8230 if (status != DDI_SUCCESS) {
8231 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
8232 "ddi_dma_alloc_handle failed: size=%x align=%x "
8233 "flags=%x", buf_info->size, buf_info->align,
8234 buf_info->flags);
8235
8236 buf_info->phys = 0;
8237 buf_info->dma_handle = 0;
8238 goto done;
8239 }
8240
8241 switch (buf_info->align) {
8242 case DMA_READ_WRITE:
8243 dma_flag = (DDI_DMA_RDWR | DDI_DMA_CONSISTENT);
8244 break;
8245 case DMA_READ_ONLY:
8246 dma_flag = (DDI_DMA_READ | DDI_DMA_CONSISTENT);
8247 break;
8248 case DMA_WRITE_ONLY:
8249 dma_flag = (DDI_DMA_WRITE | DDI_DMA_CONSISTENT);
8250 break;
8251 default:
8252 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
8253 "Invalid DMA flag");
8254 (void) ddi_dma_free_handle(
8255 (ddi_dma_handle_t *)&buf_info->dma_handle);
8256 buf_info->phys = 0;
8257 buf_info->dma_handle = 0;
8258 return ((uint8_t *)buf_info->virt);
8259 }
8260
8261 /* Map this page of memory */
8262 status = ddi_dma_addr_bind_handle(
8263 (ddi_dma_handle_t)buf_info->dma_handle, NULL,
8264 (caddr_t)buf_info->virt, (size_t)buf_info->size,
8265 dma_flag, DDI_DMA_DONTWAIT, NULL, &dma_cookie,
8266 &cookie_count);
8267
8268 if (status != DDI_DMA_MAPPED || (cookie_count > 1)) {
8269 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
8270 "ddi_dma_addr_bind_handle failed: status=%x "
8271 "count=%x flags=%x", status, cookie_count,
8272 buf_info->flags);
8273
8274 (void) ddi_dma_free_handle(
8275 (ddi_dma_handle_t *)&buf_info->dma_handle);
8276 buf_info->phys = 0;
8277 buf_info->dma_handle = 0;
8278 goto done;
8279 }
8280
8281 if (hba->bus_type == SBUS_FC) {
8282
8283 int32_t burstsizes_limit = 0xff;
8284 int32_t ret_burst;
8285
8286 ret_burst = ddi_dma_burstsizes(
8287 buf_info->dma_handle) & burstsizes_limit;
8288 if (ddi_dma_set_sbus64(buf_info->dma_handle,
8289 ret_burst) == DDI_FAILURE) {
8290 EMLXS_MSGF(EMLXS_CONTEXT,
8291 &emlxs_mem_alloc_failed_msg,
8292 "ddi_dma_set_sbus64 failed.");
8293 }
8294 }
8295
8296 /* Save Physical address */
8297 buf_info->phys = dma_cookie.dmac_laddress;
8298
8299 /*
8300 * Just to be sure, let's add this
8301 */
8302 EMLXS_MPDATA_SYNC((ddi_dma_handle_t)buf_info->dma_handle,
8303 (off_t)0, (size_t)buf_info->size, DDI_DMA_SYNC_FORDEV);
8304
8305 } else if (buf_info->flags & FC_MBUF_DMA) {
8306
8307 dma_attr.dma_attr_align = buf_info->align;
8308
8309 /*
8310 * Allocate the DMA handle for this DMA object
8311 */
8312 status = ddi_dma_alloc_handle((void *)hba->dip, &dma_attr,
8313 DDI_DMA_DONTWAIT, NULL,
8314 (ddi_dma_handle_t *)&buf_info->dma_handle);
8315 if (status != DDI_SUCCESS) {
8316 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
8317 "ddi_dma_alloc_handle failed: size=%x align=%x "
8318 "flags=%x", buf_info->size, buf_info->align,
8319 buf_info->flags);
8320
8321 buf_info->virt = NULL;
8322 buf_info->phys = 0;
8323 buf_info->data_handle = 0;
8324 buf_info->dma_handle = 0;
8325 goto done;
8326 }
8327
8328 status = ddi_dma_mem_alloc(
8329 (ddi_dma_handle_t)buf_info->dma_handle,
8330 (size_t)buf_info->size, &dev_attr, DDI_DMA_CONSISTENT,
8331 DDI_DMA_DONTWAIT, NULL, (caddr_t *)&buf_info->virt,
8332 &dma_reallen, (ddi_acc_handle_t *)&buf_info->data_handle);
8333
8334 if ((status != DDI_SUCCESS) || (buf_info->size > dma_reallen)) {
8335 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
8336 "ddi_dma_mem_alloc failed: size=%x align=%x "
8337 "flags=%x", buf_info->size, buf_info->align,
8338 buf_info->flags);
8339
8340 (void) ddi_dma_free_handle(
8341 (ddi_dma_handle_t *)&buf_info->dma_handle);
8342
8343 buf_info->virt = NULL;
8344 buf_info->phys = 0;
8345 buf_info->data_handle = 0;
8346 buf_info->dma_handle = 0;
8347 goto done;
8348 }
8349
8350 /* Map this page of memory */
8351 status = ddi_dma_addr_bind_handle(
8352 (ddi_dma_handle_t)buf_info->dma_handle, NULL,
8353 (caddr_t)buf_info->virt, (size_t)buf_info->size,
8354 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
8355 &dma_cookie, &cookie_count);
8356
8357 if (status != DDI_DMA_MAPPED || (cookie_count > 1)) {
8358 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
8359 "ddi_dma_addr_bind_handle failed: status=%x "
8360 "count=%d size=%x align=%x flags=%x", status,
8361 cookie_count, buf_info->size, buf_info->align,
8362 buf_info->flags);
8363
8364 (void) ddi_dma_mem_free(
8365 (ddi_acc_handle_t *)&buf_info->data_handle);
8366 (void) ddi_dma_free_handle(
8367 (ddi_dma_handle_t *)&buf_info->dma_handle);
8368
8369 buf_info->virt = NULL;
8370 buf_info->phys = 0;
8371 buf_info->dma_handle = 0;
8372 buf_info->data_handle = 0;
8373 goto done;
8374 }
8375
8376 if (hba->bus_type == SBUS_FC) {
8377 int32_t burstsizes_limit = 0xff;
8378 int32_t ret_burst;
8379
8380 ret_burst =
8381 ddi_dma_burstsizes(buf_info->
8382 dma_handle) & burstsizes_limit;
8383 if (ddi_dma_set_sbus64(buf_info->dma_handle,
8384 ret_burst) == DDI_FAILURE) {
8385 EMLXS_MSGF(EMLXS_CONTEXT,
8386 &emlxs_mem_alloc_failed_msg,
8387 "ddi_dma_set_sbus64 failed.");
8388 }
8389 }
8390
8391 /* Save Physical address */
8392 buf_info->phys = dma_cookie.dmac_laddress;
8393
8394 /* Just to be sure, let's add this */
8395 EMLXS_MPDATA_SYNC((ddi_dma_handle_t)buf_info->dma_handle,
8396 (off_t)0, (size_t)buf_info->size, DDI_DMA_SYNC_FORDEV);
8397
8398 } else { /* allocate virtual memory */
8399
8400 buf_info->virt =
8401 kmem_zalloc((size_t)buf_info->size, KM_NOSLEEP);
8402 buf_info->phys = 0;
8403 buf_info->data_handle = 0;
8404 buf_info->dma_handle = 0;
8405
8406 if (buf_info->virt == (uint32_t *)0) {
8407 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
8408 "size=%x flags=%x", buf_info->size,
8409 buf_info->flags);
8410 }
8411
8412 }
8413
8414 done:
8415
8416 return ((uint8_t *)buf_info->virt);
8417
8418 } /* emlxs_mem_alloc() */
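
/*
 * Illustrative usage sketch only (not part of the driver build): how a
 * caller might request a DMA-able buffer per the FC_MBUF_DMA contract
 * described above. The function name, size, and alignment below are
 * placeholders chosen for the example.
 */
#if 0
static int
emlxs_example_dma_alloc(emlxs_hba_t *hba)
{
	MBUF_INFO buf_info;

	bzero(&buf_info, sizeof (MBUF_INFO));
	buf_info.size = 4096;		/* requested byte size */
	buf_info.align = 4096;		/* requested byte alignment */
	buf_info.flags = FC_MBUF_DMA;	/* allocate and map for DMA */

	(void) emlxs_mem_alloc(hba, &buf_info);

	if (buf_info.virt == NULL) {
		return (1);	/* allocation or mapping failed */
	}

	/* buf_info.virt and buf_info.phys are now valid for driver use */

	/* Release: frees the memory and tears down the DMA mapping */
	emlxs_mem_free(hba, &buf_info);

	return (0);
}
#endif	/* example only */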
8419
8420
8421
8422 /*
8423 * emlxs_mem_free:
8424 *
8425 * OS specific routine for memory de-allocation / unmapping
8426 *
8427 * The buf_info->flags field describes the memory operation requested.
8428 *
8429 * FC_MBUF_PHYSONLY set requests a supplied virtual address be unmapped
8430 * for DMA, but not freed. The mapped physical address to be unmapped is in
8431 * buf_info->phys
8432 *
8433 * FC_MBUF_PHYSONLY cleared requests memory be freed and unmapped for DMA only
8434 * if FC_MBUF_DMA is set. The mapped physical address to be unmapped is in
8435 * buf_info->phys. The virtual address to be freed is in buf_info->virt
8436 */
8437 /*ARGSUSED*/
8438 extern void
8439 emlxs_mem_free(emlxs_hba_t *hba, MBUF_INFO *buf_info)
8440 {
8441 if (buf_info->flags & FC_MBUF_PHYSONLY) {
8442
8443 if (buf_info->dma_handle) {
8444 (void) ddi_dma_unbind_handle(buf_info->dma_handle);
8445 (void) ddi_dma_free_handle(
8446 (ddi_dma_handle_t *)&buf_info->dma_handle);
8447 buf_info->dma_handle = NULL;
8448 }
8449
8450 } else if (buf_info->flags & FC_MBUF_DMA) {
8451
8452 if (buf_info->dma_handle) {
8453 (void) ddi_dma_unbind_handle(buf_info->dma_handle);
8454 if (buf_info->data_handle) {
8455 (void) ddi_dma_mem_free(
8456 (ddi_acc_handle_t *)&buf_info->data_handle);
8457 }
8458 (void) ddi_dma_free_handle(
8459 (ddi_dma_handle_t *)&buf_info->dma_handle);
8460 buf_info->dma_handle = NULL;
8461 buf_info->data_handle = NULL;
8462 }
8463
8464 	} else {	/* free virtual memory */
8465
8466 if (buf_info->virt) {
8467 kmem_free(buf_info->virt, (size_t)buf_info->size);
8468 buf_info->virt = NULL;
8469 }
8470 }
8471
8472 } /* emlxs_mem_free() */
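
/*
 * Illustrative usage sketch only (not part of the driver build): mapping a
 * caller-owned buffer for DMA with FC_MBUF_PHYSONLY and unmapping it again
 * with emlxs_mem_free(), per the contracts described above. The function
 * name and parameters are placeholders chosen for the example.
 */
#if 0
static int
emlxs_example_dma_map(emlxs_hba_t *hba, void *buffer, uint32_t size)
{
	MBUF_INFO buf_info;

	bzero(&buf_info, sizeof (MBUF_INFO));
	buf_info.virt = buffer;			/* caller-owned memory */
	buf_info.size = size;
	buf_info.align = DMA_READ_WRITE;	/* DMA direction flag */
	buf_info.flags = FC_MBUF_PHYSONLY;

	(void) emlxs_mem_alloc(hba, &buf_info);

	if (buf_info.dma_handle == 0) {
		return (1);	/* mapping failed */
	}

	/* ... hand buf_info.phys to the hardware ... */

	/* Unmap only; the caller still owns and frees the memory itself */
	emlxs_mem_free(hba, &buf_info);

	return (0);
}
#endif	/* example only */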
8473
8474
8475 static int
8476 emlxs_select_fcp_channel(emlxs_hba_t *hba, NODELIST *ndlp, int reset)
8477 {
8478 int channel;
8479 int msi_id;
8480
8481
8482 /* IO to FCP2 device or a device reset always use fcp channel */
8483 if ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) || reset) {
8484 return (hba->channel_fcp);
8485 }
8486
8487
8488 msi_id = emlxs_select_msiid(hba);
8489 channel = emlxs_msiid_to_chan(hba, msi_id);
8490
8491
8492
8493 /* If channel is closed, then try fcp channel */
8494 if (ndlp->nlp_flag[channel] & NLP_CLOSED) {
8495 channel = hba->channel_fcp;
8496 }
8497 return (channel);
8498
8499 } /* emlxs_select_fcp_channel() */
8500
8501
8502 static int32_t
8503 emlxs_fast_target_reset(emlxs_port_t *port, emlxs_buf_t *sbp, NODELIST *ndlp)
8504 {
8505 emlxs_hba_t *hba = HBA;
8506 fc_packet_t *pkt;
8507 emlxs_config_t *cfg;
8508 MAILBOXQ *mbq;
8509 MAILBOX *mb;
8510 uint32_t rc;
8511
8512 /*
8513 	 * This routine provides an alternative target reset processing
8514 	 * method. Instead of sending an actual target reset to the
8515 	 * NPort, we first unregister the login to that NPort. This
8516 	 * causes all the outstanding IOs to quickly complete with
8517 	 * a NO RPI local error. Next we force the ULP to relogin
8518 	 * to the NPort by sending an RSCN (for that NPort) to the
8519 	 * upper layer. This method should result in a fast target
8520 	 * reset as far as IO completion goes; however, since an actual
8521 	 * target reset is not sent to the NPort, it is not 100%
8522 	 * compatible. Things like reservations will not be broken.
8523 	 * By default this option is DISABLED, and it is only enabled through
8524 	 * a hidden configuration parameter (fast-tgt-reset).
8525 */
8526 rc = FC_TRAN_BUSY;
8527 pkt = PRIV2PKT(sbp);
8528 cfg = &CFG;
8529
8530 if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))) {
8531 /* issue the mbox cmd to the sli */
8532 mb = (MAILBOX *) mbq->mbox;
8533 bzero((void *) mb, MAILBOX_CMD_BSIZE);
8534 mb->un.varUnregLogin.rpi = (uint16_t)ndlp->nlp_Rpi;
8535 #ifdef SLI3_SUPPORT
8536 mb->un.varUnregLogin.vpi = port->vpi;
8537 #endif /* SLI3_SUPPORT */
8538 mb->mbxCommand = MBX_UNREG_LOGIN;
8539 mb->mbxOwner = OWN_HOST;
8540
8541 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8542 "Fast Target Reset: unreg rpi=%d tmr=%d", ndlp->nlp_Rpi,
8543 cfg[CFG_FAST_TGT_RESET_TMR].current);
8544
8545 if (EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_WAIT, 0)
8546 == MBX_SUCCESS) {
8547
8548 ndlp->nlp_Rpi = 0;
8549
8550 mutex_enter(&sbp->mtx);
8551 sbp->node = (void *)ndlp;
8552 sbp->did = ndlp->nlp_DID;
8553 mutex_exit(&sbp->mtx);
8554
8555 if (pkt->pkt_rsplen) {
8556 bzero((uint8_t *)pkt->pkt_resp,
8557 pkt->pkt_rsplen);
8558 }
8559 if (cfg[CFG_FAST_TGT_RESET_TMR].current) {
8560 ndlp->nlp_force_rscn = hba->timer_tics +
8561 cfg[CFG_FAST_TGT_RESET_TMR].current;
8562 }
8563
8564 emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 0);
8565 }
8566
8567 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
8568 rc = FC_SUCCESS;
8569 }
8570 return (rc);
8571 } /* emlxs_fast_target_reset() */
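
/*
 * Note: as described above, this path is gated by the hidden
 * "fast-tgt-reset" configuration parameter (CFG_FAST_TGT_RESET), with
 * CFG_FAST_TGT_RESET_TMR scheduling the forced RSCN. For illustration
 * only, enabling it would be done through the driver configuration file
 * (e.g. "fast-tgt-reset=1;"); the exact property syntax is an assumption
 * and should be verified against the shipped emlxs.conf.
 */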
8572
8573 static int32_t
8574 emlxs_send_fcp_cmd(emlxs_port_t *port, emlxs_buf_t *sbp, uint32_t *pkt_flags)
8575 {
8576 emlxs_hba_t *hba = HBA;
8577 fc_packet_t *pkt;
8578 emlxs_config_t *cfg;
8579 IOCBQ *iocbq;
8580 IOCB *iocb;
8581 CHANNEL *cp;
8582 NODELIST *ndlp;
8583 char *cmd;
8584 uint16_t lun;
8585 FCP_CMND *fcp_cmd;
8586 uint32_t did;
8587 uint32_t reset = 0;
8588 int channel;
8589 int32_t rval;
8590
8591 pkt = PRIV2PKT(sbp);
8592 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
8593
8594 /* Find target node object */
8595 ndlp = emlxs_node_find_did(port, did, 1);
8596
8597 if (!ndlp || !ndlp->nlp_active) {
8598 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
8599 "Node not found. did=%x", did);
8600
8601 return (FC_BADPACKET);
8602 }
8603
8604 /* When the fcp channel is closed we stop accepting any FCP cmd */
8605 if (ndlp->nlp_flag[hba->channel_fcp] & NLP_CLOSED) {
8606 return (FC_TRAN_BUSY);
8607 }
8608
8609 /* Snoop for target or lun reset first */
8610 /* We always use FCP channel to send out target/lun reset fcp cmds */
8611 /* interrupt affinity only applies to non tgt lun reset fcp cmd */
8612
8613 cmd = (char *)pkt->pkt_cmd;
8614 lun = *((uint16_t *)cmd);
8615 lun = LE_SWAP16(lun);
8616
8617 iocbq = &sbp->iocbq;
8618 iocb = &iocbq->iocb;
8619 iocbq->node = (void *) ndlp;
8620
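	/*
	 * Byte 10 of the FCP_CMND payload holds the task management flags:
	 * 0x20 requests a Target Reset and 0x10 requests a LUN Reset, which
	 * is what the two checks below key off of.
	 */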
8621 /* Check for target reset */
8622 if (cmd[10] & 0x20) {
8623 /* prepare iocb */
8624 if ((rval = EMLXS_SLI_PREP_FCP_IOCB(port, sbp,
8625 hba->channel_fcp)) != FC_SUCCESS) {
8626
8627 if (rval == 0xff) {
8628 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
8629 0, 1);
8630 rval = FC_SUCCESS;
8631 }
8632
8633 return (rval);
8634 }
8635
8636 mutex_enter(&sbp->mtx);
8637 sbp->pkt_flags |= PACKET_FCP_TGT_RESET;
8638 sbp->pkt_flags |= PACKET_POLLED;
8639 *pkt_flags = sbp->pkt_flags;
8640 mutex_exit(&sbp->mtx);
8641
8642 #ifdef SAN_DIAG_SUPPORT
8643 emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_TGTRESET,
8644 (HBA_WWN *)&ndlp->nlp_portname, -1);
8645 #endif /* SAN_DIAG_SUPPORT */
8646
8647 iocbq->flag |= IOCB_PRIORITY;
8648
8649 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8650 "Target Reset: did=%x", did);
8651
8652 cfg = &CFG;
8653 if (cfg[CFG_FAST_TGT_RESET].current) {
8654 if (emlxs_fast_target_reset(port, sbp, ndlp) ==
8655 FC_SUCCESS) {
8656 return (FC_SUCCESS);
8657 }
8658 }
8659
8660 /* Close the node for any further normal IO */
8661 emlxs_node_close(port, ndlp, hba->channel_fcp,
8662 pkt->pkt_timeout);
8663
8664 /* Flush the IO's on the tx queues */
8665 (void) emlxs_tx_node_flush(port, ndlp,
8666 &hba->chan[hba->channel_fcp], 0, sbp);
8667
8668 /* This is the target reset fcp cmd */
8669 reset = 1;
8670 }
8671
8672 /* Check for lun reset */
8673 else if (cmd[10] & 0x10) {
8674 /* prepare iocb */
8675 if ((rval = EMLXS_SLI_PREP_FCP_IOCB(port, sbp,
8676 hba->channel_fcp)) != FC_SUCCESS) {
8677
8678 if (rval == 0xff) {
8679 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
8680 0, 1);
8681 rval = FC_SUCCESS;
8682 }
8683
8684 return (rval);
8685 }
8686
8687 mutex_enter(&sbp->mtx);
8688 sbp->pkt_flags |= PACKET_FCP_LUN_RESET;
8689 sbp->pkt_flags |= PACKET_POLLED;
8690 *pkt_flags = sbp->pkt_flags;
8691 mutex_exit(&sbp->mtx);
8692
8693 #ifdef SAN_DIAG_SUPPORT
8694 emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_LUNRESET,
8695 (HBA_WWN *)&ndlp->nlp_portname, lun);
8696 #endif /* SAN_DIAG_SUPPORT */
8697
8698 iocbq->flag |= IOCB_PRIORITY;
8699
8700 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8701 "LUN Reset: did=%x lun=%02x LUN=%02x02x", did, lun,
8702 cmd[0], cmd[1]);
8703
8704 /* Flush the IO's on the tx queues for this lun */
8705 (void) emlxs_tx_lun_flush(port, ndlp, lun, sbp);
8706
8707 /* This is the lun reset fcp cmd */
8708 reset = 1;
8709 }
8710
8711 channel = emlxs_select_fcp_channel(hba, ndlp, reset);
8712
8713 #ifdef SAN_DIAG_SUPPORT
8714 sbp->sd_start_time = gethrtime();
8715 #endif /* SAN_DIAG_SUPPORT */
8716
8717 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
8718 emlxs_swap_fcp_pkt(sbp);
8719 #endif /* EMLXS_MODREV2X */
8720
8721 fcp_cmd = (FCP_CMND *) pkt->pkt_cmd;
8722
8723 if (fcp_cmd->fcpCntl1 == FCP_QTYPE_UNTAGGED) {
8724 fcp_cmd->fcpCntl1 = FCP_QTYPE_SIMPLE;
8725 }
8726
8727 if (reset == 0) {
8728 /*
8729 * tgt lun reset fcp cmd has been prepared
8730 * separately in the beginning
8731 */
8732 if ((rval = EMLXS_SLI_PREP_FCP_IOCB(port, sbp,
8733 channel)) != FC_SUCCESS) {
8734
8735 if (rval == 0xff) {
8736 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
8737 0, 1);
8738 rval = FC_SUCCESS;
8739 }
8740
8741 return (rval);
8742 }
8743 }
8744
8745 cp = &hba->chan[channel];
8746 cp->ulpSendCmd++;
8747
8748 	/* Initialize sbp */
8749 mutex_enter(&sbp->mtx);
8750 sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
8751 ((pkt->pkt_timeout > 0xff) ? 0 : 10);
8752 sbp->node = (void *)ndlp;
8753 sbp->lun = lun;
8754 sbp->class = iocb->ULPCLASS;
8755 sbp->did = ndlp->nlp_DID;
8756 mutex_exit(&sbp->mtx);
8757
8758 if (pkt->pkt_cmdlen) {
8759 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
8760 DDI_DMA_SYNC_FORDEV);
8761 }
8762
8763 if (pkt->pkt_datalen && pkt->pkt_tran_type == FC_PKT_FCP_WRITE) {
8764 EMLXS_MPDATA_SYNC(pkt->pkt_data_dma, 0, pkt->pkt_datalen,
8765 DDI_DMA_SYNC_FORDEV);
8766 }
8767
8768 HBASTATS.FcpIssued++;
8769
8770 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
8771 return (FC_SUCCESS);
8772
8773 } /* emlxs_send_fcp_cmd() */
8774
8775
8776
8777
8778 /*
8779 * We have to consider this setup works for INTX, MSI, and MSIX
8780 * For INTX, intr_count is always 1
8781 * For MSI, intr_count is always 2 by default
8782 * For MSIX, intr_count is configurable (1, 2, 4, 8) for now.
8783 */
8784 extern int
8785 emlxs_select_msiid(emlxs_hba_t *hba)
8786 {
8787 int msiid = 0;
8788
8789 /* We use round-robin */
8790 mutex_enter(&EMLXS_MSIID_LOCK);
8791 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
8792 msiid = hba->last_msiid;
8793 hba->last_msiid ++;
8794 if (hba->last_msiid >= hba->intr_count) {
8795 hba->last_msiid = 0;
8796 }
8797 } else {
8798 /* This should work for INTX and MSI also */
8799 /* For SLI3 the chan_count is always 4 */
8800 /* For SLI3 the msiid is limited to chan_count */
8801 msiid = hba->last_msiid;
8802 hba->last_msiid ++;
8803 if (hba->intr_count > hba->chan_count) {
8804 if (hba->last_msiid >= hba->chan_count) {
8805 hba->last_msiid = 0;
8806 }
8807 } else {
8808 if (hba->last_msiid >= hba->intr_count) {
8809 hba->last_msiid = 0;
8810 }
8811 }
8812 }
8813 mutex_exit(&EMLXS_MSIID_LOCK);
8814
8815 return (msiid);
8816 } /* emlxs_select_msiid */
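
/*
 * For illustration: with intr_count = 4 on an SLI4 adapter, successive
 * calls to emlxs_select_msiid() return 0, 1, 2, 3, 0, 1, ... spreading
 * new IOs round-robin across the available interrupt vectors.
 */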
8817
8818
8819 /*
8820  * A channel has an association with an MSI id.
8821 * One msi id could be associated with multiple channels.
8822 */
8823 extern int
8824 emlxs_msiid_to_chan(emlxs_hba_t *hba, int msi_id)
8825 {
8826 emlxs_config_t *cfg = &CFG;
8827 EQ_DESC_t *eqp;
8828 int chan;
8829 int num_wq;
8830
8831 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
8832 /* For SLI4 round robin all WQs associated with the msi_id */
8833 eqp = &hba->sli.sli4.eq[msi_id];
8834
8835 mutex_enter(&eqp->lastwq_lock);
8836 chan = eqp->lastwq;
8837 eqp->lastwq++;
8838 num_wq = cfg[CFG_NUM_WQ].current;
8839 if (eqp->lastwq >= ((msi_id + 1) * num_wq)) {
8840 eqp->lastwq -= num_wq;
8841 }
8842 mutex_exit(&eqp->lastwq_lock);
8843
8844 return (chan);
8845 } else {
8846 /* This is for SLI3 mode */
8847 return (hba->msi2chan[msi_id]);
8848 }
8849
8850 } /* emlxs_msiid_to_chan */
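
/*
 * For illustration: in SLI4 mode each EQ (msi_id) owns a contiguous block
 * of cfg[CFG_NUM_WQ] work queue channels. With num_wq = 4, msi_id 0 cycles
 * through channels 0-3 and msi_id 1 cycles through channels 4-7.
 */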
8851
8852
8853 #ifdef SFCT_SUPPORT
8854 static int32_t
8855 emlxs_send_fct_status(emlxs_port_t *port, emlxs_buf_t *sbp)
8856 {
8857 emlxs_hba_t *hba = HBA;
8858 IOCBQ *iocbq;
8859 IOCB *iocb;
8860 NODELIST *ndlp;
8861 CHANNEL *cp;
8862 uint32_t did;
8863
8864 did = sbp->did;
8865 ndlp = sbp->node;
8866 cp = (CHANNEL *)sbp->channel;
8867
8868 iocbq = &sbp->iocbq;
8869 iocb = &iocbq->iocb;
8870
8871 /* Make sure node is still active */
8872 if (!ndlp->nlp_active) {
8873 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
8874 "*Node not found. did=%x", did);
8875
8876 return (FC_BADPACKET);
8877 }
8878
8879 /* If gate is closed */
8880 if (ndlp->nlp_flag[hba->channel_fcp] & NLP_CLOSED) {
8881 return (FC_TRAN_BUSY);
8882 }
8883
8884 iocb->ULPCOMMAND = CMD_FCP_TRSP64_CX;
8885 if (EMLXS_SLI_PREP_FCT_IOCB(port, sbp, cp->channelno) !=
8886 IOERR_SUCCESS) {
8887 return (FC_TRAN_BUSY);
8888 }
8889
8890 HBASTATS.FcpIssued++;
8891
8892 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
8893
8894 return (FC_SUCCESS);
8895
8896 } /* emlxs_send_fct_status() */
8897
8898
8899 static int32_t
8900 emlxs_send_fct_abort(emlxs_port_t *port, emlxs_buf_t *sbp)
8901 {
8902 emlxs_hba_t *hba = HBA;
8903 IOCBQ *iocbq;
8904 IOCB *iocb;
8905 NODELIST *ndlp;
8906 CHANNEL *cp;
8907 uint32_t did;
8908
8909 did = sbp->did;
8910 ndlp = sbp->node;
8911 cp = (CHANNEL *)sbp->channel;
8912
8913 iocbq = &sbp->iocbq;
8914 iocb = &iocbq->iocb;
8915
8916 /* Make sure node is still active */
8917 if ((ndlp == NULL) || (!ndlp->nlp_active)) {
8918 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
8919 "*Node not found. did=%x", did);
8920
8921 return (FC_BADPACKET);
8922 }
8923
8924 /* If gate is closed */
8925 if (ndlp->nlp_flag[hba->channel_fcp] & NLP_CLOSED) {
8926 return (FC_TRAN_BUSY);
8927 }
8928
8929 iocb->ULPCOMMAND = CMD_ABORT_XRI_CX;
8930 if (EMLXS_SLI_PREP_FCT_IOCB(port, sbp, cp->channelno) !=
8931 IOERR_SUCCESS) {
8932 return (FC_TRAN_BUSY);
8933 }
8934
8935 EMLXS_SLI_ISSUE_IOCB_CMD(hba, sbp->channel, iocbq);
8936
8937 return (FC_SUCCESS);
8938
8939 } /* emlxs_send_fct_abort() */
8940
8941 #endif /* SFCT_SUPPORT */
8942
8943
8944 static int32_t
8945 emlxs_send_ip(emlxs_port_t *port, emlxs_buf_t *sbp)
8946 {
8947 emlxs_hba_t *hba = HBA;
8948 fc_packet_t *pkt;
8949 IOCBQ *iocbq;
8950 IOCB *iocb;
8951 CHANNEL *cp;
8952 uint32_t i;
8953 NODELIST *ndlp;
8954 uint32_t did;
8955 int32_t rval;
8956
8957 pkt = PRIV2PKT(sbp);
8958 cp = &hba->chan[hba->channel_ip];
8959 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
8960
8961 /* Check if node exists */
8962 /* Broadcast did is always a success */
8963 ndlp = emlxs_node_find_did(port, did, 1);
8964
8965 if (!ndlp || !ndlp->nlp_active) {
8966 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
8967 "Node not found. did=0x%x", did);
8968
8969 return (FC_BADPACKET);
8970 }
8971
8972 /* Check if gate is temporarily closed */
8973 if (ndlp->nlp_flag[hba->channel_ip] & NLP_CLOSED) {
8974 return (FC_TRAN_BUSY);
8975 }
8976
8977 /* Check if an exchange has been created */
8978 if ((ndlp->nlp_Xri == 0) && (did != BCAST_DID)) {
8979 /* No exchange. Try creating one */
8980 (void) emlxs_create_xri(port, cp, ndlp);
8981
8982 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8983 "Adapter Busy. Exchange not found. did=0x%x", did);
8984
8985 return (FC_TRAN_BUSY);
8986 }
8987
8988 /* ULP PATCH: pkt_cmdlen was found to be set to zero */
8989 /* on BROADCAST commands */
8990 if (pkt->pkt_cmdlen == 0) {
8991 /* Set the pkt_cmdlen to the cookie size */
8992 #if (EMLXS_MODREV >= EMLXS_MODREV3)
8993 for (i = 0; i < pkt->pkt_cmd_cookie_cnt; i++) {
8994 pkt->pkt_cmdlen += pkt->pkt_cmd_cookie[i].dmac_size;
8995 }
8996 #else
8997 pkt->pkt_cmdlen = pkt->pkt_cmd_cookie.dmac_size;
8998 #endif /* >= EMLXS_MODREV3 */
8999
9000 }
9001
9002 iocbq = &sbp->iocbq;
9003 iocb = &iocbq->iocb;
9004
9005 iocbq->node = (void *)ndlp;
9006 if ((rval = EMLXS_SLI_PREP_IP_IOCB(port, sbp)) != FC_SUCCESS) {
9007
9008 if (rval == 0xff) {
9009 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
9010 rval = FC_SUCCESS;
9011 }
9012
9013 return (rval);
9014 }
9015
9016 cp->ulpSendCmd++;
9017
9018 	/* Initialize sbp */
9019 mutex_enter(&sbp->mtx);
9020 sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
9021 ((pkt->pkt_timeout > 0xff) ? 0 : 10);
9022 sbp->node = (void *)ndlp;
9023 sbp->lun = EMLXS_LUN_NONE;
9024 sbp->class = iocb->ULPCLASS;
9025 sbp->did = did;
9026 mutex_exit(&sbp->mtx);
9027
9028 if (pkt->pkt_cmdlen) {
9029 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
9030 DDI_DMA_SYNC_FORDEV);
9031 }
9032
9033 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
9034
9035 return (FC_SUCCESS);
9036
9037 } /* emlxs_send_ip() */
9038
9039
9040 static int32_t
9041 emlxs_send_els(emlxs_port_t *port, emlxs_buf_t *sbp)
9042 {
9043 emlxs_hba_t *hba = HBA;
9044 emlxs_port_t *vport;
9045 fc_packet_t *pkt;
9046 IOCBQ *iocbq;
9047 CHANNEL *cp;
9048 SERV_PARM *sp;
9049 uint32_t cmd;
9050 int i;
9051 ELS_PKT *els_pkt;
9052 NODELIST *ndlp;
9053 uint32_t did;
9054 char fcsp_msg[32];
9055 int rc;
9056 int32_t rval;
9057 emlxs_config_t *cfg = &CFG;
9058
9059 fcsp_msg[0] = 0;
9060 pkt = PRIV2PKT(sbp);
9061 els_pkt = (ELS_PKT *)pkt->pkt_cmd;
9062 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
9063
9064 iocbq = &sbp->iocbq;
9065
9066 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9067 emlxs_swap_els_pkt(sbp);
9068 #endif /* EMLXS_MODREV2X */
9069
9070 cmd = *((uint32_t *)pkt->pkt_cmd);
9071 cmd &= ELS_CMD_MASK;
9072
9073 /* Point of no return, except for ADISC & PLOGI */
9074
9075 /* Check node */
9076 switch (cmd) {
9077 case ELS_CMD_FLOGI:
9078 case ELS_CMD_FDISC:
9079 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
9080
9081 if (emlxs_vpi_logi_notify(port, sbp)) {
9082 pkt->pkt_state = FC_PKT_LOCAL_RJT;
9083 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9084 emlxs_unswap_pkt(sbp);
9085 #endif /* EMLXS_MODREV2X */
9086 return (FC_FAILURE);
9087 }
9088 } else {
9089 /*
9090 * If FLOGI is already complete, then we
9091 * should not be receiving another FLOGI.
9092 * Reset the link to recover.
9093 */
9094 if (port->flag & EMLXS_PORT_FLOGI_CMPL) {
9095 pkt->pkt_state = FC_PKT_LOCAL_RJT;
9096 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9097 emlxs_unswap_pkt(sbp);
9098 #endif /* EMLXS_MODREV2X */
9099
9100 (void) emlxs_reset(port, FC_FCA_LINK_RESET);
9101 return (FC_FAILURE);
9102 }
9103
9104 if (port->vpi > 0) {
9105 *((uint32_t *)pkt->pkt_cmd) = ELS_CMD_FDISC;
9106 }
9107 }
9108
9109 /* Command may have been changed */
9110 cmd = *((uint32_t *)pkt->pkt_cmd);
9111 cmd &= ELS_CMD_MASK;
9112
9113 if (hba->flag & FC_NPIV_DELAY_REQUIRED) {
9114 sbp->pkt_flags |= PACKET_DELAY_REQUIRED;
9115 }
9116
9117 ndlp = NULL;
9118
9119 /* We will process these cmds at the bottom of this routine */
9120 break;
9121
9122 case ELS_CMD_PLOGI:
9123 		/* Make sure we don't log into ourselves */
9124 for (i = 0; i < MAX_VPORTS; i++) {
9125 vport = &VPORT(i);
9126
9127 if (!(vport->flag & EMLXS_INI_BOUND)) {
9128 continue;
9129 }
9130
9131 if (did == vport->did) {
9132 pkt->pkt_state = FC_PKT_NPORT_RJT;
9133
9134 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9135 emlxs_unswap_pkt(sbp);
9136 #endif /* EMLXS_MODREV2X */
9137
9138 return (FC_FAILURE);
9139 }
9140 }
9141
9142 ndlp = NULL;
9143
9144 if (hba->flag & FC_PT_TO_PT) {
9145 MAILBOXQ *mbox;
9146
9147 /* ULP bug fix */
9148 if (pkt->pkt_cmd_fhdr.s_id == 0) {
9149 pkt->pkt_cmd_fhdr.s_id = FP_DEFAULT_SID;
9150 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_send_msg,
9151 "PLOGI: P2P Fix. sid=0-->%x did=%x",
9152 pkt->pkt_cmd_fhdr.s_id,
9153 pkt->pkt_cmd_fhdr.d_id);
9154 }
9155
9156 mutex_enter(&EMLXS_PORT_LOCK);
9157 port->did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.s_id);
9158 port->rdid = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
9159 mutex_exit(&EMLXS_PORT_LOCK);
9160
9161 if (hba->sli_mode <= EMLXS_HBA_SLI3_MODE) {
9162 /* Update our service parms */
9163 if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
9164 MEM_MBOX))) {
9165 emlxs_mb_config_link(hba, mbox);
9166
9167 rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba,
9168 mbox, MBX_NOWAIT, 0);
9169 if ((rc != MBX_BUSY) &&
9170 (rc != MBX_SUCCESS)) {
9171 emlxs_mem_put(hba, MEM_MBOX,
9172 (void *)mbox);
9173 }
9174 }
9175 }
9176 }
9177
9178 /* We will process these cmds at the bottom of this routine */
9179 break;
9180
9181 default:
9182 ndlp = emlxs_node_find_did(port, did, 1);
9183
9184 /* If an ADISC is being sent and we have no node, */
9185 /* then we must fail the ADISC now */
9186 if (!ndlp && (cmd == ELS_CMD_ADISC) &&
9187 (port->mode == MODE_INITIATOR)) {
9188
9189 /* Build the LS_RJT response */
9190 els_pkt = (ELS_PKT *)pkt->pkt_resp;
9191 els_pkt->elsCode = 0x01;
9192 els_pkt->un.lsRjt.un.b.lsRjtRsvd0 = 0;
9193 els_pkt->un.lsRjt.un.b.lsRjtRsnCode =
9194 LSRJT_LOGICAL_ERR;
9195 els_pkt->un.lsRjt.un.b.lsRjtRsnCodeExp =
9196 LSEXP_NOTHING_MORE;
9197 els_pkt->un.lsRjt.un.b.vendorUnique = 0x03;
9198
9199 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
9200 "ADISC Rejected. Node not found. did=0x%x", did);
9201
9202 if (sbp->channel == NULL) {
9203 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
9204 sbp->channel =
9205 &hba->chan[hba->channel_els];
9206 } else {
9207 sbp->channel =
9208 &hba->chan[FC_ELS_RING];
9209 }
9210 }
9211
9212 /* Return this as rejected by the target */
9213 emlxs_pkt_complete(sbp, IOSTAT_LS_RJT, 0, 1);
9214
9215 return (FC_SUCCESS);
9216 }
9217 }
9218
9219 	/* DID == BCAST_DID is a special case to indicate that */
9220 /* RPI is being passed in seq_id field */
9221 /* This is used by emlxs_send_logo() for target mode */
9222
9223 	/* Initialize iocbq */
9224 iocbq->node = (void *)ndlp;
9225 if ((rval = EMLXS_SLI_PREP_ELS_IOCB(port, sbp)) != FC_SUCCESS) {
9226
9227 if (rval == 0xff) {
9228 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
9229 rval = FC_SUCCESS;
9230 }
9231
9232 return (rval);
9233 }
9234
9235 cp = &hba->chan[hba->channel_els];
9236 cp->ulpSendCmd++;
9237 sp = (SERV_PARM *)&els_pkt->un.logi;
9238
9239 /* Check cmd */
9240 switch (cmd) {
9241 case ELS_CMD_PRLI:
9242 /*
9243 * if our firmware version is 3.20 or later,
9244 * set the following bits for FC-TAPE support.
9245 */
9246 if ((port->mode == MODE_INITIATOR) &&
9247 (hba->vpd.feaLevelHigh >= 0x02) &&
9248 (cfg[CFG_ADISC_SUPPORT].current != 0)) {
9249 els_pkt->un.prli.ConfmComplAllowed = 1;
9250 els_pkt->un.prli.Retry = 1;
9251 els_pkt->un.prli.TaskRetryIdReq = 1;
9252 } else {
9253 els_pkt->un.prli.ConfmComplAllowed = 0;
9254 els_pkt->un.prli.Retry = 0;
9255 els_pkt->un.prli.TaskRetryIdReq = 0;
9256 }
9257
9258 break;
9259
9260 /* This is a patch for the ULP stack. */
9261
9262 /*
9263 * ULP only reads our service parameters once during bind_port,
9264 * but the service parameters change due to topology.
9265 */
9266 case ELS_CMD_FLOGI:
9267 case ELS_CMD_FDISC:
9268 case ELS_CMD_PLOGI:
9269 case ELS_CMD_PDISC:
9270 /* Copy latest service parameters to payload */
9271 bcopy((void *) &port->sparam, (void *)sp, sizeof (SERV_PARM));
9272
9273 if ((cmd == ELS_CMD_FLOGI) || (cmd == ELS_CMD_FDISC)) {
9274
9275 /* Clear support for virtual fabrics */
9276 /* randomOffset bit controls this for FLOGI */
9277 sp->cmn.randomOffset = 0;
9278
9279 /* Set R_A_TOV to current value */
9280 sp->cmn.w2.r_a_tov =
9281 LE_SWAP32((hba->fc_ratov * 1000));
9282 }
9283
9284 if ((hba->flag & FC_NPIV_ENABLED) &&
9285 (hba->flag & FC_NPIV_SUPPORTED) &&
9286 (cmd == ELS_CMD_PLOGI)) {
9287 emlxs_vvl_fmt_t *vvl;
9288
9289 sp->VALID_VENDOR_VERSION = 1;
9290 vvl = (emlxs_vvl_fmt_t *)&sp->vendorVersion[0];
9291 vvl->un0.w0.oui = 0x0000C9;
9292 vvl->un0.word0 = LE_SWAP32(vvl->un0.word0);
9293 vvl->un1.w1.vport = (port->vpi > 0) ? 1 : 0;
9294 vvl->un1.word1 = LE_SWAP32(vvl->un1.word1);
9295 }
9296
9297 #ifdef DHCHAP_SUPPORT
9298 emlxs_dhc_init_sp(port, did, sp, (char **)&fcsp_msg);
9299 #endif /* DHCHAP_SUPPORT */
9300
9301 break;
9302 }
9303
9304 /* Initialize the sbp */
9305 mutex_enter(&sbp->mtx);
9306 sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
9307 ((pkt->pkt_timeout > 0xff) ? 0 : 10);
9308 sbp->node = (void *)ndlp;
9309 sbp->lun = EMLXS_LUN_NONE;
9310 sbp->did = did;
9311 mutex_exit(&sbp->mtx);
9312
9313 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_send_msg, "%s: sid=%x did=%x %s",
9314 emlxs_elscmd_xlate(cmd), port->did, did, fcsp_msg);
9315
9316 if (pkt->pkt_cmdlen) {
9317 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
9318 DDI_DMA_SYNC_FORDEV);
9319 }
9320
9321 /* Check node */
9322 switch (cmd) {
9323 case ELS_CMD_FLOGI:
9324 case ELS_CMD_FDISC:
9325 if (port->mode == MODE_INITIATOR) {
9326 /* Make sure fabric node is destroyed */
9327 /* It should already have been destroyed at link down */
9328 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
9329 ndlp = emlxs_node_find_did(port, FABRIC_DID, 1);
9330 if (ndlp) {
9331 if (EMLXS_SLI_UNREG_NODE(port, ndlp,
9332 NULL, NULL, iocbq) == 0) {
9333 /* Deferring iocb tx until */
9334 /* completion of unreg */
9335 return (FC_SUCCESS);
9336 }
9337 }
9338 }
9339 }
9340 break;
9341
9342 case ELS_CMD_PLOGI:
9343
9344 ndlp = emlxs_node_find_did(port, did, 1);
9345
9346 if (ndlp && ndlp->nlp_active) {
9347 /* Close the node for any further normal IO */
9348 emlxs_node_close(port, ndlp, hba->channel_fcp,
9349 pkt->pkt_timeout + 10);
9350 emlxs_node_close(port, ndlp, hba->channel_ip,
9351 pkt->pkt_timeout + 10);
9352
9353 /* Flush tx queues */
9354 (void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);
9355
9356 /* Flush chip queues */
9357 (void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
9358 }
9359
9360 break;
9361
9362 case ELS_CMD_PRLI:
9363
9364 ndlp = emlxs_node_find_did(port, did, 1);
9365
9366 if (ndlp && ndlp->nlp_active) {
9367 /*
9368 * Close the node for any further FCP IO;
9369 * Flush all outstanding I/O only if
9370 * "Establish Image Pair" bit is set.
9371 */
9372 emlxs_node_close(port, ndlp, hba->channel_fcp,
9373 pkt->pkt_timeout + 10);
9374
9375 if (els_pkt->un.prli.estabImagePair) {
9376 /* Flush tx queues */
9377 (void) emlxs_tx_node_flush(port, ndlp,
9378 &hba->chan[hba->channel_fcp], 0, 0);
9379
9380 /* Flush chip queues */
9381 (void) emlxs_chipq_node_flush(port,
9382 &hba->chan[hba->channel_fcp], ndlp, 0);
9383 }
9384 }
9385
9386 break;
9387
9388 }
9389
9390 HBASTATS.ElsCmdIssued++;
9391
9392 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
9393
9394 return (FC_SUCCESS);
9395
9396 } /* emlxs_send_els() */
9397
9398
9399
9400
9401 static int32_t
9402 emlxs_send_els_rsp(emlxs_port_t *port, emlxs_buf_t *sbp)
9403 {
9404 emlxs_hba_t *hba = HBA;
9405 emlxs_config_t *cfg = &CFG;
9406 fc_packet_t *pkt;
9407 IOCBQ *iocbq;
9408 IOCB *iocb;
9409 NODELIST *ndlp;
9410 CHANNEL *cp;
9411 int i;
9412 uint32_t cmd;
9413 uint32_t ucmd;
9414 ELS_PKT *els_pkt;
9415 fc_unsol_buf_t *ubp;
9416 emlxs_ub_priv_t *ub_priv;
9417 uint32_t did;
9418 char fcsp_msg[32];
9419 uint8_t *ub_buffer;
9420 int32_t rval;
9421
9422 fcsp_msg[0] = 0;
9423 pkt = PRIV2PKT(sbp);
9424 els_pkt = (ELS_PKT *)pkt->pkt_cmd;
9425 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
9426
9427 iocbq = &sbp->iocbq;
9428 iocb = &iocbq->iocb;
9429
9430 /* Acquire the unsolicited command this pkt is replying to */
9431 if (pkt->pkt_cmd_fhdr.ox_id < EMLXS_UB_TOKEN_OFFSET) {
9432 /* This is for auto replies when no ub's are used */
9433 ucmd = pkt->pkt_cmd_fhdr.ox_id << ELS_CMD_SHIFT;
9434 ubp = NULL;
9435 ub_priv = NULL;
9436 ub_buffer = NULL;
9437
9438 #ifdef SFCT_SUPPORT
9439 if (sbp->fct_cmd) {
9440 fct_els_t *els =
9441 (fct_els_t *)sbp->fct_cmd->cmd_specific;
9442 ub_buffer = (uint8_t *)els->els_req_payload;
9443 }
9444 #endif /* SFCT_SUPPORT */
9445
9446 } else {
9447 /* Find the ub buffer that goes with this reply */
9448 if (!(ubp = emlxs_ub_find(port, pkt->pkt_cmd_fhdr.ox_id))) {
9449 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
9450 "ELS reply: Invalid oxid=%x",
9451 pkt->pkt_cmd_fhdr.ox_id);
9452 return (FC_BADPACKET);
9453 }
9454
9455 ub_buffer = (uint8_t *)ubp->ub_buffer;
9456 ub_priv = ubp->ub_fca_private;
9457 ucmd = ub_priv->cmd;
9458
9459 ub_priv->flags |= EMLXS_UB_REPLY;
9460
9461 /* Reset oxid to ELS command */
9462 /* We do this because the ub is only valid */
9463 /* until we return from this thread */
9464 pkt->pkt_cmd_fhdr.ox_id = (ucmd >> ELS_CMD_SHIFT) & 0xff;
9465 }
9466
9467 /* Save the result */
9468 sbp->ucmd = ucmd;
9469
9470 if (sbp->channel == NULL) {
9471 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
9472 sbp->channel = &hba->chan[hba->channel_els];
9473 } else {
9474 sbp->channel = &hba->chan[FC_ELS_RING];
9475 }
9476 }
9477
9478 /* Check for interceptions */
9479 switch (ucmd) {
9480
9481 #ifdef ULP_PATCH2
9482 case ELS_CMD_LOGO:
9483 if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH2)) {
9484 break;
9485 }
9486
9487 /* Check if this was generated by ULP and not us */
9488 if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {
9489
9490 /*
9491 * Since we replied to this already,
9492 * we won't need to send this now
9493 */
9494 emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
9495
9496 return (FC_SUCCESS);
9497 }
9498
9499 break;
9500 #endif /* ULP_PATCH2 */
9501
9502 #ifdef ULP_PATCH3
9503 case ELS_CMD_PRLI:
9504 if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH3)) {
9505 break;
9506 }
9507
9508 /* Check if this was generated by ULP and not us */
9509 if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {
9510
9511 /*
9512 * Since we replied to this already,
9513 * we won't need to send this now
9514 */
9515 emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
9516
9517 return (FC_SUCCESS);
9518 }
9519
9520 break;
9521 #endif /* ULP_PATCH3 */
9522
9523
9524 #ifdef ULP_PATCH4
9525 case ELS_CMD_PRLO:
9526 if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH4)) {
9527 break;
9528 }
9529
9530 /* Check if this was generated by ULP and not us */
9531 if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {
9532 /*
9533 * Since we replied to this already,
9534 * we won't need to send this now
9535 */
9536 emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
9537
9538 return (FC_SUCCESS);
9539 }
9540
9541 break;
9542 #endif /* ULP_PATCH4 */
9543
9544 #ifdef ULP_PATCH6
9545 case ELS_CMD_RSCN:
9546 if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH6)) {
9547 break;
9548 }
9549
9550 /* Check if this RSCN was generated by us */
9551 if (ub_priv && (ub_priv->flags & EMLXS_UB_INTERCEPT)) {
9552 cmd = *((uint32_t *)pkt->pkt_cmd);
9553 cmd = LE_SWAP32(cmd);
9554 cmd &= ELS_CMD_MASK;
9555
9556 /*
9557 * If ULP is accepting this,
9558 * then close affected node
9559 */
9560 if ((port->mode == MODE_INITIATOR) && ub_buffer &&
9561 cmd == ELS_CMD_ACC) {
9562 fc_rscn_t *rscn;
9563 uint32_t count;
9564 uint32_t *lp;
9565
9566 /*
9567 * Only the Leadville code path will
9568 * come thru here. The RSCN data is NOT
9569 * swapped properly for the Comstar code
9570 * path.
9571 */
9572 lp = (uint32_t *)ub_buffer;
9573 rscn = (fc_rscn_t *)lp++;
9574 count =
9575 ((rscn->rscn_payload_len - 4) / 4);
9576
9577 /* Close affected ports */
9578 for (i = 0; i < count; i++, lp++) {
9579 (void) emlxs_port_offline(port,
9580 *lp);
9581 }
9582 }
9583
9584 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
9585 "RSCN %s: did=%x oxid=%x rxid=%x. "
9586 "Intercepted.", emlxs_elscmd_xlate(cmd),
9587 did, pkt->pkt_cmd_fhdr.ox_id,
9588 pkt->pkt_cmd_fhdr.rx_id);
9589
9590 /*
9591 * Since we generated this RSCN,
9592 * we won't need to send this reply
9593 */
9594 emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
9595
9596 return (FC_SUCCESS);
9597 }
9598
9599 break;
9600 #endif /* ULP_PATCH6 */
9601
9602 case ELS_CMD_PLOGI:
9603 /* Check if this PLOGI was generated by us */
9604 if (ub_priv && (ub_priv->flags & EMLXS_UB_INTERCEPT)) {
9605 cmd = *((uint32_t *)pkt->pkt_cmd);
9606 cmd = LE_SWAP32(cmd);
9607 cmd &= ELS_CMD_MASK;
9608
9609 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
9610 "PLOGI %s: did=%x oxid=%x rxid=%x. "
9611 "Intercepted.", emlxs_elscmd_xlate(cmd),
9612 did, pkt->pkt_cmd_fhdr.ox_id,
9613 pkt->pkt_cmd_fhdr.rx_id);
9614
9615 /*
9616 * Since we generated this PLOGI,
9617 * we won't need to send this reply
9618 */
9619 emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
9620
9621 return (FC_SUCCESS);
9622 }
9623
9624 break;
9625 }
9626
9627 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9628 emlxs_swap_els_pkt(sbp);
9629 #endif /* EMLXS_MODREV2X */
9630
9631
9632 cmd = *((uint32_t *)pkt->pkt_cmd);
9633 cmd &= ELS_CMD_MASK;
9634
9635 /* Check if modifications are needed */
9636 switch (ucmd) {
9637 case (ELS_CMD_PRLI):
9638
9639 if (cmd == ELS_CMD_ACC) {
9640 /* This is a patch for the ULP stack. */
9641 /* ULP does not keep track of FCP2 support */
9642 if ((port->mode == MODE_INITIATOR) &&
9643 (hba->vpd.feaLevelHigh >= 0x02) &&
9644 (cfg[CFG_ADISC_SUPPORT].current != 0)) {
9645 els_pkt->un.prli.ConfmComplAllowed = 1;
9646 els_pkt->un.prli.Retry = 1;
9647 els_pkt->un.prli.TaskRetryIdReq = 1;
9648 } else {
9649 els_pkt->un.prli.ConfmComplAllowed = 0;
9650 els_pkt->un.prli.Retry = 0;
9651 els_pkt->un.prli.TaskRetryIdReq = 0;
9652 }
9653 }
9654
9655 break;
9656
9657 case ELS_CMD_FLOGI:
9658 case ELS_CMD_FDISC:
9659 if (cmd == ELS_CMD_ACC) {
9660 SERV_PARM *sp = (SERV_PARM *)&els_pkt->un.logi;
9661
9662 /* This is a patch for the ULP stack. */
9663
9664 /*
9665 * ULP only reads our service parameters
9666 * once during bind_port, but the service
9667 * parameters change due to topology.
9668 */
9669
9670 /* Copy latest service parameters to payload */
9671 bcopy((void *)&port->sparam,
9672 (void *)sp, sizeof (SERV_PARM));
9673
9674 /* We are in pt-to-pt mode. Set R_A_TOV to default */
9675 sp->cmn.w2.r_a_tov =
9676 LE_SWAP32((FF_DEF_RATOV * 1000));
9677
9678 /* Clear support for virtual fabrics */
9679 /* randomOffset bit controls this for FLOGI */
9680 sp->cmn.randomOffset = 0;
9681 #ifdef DHCHAP_SUPPORT
9682 emlxs_dhc_init_sp(port, did, sp, (char **)&fcsp_msg);
9683 #endif /* DHCHAP_SUPPORT */
9684 }
9685 break;
9686
9687 case ELS_CMD_PLOGI:
9688 case ELS_CMD_PDISC:
9689 if (cmd == ELS_CMD_ACC) {
9690 SERV_PARM *sp = (SERV_PARM *)&els_pkt->un.logi;
9691
9692 /* This is a patch for the ULP stack. */
9693
9694 /*
9695 * ULP only reads our service parameters
9696 * once during bind_port, but the service
9697 * parameters change due to topology.
9698 */
9699
9700 /* Copy latest service parameters to payload */
9701 bcopy((void *)&port->sparam,
9702 (void *)sp, sizeof (SERV_PARM));
9703
9704 #ifdef DHCHAP_SUPPORT
9705 emlxs_dhc_init_sp(port, did, sp, (char **)&fcsp_msg);
9706 #endif /* DHCHAP_SUPPORT */
9707 }
9708 break;
9709
9710 }
9711
9712 	/* Initialize iocbq */
9713 iocbq->node = (void *)NULL;
9714 if ((rval = EMLXS_SLI_PREP_ELS_IOCB(port, sbp)) != FC_SUCCESS) {
9715
9716 if (rval == 0xff) {
9717 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
9718 rval = FC_SUCCESS;
9719 }
9720
9721 return (rval);
9722 }
9723
9724 cp = &hba->chan[hba->channel_els];
9725 cp->ulpSendCmd++;
9726
9727 	/* Initialize sbp */
9728 mutex_enter(&sbp->mtx);
9729 sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
9730 ((pkt->pkt_timeout > 0xff) ? 0 : 10);
9731 sbp->node = (void *) NULL;
9732 sbp->lun = EMLXS_LUN_NONE;
9733 sbp->class = iocb->ULPCLASS;
9734 sbp->did = did;
9735 mutex_exit(&sbp->mtx);
9736
9737 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
9738 "%s %s: did=%x oxid=%x rxid=%x %s", emlxs_elscmd_xlate(ucmd),
9739 emlxs_elscmd_xlate(cmd), did, pkt->pkt_cmd_fhdr.ox_id,
9740 pkt->pkt_cmd_fhdr.rx_id, fcsp_msg);
9741
9742 /* Process nodes */
9743 switch (ucmd) {
9744 case ELS_CMD_RSCN:
9745 if ((port->mode == MODE_INITIATOR) && ub_buffer &&
9746 cmd == ELS_CMD_ACC) {
9747 fc_rscn_t *rscn;
9748 uint32_t count;
9749 uint32_t *lp = NULL;
9750
9751 /*
9752 * Only the Leadville code path will come thru
9753 * here. The RSCN data is NOT swapped properly
9754 * for the Comstar code path.
9755 */
9756 lp = (uint32_t *)ub_buffer;
9757 rscn = (fc_rscn_t *)lp++;
9758 count = ((rscn->rscn_payload_len - 4) / 4);
9759
9760 /* Close affected ports */
9761 for (i = 0; i < count; i++, lp++) {
9762 (void) emlxs_port_offline(port, *lp);
9763 }
9764 }
9765 break;
9766
9767 case ELS_CMD_PLOGI:
9768 if (cmd == ELS_CMD_ACC) {
9769 ndlp = emlxs_node_find_did(port, did, 1);
9770
9771 if (ndlp && ndlp->nlp_active) {
9772 /* Close the node for any further normal IO */
9773 emlxs_node_close(port, ndlp, hba->channel_fcp,
9774 pkt->pkt_timeout + 10);
9775 emlxs_node_close(port, ndlp, hba->channel_ip,
9776 pkt->pkt_timeout + 10);
9777
9778 /* Flush tx queue */
9779 (void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);
9780
9781 /* Flush chip queue */
9782 (void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
9783 }
9784 }
9785 break;
9786
9787 case ELS_CMD_PRLI:
9788 if (cmd == ELS_CMD_ACC) {
9789 ndlp = emlxs_node_find_did(port, did, 1);
9790
9791 if (ndlp && ndlp->nlp_active) {
9792 /* Close the node for any further normal IO */
9793 emlxs_node_close(port, ndlp, hba->channel_fcp,
9794 pkt->pkt_timeout + 10);
9795
9796 /* Flush tx queues */
9797 (void) emlxs_tx_node_flush(port, ndlp,
9798 &hba->chan[hba->channel_fcp], 0, 0);
9799
9800 /* Flush chip queues */
9801 (void) emlxs_chipq_node_flush(port,
9802 &hba->chan[hba->channel_fcp], ndlp, 0);
9803 }
9804 }
9805 break;
9806
9807 case ELS_CMD_PRLO:
9808 if (cmd == ELS_CMD_ACC) {
9809 ndlp = emlxs_node_find_did(port, did, 1);
9810
9811 if (ndlp && ndlp->nlp_active) {
9812 /* Close the node for any further normal IO */
9813 emlxs_node_close(port, ndlp,
9814 hba->channel_fcp, 60);
9815
9816 /* Flush tx queues */
9817 (void) emlxs_tx_node_flush(port, ndlp,
9818 &hba->chan[hba->channel_fcp], 0, 0);
9819
9820 /* Flush chip queues */
9821 (void) emlxs_chipq_node_flush(port,
9822 &hba->chan[hba->channel_fcp], ndlp, 0);
9823 }
9824 }
9825
9826 break;
9827
9828 case ELS_CMD_LOGO:
9829 if (cmd == ELS_CMD_ACC) {
9830 ndlp = emlxs_node_find_did(port, did, 1);
9831
9832 if (ndlp && ndlp->nlp_active) {
9833 /* Close the node for any further normal IO */
9834 emlxs_node_close(port, ndlp,
9835 hba->channel_fcp, 60);
9836 emlxs_node_close(port, ndlp,
9837 hba->channel_ip, 60);
9838
9839 /* Flush tx queues */
9840 (void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);
9841
9842 /* Flush chip queues */
9843 (void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
9844 }
9845 }
9846
9847 break;
9848 }
9849
9850 if (pkt->pkt_cmdlen) {
9851 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
9852 DDI_DMA_SYNC_FORDEV);
9853 }
9854
9855 HBASTATS.ElsRspIssued++;
9856
9857 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
9858
9859 return (FC_SUCCESS);
9860
9861 } /* emlxs_send_els_rsp() */
9862
9863
9864 #ifdef MENLO_SUPPORT
9865 static int32_t
9866 emlxs_send_menlo(emlxs_port_t *port, emlxs_buf_t *sbp)
9867 {
9868 emlxs_hba_t *hba = HBA;
9869 fc_packet_t *pkt;
9870 IOCBQ *iocbq;
9871 IOCB *iocb;
9872 CHANNEL *cp;
9873 NODELIST *ndlp;
9874 uint32_t did;
9875 uint32_t *lp;
9876 int32_t rval;
9877
9878 pkt = PRIV2PKT(sbp);
9879 did = EMLXS_MENLO_DID;
9880 lp = (uint32_t *)pkt->pkt_cmd;
9881
9882 iocbq = &sbp->iocbq;
9883 iocb = &iocbq->iocb;
9884
9885 ndlp = emlxs_node_find_did(port, did, 1);
9886
9887 if (!ndlp || !ndlp->nlp_active) {
9888 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
9889 "Node not found. did=0x%x", did);
9890
9891 return (FC_BADPACKET);
9892 }
9893
9894 iocbq->node = (void *) ndlp;
9895 if ((rval = EMLXS_SLI_PREP_CT_IOCB(port, sbp)) != FC_SUCCESS) {
9896
9897 if (rval == 0xff) {
9898 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
9899 rval = FC_SUCCESS;
9900 }
9901
9902 return (rval);
9903 }
9904
9905 cp = &hba->chan[hba->channel_ct];
9906 cp->ulpSendCmd++;
9907
9908 if (pkt->pkt_tran_type == FC_PKT_EXCHANGE) {
9909 /* Cmd phase */
9910
9911 		/* Initialize iocb */
9912 iocb->un.genreq64.param = pkt->pkt_cmd_fhdr.d_id;
9913 iocb->ULPCONTEXT = 0;
9914 iocb->ULPPU = 3;
9915
9916 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
9917 "%s: [%08x,%08x,%08x,%08x]",
9918 emlxs_menlo_cmd_xlate(BE_SWAP32(lp[0])), BE_SWAP32(lp[1]),
9919 BE_SWAP32(lp[2]), BE_SWAP32(lp[3]), BE_SWAP32(lp[4]));
9920
9921 } else { /* FC_PKT_OUTBOUND */
9922
9923 /* MENLO_CMD_FW_DOWNLOAD Data Phase */
9924 iocb->ULPCOMMAND = CMD_GEN_REQUEST64_CX;
9925
9926 		/* Initialize iocb */
9927 iocb->un.genreq64.param = 0;
9928 iocb->ULPCONTEXT = pkt->pkt_cmd_fhdr.rx_id;
9929 iocb->ULPPU = 1;
9930
9931 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
9932 "%s: Data: rxid=0x%x size=%d",
9933 emlxs_menlo_cmd_xlate(MENLO_CMD_FW_DOWNLOAD),
9934 pkt->pkt_cmd_fhdr.rx_id, pkt->pkt_cmdlen);
9935 }
9936
9937 	/* Initialize sbp */
9938 mutex_enter(&sbp->mtx);
9939 sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
9940 ((pkt->pkt_timeout > 0xff) ? 0 : 10);
9941 sbp->node = (void *) ndlp;
9942 sbp->lun = EMLXS_LUN_NONE;
9943 sbp->class = iocb->ULPCLASS;
9944 sbp->did = did;
9945 mutex_exit(&sbp->mtx);
9946
9947 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
9948 DDI_DMA_SYNC_FORDEV);
9949
9950 HBASTATS.CtCmdIssued++;
9951
9952 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
9953
9954 return (FC_SUCCESS);
9955
9956 } /* emlxs_send_menlo() */
9957 #endif /* MENLO_SUPPORT */
9958
9959
9960 static int32_t
9961 emlxs_send_ct(emlxs_port_t *port, emlxs_buf_t *sbp)
9962 {
9963 emlxs_hba_t *hba = HBA;
9964 fc_packet_t *pkt;
9965 IOCBQ *iocbq;
9966 IOCB *iocb;
9967 NODELIST *ndlp;
9968 uint32_t did;
9969 CHANNEL *cp;
9970 int32_t rval;
9971
9972 pkt = PRIV2PKT(sbp);
9973 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
9974
9975 iocbq = &sbp->iocbq;
9976 iocb = &iocbq->iocb;
9977
9978 ndlp = emlxs_node_find_did(port, did, 1);
9979
9980 if (!ndlp || !ndlp->nlp_active) {
9981 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
9982 "Node not found. did=0x%x", did);
9983
9984 return (FC_BADPACKET);
9985 }
9986
9987 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9988 emlxs_swap_ct_pkt(sbp);
9989 #endif /* EMLXS_MODREV2X */
9990
9991 iocbq->node = (void *)ndlp;
9992 if ((rval = EMLXS_SLI_PREP_CT_IOCB(port, sbp)) != FC_SUCCESS) {
9993
9994 if (rval == 0xff) {
9995 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
9996 rval = FC_SUCCESS;
9997 }
9998
9999 return (rval);
10000 }
10001
10002 cp = &hba->chan[hba->channel_ct];
10003 cp->ulpSendCmd++;
10004
10005 	/* Initialize sbp */
10006 mutex_enter(&sbp->mtx);
10007 sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
10008 ((pkt->pkt_timeout > 0xff) ? 0 : 10);
10009 sbp->node = (void *)ndlp;
10010 sbp->lun = EMLXS_LUN_NONE;
10011 sbp->class = iocb->ULPCLASS;
10012 sbp->did = did;
10013 mutex_exit(&sbp->mtx);
10014
10015 if (did == NAMESERVER_DID) {
10016 SLI_CT_REQUEST *CtCmd;
10017 uint32_t *lp0;
10018
10019 CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
10020 lp0 = (uint32_t *)pkt->pkt_cmd;
10021
10022 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
10023 "%s: did=%x [%08x,%08x]",
10024 emlxs_ctcmd_xlate(
10025 LE_SWAP16(CtCmd->CommandResponse.bits.CmdRsp)),
10026 did, LE_SWAP32(lp0[4]), LE_SWAP32(lp0[5]));
10027
10028 if (hba->flag & FC_NPIV_DELAY_REQUIRED) {
10029 sbp->pkt_flags |= PACKET_DELAY_REQUIRED;
10030 }
10031
10032 } else if (did == FDMI_DID) {
10033 SLI_CT_REQUEST *CtCmd;
10034 uint32_t *lp0;
10035
10036 CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
10037 lp0 = (uint32_t *)pkt->pkt_cmd;
10038
10039 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
10040 "%s: did=%x [%08x,%08x]",
10041 emlxs_mscmd_xlate(
10042 LE_SWAP16(CtCmd->CommandResponse.bits.CmdRsp)),
10043 did, LE_SWAP32(lp0[4]), LE_SWAP32(lp0[5]));
10044 } else {
10045 SLI_CT_REQUEST *CtCmd;
10046 uint32_t *lp0;
10047
10048 CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
10049 lp0 = (uint32_t *)pkt->pkt_cmd;
10050
10051 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
10052 "%s: did=%x [%08x,%08x]",
10053 emlxs_rmcmd_xlate(
10054 LE_SWAP16(CtCmd->CommandResponse.bits.CmdRsp)),
10055 did, LE_SWAP32(lp0[4]), LE_SWAP32(lp0[5]));
10056 }
10057
10058 if (pkt->pkt_cmdlen) {
10059 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
10060 DDI_DMA_SYNC_FORDEV);
10061 }
10062
10063 HBASTATS.CtCmdIssued++;
10064
10065 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
10066
10067 return (FC_SUCCESS);
10068
10069 } /* emlxs_send_ct() */
10070
10071
10072 static int32_t
10073 emlxs_send_ct_rsp(emlxs_port_t *port, emlxs_buf_t *sbp)
10074 {
10075 emlxs_hba_t *hba = HBA;
10076 fc_packet_t *pkt;
10077 CHANNEL *cp;
10078 IOCBQ *iocbq;
10079 IOCB *iocb;
10080 uint32_t *cmd;
10081 SLI_CT_REQUEST *CtCmd;
10082 int32_t rval;
10083
10084 pkt = PRIV2PKT(sbp);
10085 CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
10086 cmd = (uint32_t *)pkt->pkt_cmd;
10087
10088 iocbq = &sbp->iocbq;
10089 iocb = &iocbq->iocb;
10090
10091 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
10092 emlxs_swap_ct_pkt(sbp);
10093 #endif /* EMLXS_MODREV2X */
10094
10095 if ((rval = EMLXS_SLI_PREP_CT_IOCB(port, sbp)) != FC_SUCCESS) {
10096
10097 if (rval == 0xff) {
10098 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
10099 rval = FC_SUCCESS;
10100 }
10101
10102 return (rval);
10103 }
10104
10105 cp = &hba->chan[hba->channel_ct];
10106 cp->ulpSendCmd++;
10107
10108 	/* Initialize sbp */
10109 mutex_enter(&sbp->mtx);
10110 sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
10111 ((pkt->pkt_timeout > 0xff) ? 0 : 10);
10112 sbp->node = NULL;
10113 sbp->lun = EMLXS_LUN_NONE;
10114 sbp->class = iocb->ULPCLASS;
10115 mutex_exit(&sbp->mtx);
10116
10117 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_reply_msg,
10118 "%s: Rsn=%x Exp=%x [%08x,%08x] rxid=%x ",
10119 emlxs_rmcmd_xlate(LE_SWAP16(
10120 CtCmd->CommandResponse.bits.CmdRsp)),
10121 CtCmd->ReasonCode, CtCmd->Explanation,
10122 LE_SWAP32(cmd[4]), LE_SWAP32(cmd[5]),
10123 pkt->pkt_cmd_fhdr.rx_id);
10124
10125 if (pkt->pkt_cmdlen) {
10126 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
10127 DDI_DMA_SYNC_FORDEV);
10128 }
10129
10130 HBASTATS.CtRspIssued++;
10131
10132 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
10133
10134 return (FC_SUCCESS);
10135
10136 } /* emlxs_send_ct_rsp() */
10137
10138
10139 /*
10140 * emlxs_get_instance()
10141  * Given a DDI instance number, return the corresponding emlxs instance index.
10142 */
10143 extern uint32_t
10144 emlxs_get_instance(int32_t ddiinst)
10145 {
10146 uint32_t i;
10147 uint32_t inst;
10148
10149 mutex_enter(&emlxs_device.lock);
10150
10151 inst = MAX_FC_BRDS;
10152 for (i = 0; i < emlxs_instance_count; i++) {
10153 if (emlxs_instance[i] == ddiinst) {
10154 inst = i;
10155 break;
10156 }
10157 }
10158
10159 mutex_exit(&emlxs_device.lock);
10160
10161 return (inst);
10162
10163 } /* emlxs_get_instance() */
10164
10165
10166 /*
10167 * emlxs_add_instance()
10168  * Given a DDI instance number, register it and return its emlxs instance index.
10169  * emlxs instances are assigned in the order emlxs_attach() is called, starting at 0.
10170 */
10171 static uint32_t
10172 emlxs_add_instance(int32_t ddiinst)
10173 {
10174 uint32_t i;
10175
10176 mutex_enter(&emlxs_device.lock);
10177
10178 /* First see if the ddiinst already exists */
10179 for (i = 0; i < emlxs_instance_count; i++) {
10180 if (emlxs_instance[i] == ddiinst) {
10181 break;
10182 }
10183 }
10184
10185 /* If it doesn't already exist, add it */
10186 if (i >= emlxs_instance_count) {
10187 if ((i = emlxs_instance_count) < MAX_FC_BRDS) {
10188 emlxs_instance[i] = ddiinst;
10189 emlxs_instance_count++;
10190 emlxs_device.hba_count = emlxs_instance_count;
10191 }
10192 }
10193
10194 mutex_exit(&emlxs_device.lock);
10195
10196 return (i);
10197
10198 } /* emlxs_add_instance() */
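
/*
 * For illustration: if emlxs_attach() is called for DDI instances 3, 0 and
 * 7 in that order, they are recorded as emlxs instances 0, 1 and 2, and
 * emlxs_get_instance() maps each DDI instance back to its emlxs index.
 */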
10199
10200
10201 /*ARGSUSED*/
10202 extern void
10203 emlxs_pkt_complete(emlxs_buf_t *sbp, uint32_t iostat, uint8_t localstat,
10204 uint32_t doneq)
10205 {
10206 emlxs_hba_t *hba;
10207 emlxs_port_t *port;
10208 emlxs_buf_t *fpkt;
10209
10210 port = sbp->port;
10211
10212 if (!port) {
10213 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_completion_error_msg,
10214 "NULL port found. sbp=%p flags=%x", sbp, sbp->pkt_flags);
10215
10216 return;
10217 }
10218
10219 hba = HBA;
10220
10221 if ((hba->sli_mode == EMLXS_HBA_SLI4_MODE) &&
10222 (sbp->iotag)) {
10223 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
10224 "WARNING: Completing IO with iotag. sbp=%p iotag=%d "
10225 "xri_flags=%x",
10226 sbp, sbp->iotag, ((sbp->xrip)? sbp->xrip->flag:0));
10227
10228 emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
10229 }
10230
10231 mutex_enter(&sbp->mtx);
10232
10233 /* Check for error conditions */
10234 if (sbp->pkt_flags & (PACKET_ULP_OWNED | PACKET_COMPLETED |
10235 PACKET_IN_DONEQ | PACKET_IN_COMPLETION |
10236 PACKET_IN_TXQ | PACKET_IN_CHIPQ)) {
10237 if (sbp->pkt_flags & PACKET_ULP_OWNED) {
10238 EMLXS_MSGF(EMLXS_CONTEXT,
10239 &emlxs_pkt_completion_error_msg,
10240 "Packet already returned. sbp=%p flags=%x", sbp,
10241 sbp->pkt_flags);
10242 }
10243
10244 else if (sbp->pkt_flags & PACKET_COMPLETED) {
10245 EMLXS_MSGF(EMLXS_CONTEXT,
10246 &emlxs_pkt_completion_error_msg,
10247 "Packet already completed. sbp=%p flags=%x", sbp,
10248 sbp->pkt_flags);
10249 }
10250
10251 else if (sbp->pkt_flags & PACKET_IN_DONEQ) {
10252 EMLXS_MSGF(EMLXS_CONTEXT,
10253 &emlxs_pkt_completion_error_msg,
10254 "Pkt already on done queue. sbp=%p flags=%x", sbp,
10255 sbp->pkt_flags);
10256 }
10257
10258 else if (sbp->pkt_flags & PACKET_IN_COMPLETION) {
10259 EMLXS_MSGF(EMLXS_CONTEXT,
10260 &emlxs_pkt_completion_error_msg,
10261 "Packet already in completion. sbp=%p flags=%x",
10262 sbp, sbp->pkt_flags);
10263 }
10264
10265 else if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
10266 EMLXS_MSGF(EMLXS_CONTEXT,
10267 &emlxs_pkt_completion_error_msg,
10268 "Packet still on chip queue. sbp=%p flags=%x",
10269 sbp, sbp->pkt_flags);
10270 }
10271
10272 else if (sbp->pkt_flags & PACKET_IN_TXQ) {
10273 EMLXS_MSGF(EMLXS_CONTEXT,
10274 &emlxs_pkt_completion_error_msg,
10275 "Packet still on tx queue. sbp=%p flags=%x", sbp,
10276 sbp->pkt_flags);
10277 }
10278
10279 mutex_exit(&sbp->mtx);
10280 return;
10281 }
10282
10283 /* Packet is now in completion */
10284 sbp->pkt_flags |= PACKET_IN_COMPLETION;
10285
10286 /* Set the state if not already set */
10287 if (!(sbp->pkt_flags & PACKET_STATE_VALID)) {
10288 emlxs_set_pkt_state(sbp, iostat, localstat, 0);
10289 }
10290
10291 /* Check for parent flush packet */
10292 /* If pkt has a parent flush packet then adjust its count now */
10293 fpkt = sbp->fpkt;
10294 if (fpkt) {
10295 /*
10296 * We will try to NULL sbp->fpkt inside the
10297 * fpkt's mutex if possible
10298 */
10299
10300 if (!(fpkt->pkt_flags & PACKET_ULP_OWNED)) {
10301 mutex_enter(&fpkt->mtx);
10302 if (fpkt->flush_count) {
10303 fpkt->flush_count--;
10304 }
10305 sbp->fpkt = NULL;
10306 mutex_exit(&fpkt->mtx);
10307 } else { /* fpkt has been returned already */
10308
10309 sbp->fpkt = NULL;
10310 }
10311 }
10312
10313 /* If pkt is polled, then wake up sleeping thread */
10314 if (sbp->pkt_flags & PACKET_POLLED) {
10315 /* Don't set the PACKET_ULP_OWNED flag here */
10316 /* because the polling thread will do it */
10317 sbp->pkt_flags |= PACKET_COMPLETED;
10318 mutex_exit(&sbp->mtx);
10319
10320 /* Wake up sleeping thread */
10321 mutex_enter(&EMLXS_PKT_LOCK);
10322 cv_broadcast(&EMLXS_PKT_CV);
10323 mutex_exit(&EMLXS_PKT_LOCK);
10324 }
10325
10326 /* If packet was generated by our driver, */
10327 /* then complete it immediately */
10328 else if (sbp->pkt_flags & PACKET_ALLOCATED) {
10329 mutex_exit(&sbp->mtx);
10330
10331 emlxs_iodone(sbp);
10332 }
10333
10334 /* Put the pkt on the done queue for callback */
10335 /* completion in another thread */
10336 else {
10337 sbp->pkt_flags |= PACKET_IN_DONEQ;
10338 sbp->next = NULL;
10339 mutex_exit(&sbp->mtx);
10340
10341 /* Put pkt on doneq, so I/O's will be completed in order */
10342 mutex_enter(&EMLXS_PORT_LOCK);
10343 if (hba->iodone_tail == NULL) {
10344 hba->iodone_list = sbp;
10345 hba->iodone_count = 1;
10346 } else {
10347 hba->iodone_tail->next = sbp;
10348 hba->iodone_count++;
10349 }
10350 hba->iodone_tail = sbp;
10351 mutex_exit(&EMLXS_PORT_LOCK);
10352
10353 /* Trigger a thread to service the doneq */
10354 emlxs_thread_trigger1(&hba->iodone_thread,
10355 emlxs_iodone_server);
10356 }
10357
10358 return;
10359
10360 } /* emlxs_pkt_complete() */
10361
10362
10363 #ifdef SAN_DIAG_SUPPORT
10364 /*
10365 * This routine is called with EMLXS_PORT_LOCK held so we can just increment
10366 * normally. Don't have to use atomic operations.
10367 */
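/*
 * emlxs_update_sd_bucket() computes the I/O latency (gethrtime() minus the
 * packet's sd_start_time) and increments the matching latency bucket on the
 * destination node (ndlp->sd_dev_bucket[]).  Latencies at or below the first
 * bucket boundary land in bucket 0; latencies at or above the last boundary
 * land in the final bucket.
 */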
10368 extern void
10369 emlxs_update_sd_bucket(emlxs_buf_t *sbp)
10370 {
10371 emlxs_port_t *vport;
10372 fc_packet_t *pkt;
10373 uint32_t did;
10374 hrtime_t t;
10375 hrtime_t delta_time;
10376 int i;
10377 NODELIST *ndlp;
10378
10379 vport = sbp->port;
10380
10381 if ((emlxs_sd_bucket.search_type == 0) ||
10382 (vport->sd_io_latency_state != SD_COLLECTING)) {
10383 return;
10384 }
10385
10386 /* Compute the iolatency time in microseconds */
10387 t = gethrtime();
10388 delta_time = t - sbp->sd_start_time;
10389 pkt = PRIV2PKT(sbp);
10390 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
10391 ndlp = emlxs_node_find_did(vport, did, 1);
10392
10393 if (!ndlp) {
10394 return;
10395 }
10396
10397 if (delta_time >=
10398 emlxs_sd_bucket.values[SD_IO_LATENCY_MAX_BUCKETS - 1]) {
10399 ndlp->sd_dev_bucket[SD_IO_LATENCY_MAX_BUCKETS - 1].
10400 count++;
10401 } else if (delta_time <= emlxs_sd_bucket.values[0]) {
10402 ndlp->sd_dev_bucket[0].count++;
10403 } else {
10404 for (i = 1; i < SD_IO_LATENCY_MAX_BUCKETS; i++) {
10405 if ((delta_time > emlxs_sd_bucket.values[i-1]) &&
10406 (delta_time <= emlxs_sd_bucket.values[i])) {
10407 ndlp->sd_dev_bucket[i].count++;
10408 break;
10409 }
10410 }
10411 }
10412
10413 return;
10414
10415 } /* emlxs_update_sd_bucket() */
10416 #endif /* SAN_DIAG_SUPPORT */
10417
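/*
 * emlxs_iodone_server() drains the HBA iodone list.  EMLXS_PORT_LOCK is held
 * only while unlinking each packet from the list; it is dropped before
 * emlxs_iodone() is called so that ULP completion callbacks never run with
 * the port lock held.
 */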
10418 /*ARGSUSED*/
10419 static void
10420 emlxs_iodone_server(void *arg1, void *arg2, void *arg3)
10421 {
10422 emlxs_hba_t *hba = (emlxs_hba_t *)arg1;
10423 emlxs_buf_t *sbp;
10424
10425 mutex_enter(&EMLXS_PORT_LOCK);
10426
10427 /* Remove one pkt from the doneq head and complete it */
10428 while ((sbp = hba->iodone_list) != NULL) {
10429 if ((hba->iodone_list = sbp->next) == NULL) {
10430 hba->iodone_tail = NULL;
10431 hba->iodone_count = 0;
10432 } else {
10433 hba->iodone_count--;
10434 }
10435
10436 mutex_exit(&EMLXS_PORT_LOCK);
10437
10438 /* Prepare the pkt for completion */
10439 mutex_enter(&sbp->mtx);
10440 sbp->next = NULL;
10441 sbp->pkt_flags &= ~PACKET_IN_DONEQ;
10442 mutex_exit(&sbp->mtx);
10443
10444 /* Complete the IO now */
10445 emlxs_iodone(sbp);
10446
10447 /* Reacquire lock and check if more work is to be done */
10448 mutex_enter(&EMLXS_PORT_LOCK);
10449 }
10450
10451 mutex_exit(&EMLXS_PORT_LOCK);
10452
10453 #ifdef FMA_SUPPORT
10454 if (hba->flag & FC_DMA_CHECK_ERROR) {
10455 emlxs_thread_spawn(hba, emlxs_restart_thread,
10456 NULL, NULL);
10457 }
10458 #endif /* FMA_SUPPORT */
10459
10460 return;
10461
10462 } /* End emlxs_iodone_server */
10463
10464
10465 static void
10466 emlxs_iodone(emlxs_buf_t *sbp)
10467 {
10468 #ifdef FMA_SUPPORT
10469 emlxs_port_t *port = sbp->port;
10470 emlxs_hba_t *hba = port->hba;
10471 #endif /* FMA_SUPPORT */
10472
10473 fc_packet_t *pkt;
10474 CHANNEL *cp;
10475
10476 pkt = PRIV2PKT(sbp);
10477
10478 /* Check one more time that the pkt has not already been returned */
10479 if (sbp->pkt_flags & PACKET_ULP_OWNED) {
10480 return;
10481 }
10482
10483 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
10484 emlxs_unswap_pkt(sbp);
10485 #endif /* EMLXS_MODREV2X */
10486
10487 mutex_enter(&sbp->mtx);
10488 sbp->pkt_flags |= (PACKET_COMPLETED | PACKET_ULP_OWNED);
10489 mutex_exit(&sbp->mtx);
10490
10491 if (pkt->pkt_comp) {
10492 #ifdef FMA_SUPPORT
10493 emlxs_check_dma(hba, sbp);
10494 #endif /* FMA_SUPPORT */
10495
10496 if (sbp->channel) {
10497 cp = (CHANNEL *)sbp->channel;
10498 cp->ulpCmplCmd++;
10499 }
10500
10501 (*pkt->pkt_comp) (pkt);
10502 }
10503
10504 return;
10505
10506 } /* emlxs_iodone() */
10507
10508
10509
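/*
 * emlxs_ub_find() maps an unsolicited buffer token back to its fc_unsol_buf_t.
 * Each pool owns a contiguous token range [pool_first_token, pool_last_token],
 * so (token - pool_first_token) indexes directly into the pool's fc_ubufs[]
 * array.  NULL is returned if the token is out of range, does not match the
 * buffer's private token, or the buffer is not marked EMLXS_UB_IN_USE.
 */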
10510 extern fc_unsol_buf_t *
10511 emlxs_ub_find(emlxs_port_t *port, uint32_t token)
10512 {
10513 emlxs_unsol_buf_t *pool;
10514 fc_unsol_buf_t *ubp;
10515 emlxs_ub_priv_t *ub_priv;
10516
10517 /* Check if this is a valid ub token */
10518 if (token < EMLXS_UB_TOKEN_OFFSET) {
10519 return (NULL);
10520 }
10521
10522 mutex_enter(&EMLXS_UB_LOCK);
10523
10524 pool = port->ub_pool;
10525 while (pool) {
10526 /* Find a pool with the proper token range */
10527 if (token >= pool->pool_first_token &&
10528 token <= pool->pool_last_token) {
10529 ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[(token -
10530 pool->pool_first_token)];
10531 ub_priv = ubp->ub_fca_private;
10532
10533 if (ub_priv->token != token) {
10534 EMLXS_MSGF(EMLXS_CONTEXT,
10535 &emlxs_sfs_debug_msg,
10536 				    "ub_find: Invalid token. buffer=%p token=%x "
10537 				    "found=%x", ubp, token, ub_priv->token);
10538
10539 ubp = NULL;
10540 }
10541
10542 else if (!(ub_priv->flags & EMLXS_UB_IN_USE)) {
10543 EMLXS_MSGF(EMLXS_CONTEXT,
10544 &emlxs_sfs_debug_msg,
10545 "ub_find: Buffer not in use. buffer=%p "
10546 "token=%x", ubp, token);
10547
10548 ubp = NULL;
10549 }
10550
10551 mutex_exit(&EMLXS_UB_LOCK);
10552
10553 return (ubp);
10554 }
10555
10556 pool = pool->pool_next;
10557 }
10558
10559 mutex_exit(&EMLXS_UB_LOCK);
10560
10561 return (NULL);
10562
10563 } /* emlxs_ub_find() */
10564
10565
10566
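/*
 * emlxs_ub_get() allocates an unsolicited buffer from the first pool whose
 * type matches and whose buffers are at least 'size' bytes.  Each pool keeps
 * two counters: pool_free_resv (reserved buffers, consumed when 'reserve' is
 * requested) and pool_free (general buffers).  The chosen buffer is stamped
 * with the current timer tick and a 5 minute timeout, and flagged
 * EMLXS_UB_IN_USE (plus EMLXS_UB_RESV when taken from the reserve count).
 */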
10567 extern fc_unsol_buf_t *
10568 emlxs_ub_get(emlxs_port_t *port, uint32_t size, uint32_t type,
10569 uint32_t reserve)
10570 {
10571 emlxs_hba_t *hba = HBA;
10572 emlxs_unsol_buf_t *pool;
10573 fc_unsol_buf_t *ubp;
10574 emlxs_ub_priv_t *ub_priv;
10575 uint32_t i;
10576 uint32_t resv_flag;
10577 uint32_t pool_free;
10578 uint32_t pool_free_resv;
10579
10580 mutex_enter(&EMLXS_UB_LOCK);
10581
10582 pool = port->ub_pool;
10583 while (pool) {
10584 /* Find a pool of the appropriate type and size */
10585 if ((pool->pool_available == 0) ||
10586 (pool->pool_type != type) ||
10587 (pool->pool_buf_size < size)) {
10588 goto next_pool;
10589 }
10590
10591
10592 		/* Adjust free counts based on availability */
10593 /* The free reserve count gets first priority */
10594 pool_free_resv =
10595 min(pool->pool_free_resv, pool->pool_available);
10596 pool_free =
10597 min(pool->pool_free,
10598 (pool->pool_available - pool_free_resv));
10599
10600 /* Initialize reserve flag */
10601 resv_flag = reserve;
10602
10603 if (resv_flag) {
10604 if (pool_free_resv == 0) {
10605 if (pool_free == 0) {
10606 goto next_pool;
10607 }
10608 resv_flag = 0;
10609 }
10610 } else if (pool_free == 0) {
10611 goto next_pool;
10612 }
10613
10614 /* Find next available free buffer in this pool */
10615 for (i = 0; i < pool->pool_nentries; i++) {
10616 ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[i];
10617 ub_priv = ubp->ub_fca_private;
10618
10619 if (!ub_priv->available ||
10620 ub_priv->flags != EMLXS_UB_FREE) {
10621 continue;
10622 }
10623
10624 ub_priv->time = hba->timer_tics;
10625
10626 /* Timeout in 5 minutes */
10627 ub_priv->timeout = (5 * 60);
10628
10629 ub_priv->flags = EMLXS_UB_IN_USE;
10630
10631 /* Alloc the buffer from the pool */
10632 if (resv_flag) {
10633 ub_priv->flags |= EMLXS_UB_RESV;
10634 pool->pool_free_resv--;
10635 } else {
10636 pool->pool_free--;
10637 }
10638
10639 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
10640 "ub_get: ubp=%p token=%x (%d,%d,%d,%d)", ubp,
10641 ub_priv->token, pool->pool_nentries,
10642 pool->pool_available, pool->pool_free,
10643 pool->pool_free_resv);
10644
10645 mutex_exit(&EMLXS_UB_LOCK);
10646
10647 return (ubp);
10648 }
10649 next_pool:
10650
10651 pool = pool->pool_next;
10652 }
10653
10654 mutex_exit(&EMLXS_UB_LOCK);
10655
10656 return (NULL);
10657
10658 } /* emlxs_ub_get() */
10659
10660
10661
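/*
 * emlxs_set_pkt_state() translates an adapter I/O status into the ULP-visible
 * pkt_state/pkt_reason/pkt_expln/pkt_action fields.  Statuses other than
 * IOSTAT_LOCAL_REJECT are looked up in emlxs_iostat_tbl; IOSTAT_LOCAL_REJECT
 * uses the local error code (localstat) against emlxs_ioerr_tbl.  If no entry
 * matches, the packet defaults to FC_PKT_TRAN_ERROR / FC_REASON_ABORTED /
 * FC_ACTION_RETRYABLE.  Residual counts and a minimal FCP response are also
 * filled in when no response frame was received from the chip.
 */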
10662 extern void
10663 emlxs_set_pkt_state(emlxs_buf_t *sbp, uint32_t iostat, uint8_t localstat,
10664 uint32_t lock)
10665 {
10666 fc_packet_t *pkt;
10667 fcp_rsp_t *fcp_rsp;
10668 uint32_t i;
10669 emlxs_xlat_err_t *tptr;
10670 emlxs_xlat_err_t *entry;
10671
10672
10673 pkt = PRIV2PKT(sbp);
10674
10675 /* Warning: Some FCT sbp's don't have */
10676 /* fc_packet objects, so just return */
10677 if (!pkt) {
10678 return;
10679 }
10680
10681 if (lock) {
10682 mutex_enter(&sbp->mtx);
10683 }
10684
10685 if (!(sbp->pkt_flags & PACKET_STATE_VALID)) {
10686 sbp->pkt_flags |= PACKET_STATE_VALID;
10687
10688 /* Perform table lookup */
10689 entry = NULL;
10690 if (iostat != IOSTAT_LOCAL_REJECT) {
10691 tptr = emlxs_iostat_tbl;
10692 for (i = 0; i < IOSTAT_MAX; i++, tptr++) {
10693 if (iostat == tptr->emlxs_status) {
10694 entry = tptr;
10695 break;
10696 }
10697 }
10698 		} else {	/* iostat == IOSTAT_LOCAL_REJECT */
10699
10700 tptr = emlxs_ioerr_tbl;
10701 for (i = 0; i < IOERR_MAX; i++, tptr++) {
10702 if (localstat == tptr->emlxs_status) {
10703 entry = tptr;
10704 break;
10705 }
10706 }
10707 }
10708
10709 if (entry) {
10710 pkt->pkt_state = entry->pkt_state;
10711 pkt->pkt_reason = entry->pkt_reason;
10712 pkt->pkt_expln = entry->pkt_expln;
10713 pkt->pkt_action = entry->pkt_action;
10714 } else {
10715 /* Set defaults */
10716 pkt->pkt_state = FC_PKT_TRAN_ERROR;
10717 pkt->pkt_reason = FC_REASON_ABORTED;
10718 pkt->pkt_expln = FC_EXPLN_NONE;
10719 pkt->pkt_action = FC_ACTION_RETRYABLE;
10720 }
10721
10722
10723 /* Set the residual counts and response frame */
10724 /* Check if response frame was received from the chip */
10725 /* If so, then the residual counts will already be set */
10726 if (!(sbp->pkt_flags & (PACKET_FCP_RSP_VALID |
10727 PACKET_CT_RSP_VALID | PACKET_ELS_RSP_VALID))) {
10728 /* We have to create the response frame */
10729 if (iostat == IOSTAT_SUCCESS) {
10730 pkt->pkt_resp_resid = 0;
10731 pkt->pkt_data_resid = 0;
10732
10733 if ((pkt->pkt_cmd_fhdr.type ==
10734 FC_TYPE_SCSI_FCP) && pkt->pkt_rsplen &&
10735 pkt->pkt_resp) {
10736 fcp_rsp = (fcp_rsp_t *)pkt->pkt_resp;
10737
10738 fcp_rsp->fcp_u.fcp_status.
10739 rsp_len_set = 1;
10740 fcp_rsp->fcp_response_len = 8;
10741 }
10742 } else {
10743 /* Otherwise assume no data */
10744 /* and no response received */
10745 pkt->pkt_data_resid = pkt->pkt_datalen;
10746 pkt->pkt_resp_resid = pkt->pkt_rsplen;
10747 }
10748 }
10749 }
10750
10751 if (lock) {
10752 mutex_exit(&sbp->mtx);
10753 }
10754
10755 return;
10756
10757 } /* emlxs_set_pkt_state() */
10758
10759
10760 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
10761
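/*
 * The byte-swap helpers below are only compiled for the MODREV2X interface
 * revision.  They convert selected command/response payload fields with
 * LE_SWAP16/LE_SWAP32, and each emlxs_swap_*_pkt() routine toggles the
 * corresponding PACKET_*_SWAPPED flag so emlxs_unswap_pkt() can restore the
 * original byte order at completion time.
 */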
10762 extern void
10763 emlxs_swap_service_params(SERV_PARM *sp)
10764 {
10765 uint16_t *p;
10766 int size;
10767 int i;
10768
10769 size = (sizeof (CSP) - 4) / 2;
10770 p = (uint16_t *)&sp->cmn;
10771 for (i = 0; i < size; i++) {
10772 p[i] = LE_SWAP16(p[i]);
10773 }
10774 sp->cmn.e_d_tov = LE_SWAP32(sp->cmn.e_d_tov);
10775
10776 size = sizeof (CLASS_PARMS) / 2;
10777 p = (uint16_t *)&sp->cls1;
10778 for (i = 0; i < size; i++, p++) {
10779 *p = LE_SWAP16(*p);
10780 }
10781
10782 size = sizeof (CLASS_PARMS) / 2;
10783 p = (uint16_t *)&sp->cls2;
10784 for (i = 0; i < size; i++, p++) {
10785 *p = LE_SWAP16(*p);
10786 }
10787
10788 size = sizeof (CLASS_PARMS) / 2;
10789 p = (uint16_t *)&sp->cls3;
10790 for (i = 0; i < size; i++, p++) {
10791 *p = LE_SWAP16(*p);
10792 }
10793
10794 size = sizeof (CLASS_PARMS) / 2;
10795 p = (uint16_t *)&sp->cls4;
10796 for (i = 0; i < size; i++, p++) {
10797 *p = LE_SWAP16(*p);
10798 }
10799
10800 return;
10801
10802 } /* emlxs_swap_service_params() */
10803
10804 extern void
10805 emlxs_unswap_pkt(emlxs_buf_t *sbp)
10806 {
10807 if (sbp->pkt_flags & PACKET_FCP_SWAPPED) {
10808 emlxs_swap_fcp_pkt(sbp);
10809 }
10810
10811 else if (sbp->pkt_flags & PACKET_ELS_SWAPPED) {
10812 emlxs_swap_els_pkt(sbp);
10813 }
10814
10815 else if (sbp->pkt_flags & PACKET_CT_SWAPPED) {
10816 emlxs_swap_ct_pkt(sbp);
10817 }
10818
10819 } /* emlxs_unswap_pkt() */
10820
10821
10822 extern void
10823 emlxs_swap_fcp_pkt(emlxs_buf_t *sbp)
10824 {
10825 fc_packet_t *pkt;
10826 FCP_CMND *cmd;
10827 fcp_rsp_t *rsp;
10828 uint16_t *lunp;
10829 uint32_t i;
10830
10831 mutex_enter(&sbp->mtx);
10832
10833 if (sbp->pkt_flags & PACKET_ALLOCATED) {
10834 mutex_exit(&sbp->mtx);
10835 return;
10836 }
10837
10838 if (sbp->pkt_flags & PACKET_FCP_SWAPPED) {
10839 sbp->pkt_flags &= ~PACKET_FCP_SWAPPED;
10840 } else {
10841 sbp->pkt_flags |= PACKET_FCP_SWAPPED;
10842 }
10843
10844 mutex_exit(&sbp->mtx);
10845
10846 pkt = PRIV2PKT(sbp);
10847
10848 cmd = (FCP_CMND *)pkt->pkt_cmd;
10849 rsp = (pkt->pkt_rsplen &&
10850 (sbp->pkt_flags & PACKET_FCP_RSP_VALID)) ?
10851 (fcp_rsp_t *)pkt->pkt_resp : NULL;
10852
10853 	/* The data buffer length field (fcpDl) needs to be byte swapped. */
10854 cmd->fcpDl = LE_SWAP32(cmd->fcpDl);
10855
10856 /*
10857 * Swap first 2 words of FCP CMND payload.
10858 */
10859 lunp = (uint16_t *)&cmd->fcpLunMsl;
10860 for (i = 0; i < 4; i++) {
10861 lunp[i] = LE_SWAP16(lunp[i]);
10862 }
10863
10864 if (rsp) {
10865 rsp->fcp_resid = LE_SWAP32(rsp->fcp_resid);
10866 rsp->fcp_sense_len = LE_SWAP32(rsp->fcp_sense_len);
10867 rsp->fcp_response_len = LE_SWAP32(rsp->fcp_response_len);
10868 }
10869
10870 return;
10871
10872 } /* emlxs_swap_fcp_pkt() */
10873
10874
10875 extern void
10876 emlxs_swap_els_pkt(emlxs_buf_t *sbp)
10877 {
10878 fc_packet_t *pkt;
10879 uint32_t *cmd;
10880 uint32_t *rsp;
10881 uint32_t command;
10882 uint16_t *c;
10883 uint32_t i;
10884 uint32_t swapped;
10885
10886 mutex_enter(&sbp->mtx);
10887
10888 if (sbp->pkt_flags & PACKET_ALLOCATED) {
10889 mutex_exit(&sbp->mtx);
10890 return;
10891 }
10892
10893 if (sbp->pkt_flags & PACKET_ELS_SWAPPED) {
10894 sbp->pkt_flags &= ~PACKET_ELS_SWAPPED;
10895 swapped = 1;
10896 } else {
10897 sbp->pkt_flags |= PACKET_ELS_SWAPPED;
10898 swapped = 0;
10899 }
10900
10901 mutex_exit(&sbp->mtx);
10902
10903 pkt = PRIV2PKT(sbp);
10904
10905 cmd = (uint32_t *)pkt->pkt_cmd;
10906 rsp = (pkt->pkt_rsplen &&
10907 (sbp->pkt_flags & PACKET_ELS_RSP_VALID)) ?
10908 (uint32_t *)pkt->pkt_resp : NULL;
10909
10910 if (!swapped) {
10911 cmd[0] = LE_SWAP32(cmd[0]);
10912 command = cmd[0] & ELS_CMD_MASK;
10913 } else {
10914 command = cmd[0] & ELS_CMD_MASK;
10915 cmd[0] = LE_SWAP32(cmd[0]);
10916 }
10917
10918 if (rsp) {
10919 rsp[0] = LE_SWAP32(rsp[0]);
10920 }
10921
10922 switch (command) {
10923 case ELS_CMD_ACC:
10924 if (sbp->ucmd == ELS_CMD_ADISC) {
10925 /* Hard address of originator */
10926 cmd[1] = LE_SWAP32(cmd[1]);
10927
10928 /* N_Port ID of originator */
10929 cmd[6] = LE_SWAP32(cmd[6]);
10930 }
10931 break;
10932
10933 case ELS_CMD_PLOGI:
10934 case ELS_CMD_FLOGI:
10935 case ELS_CMD_FDISC:
10936 if (rsp) {
10937 emlxs_swap_service_params((SERV_PARM *) & rsp[1]);
10938 }
10939 break;
10940
10941 case ELS_CMD_LOGO:
10942 cmd[1] = LE_SWAP32(cmd[1]); /* N_Port ID */
10943 break;
10944
10945 case ELS_CMD_RLS:
10946 cmd[1] = LE_SWAP32(cmd[1]);
10947
10948 if (rsp) {
10949 for (i = 0; i < 6; i++) {
10950 rsp[1 + i] = LE_SWAP32(rsp[1 + i]);
10951 }
10952 }
10953 break;
10954
10955 case ELS_CMD_ADISC:
10956 cmd[1] = LE_SWAP32(cmd[1]); /* Hard address of originator */
10957 cmd[6] = LE_SWAP32(cmd[6]); /* N_Port ID of originator */
10958 break;
10959
10960 case ELS_CMD_PRLI:
10961 c = (uint16_t *)&cmd[1];
10962 c[1] = LE_SWAP16(c[1]);
10963
10964 cmd[4] = LE_SWAP32(cmd[4]);
10965
10966 if (rsp) {
10967 rsp[4] = LE_SWAP32(rsp[4]);
10968 }
10969 break;
10970
10971 case ELS_CMD_SCR:
10972 cmd[1] = LE_SWAP32(cmd[1]);
10973 break;
10974
10975 case ELS_CMD_LINIT:
10976 if (rsp) {
10977 rsp[1] = LE_SWAP32(rsp[1]);
10978 }
10979 break;
10980
10981 default:
10982 break;
10983 }
10984
10985 return;
10986
10987 } /* emlxs_swap_els_pkt() */
10988
10989
10990 extern void
10991 emlxs_swap_ct_pkt(emlxs_buf_t *sbp)
10992 {
10993 fc_packet_t *pkt;
10994 uint32_t *cmd;
10995 uint32_t *rsp;
10996 uint32_t command;
10997 uint32_t i;
10998 uint32_t swapped;
10999
11000 mutex_enter(&sbp->mtx);
11001
11002 if (sbp->pkt_flags & PACKET_ALLOCATED) {
11003 mutex_exit(&sbp->mtx);
11004 return;
11005 }
11006
11007 if (sbp->pkt_flags & PACKET_CT_SWAPPED) {
11008 sbp->pkt_flags &= ~PACKET_CT_SWAPPED;
11009 swapped = 1;
11010 } else {
11011 sbp->pkt_flags |= PACKET_CT_SWAPPED;
11012 swapped = 0;
11013 }
11014
11015 mutex_exit(&sbp->mtx);
11016
11017 pkt = PRIV2PKT(sbp);
11018
11019 cmd = (uint32_t *)pkt->pkt_cmd;
11020 rsp = (pkt->pkt_rsplen &&
11021 (sbp->pkt_flags & PACKET_CT_RSP_VALID)) ?
11022 (uint32_t *)pkt->pkt_resp : NULL;
11023
11024 if (!swapped) {
11025 cmd[0] = 0x01000000;
11026 command = cmd[2];
11027 }
11028
11029 cmd[0] = LE_SWAP32(cmd[0]);
11030 cmd[1] = LE_SWAP32(cmd[1]);
11031 cmd[2] = LE_SWAP32(cmd[2]);
11032 cmd[3] = LE_SWAP32(cmd[3]);
11033
11034 if (swapped) {
11035 command = cmd[2];
11036 }
11037
11038 switch ((command >> 16)) {
11039 case SLI_CTNS_GA_NXT:
11040 cmd[4] = LE_SWAP32(cmd[4]);
11041 break;
11042
11043 case SLI_CTNS_GPN_ID:
11044 case SLI_CTNS_GNN_ID:
11045 case SLI_CTNS_RPN_ID:
11046 case SLI_CTNS_RNN_ID:
11047 case SLI_CTNS_RSPN_ID:
11048 cmd[4] = LE_SWAP32(cmd[4]);
11049 break;
11050
11051 case SLI_CTNS_RCS_ID:
11052 case SLI_CTNS_RPT_ID:
11053 cmd[4] = LE_SWAP32(cmd[4]);
11054 cmd[5] = LE_SWAP32(cmd[5]);
11055 break;
11056
11057 case SLI_CTNS_RFT_ID:
11058 cmd[4] = LE_SWAP32(cmd[4]);
11059
11060 /* Swap FC4 types */
11061 for (i = 0; i < 8; i++) {
11062 cmd[5 + i] = LE_SWAP32(cmd[5 + i]);
11063 }
11064 break;
11065
11066 case SLI_CTNS_GFT_ID:
11067 if (rsp) {
11068 /* Swap FC4 types */
11069 for (i = 0; i < 8; i++) {
11070 rsp[4 + i] = LE_SWAP32(rsp[4 + i]);
11071 }
11072 }
11073 break;
11074
11075 case SLI_CTNS_GCS_ID:
11076 case SLI_CTNS_GSPN_ID:
11077 case SLI_CTNS_GSNN_NN:
11078 case SLI_CTNS_GIP_NN:
11079 case SLI_CTNS_GIPA_NN:
11080
11081 case SLI_CTNS_GPT_ID:
11082 case SLI_CTNS_GID_NN:
11083 case SLI_CTNS_GNN_IP:
11084 case SLI_CTNS_GIPA_IP:
11085 case SLI_CTNS_GID_FT:
11086 case SLI_CTNS_GID_PT:
11087 case SLI_CTNS_GID_PN:
11088 case SLI_CTNS_RIP_NN:
11089 case SLI_CTNS_RIPA_NN:
11090 case SLI_CTNS_RSNN_NN:
11091 case SLI_CTNS_DA_ID:
11092 case SLI_CT_RESPONSE_FS_RJT:
11093 case SLI_CT_RESPONSE_FS_ACC:
11094
11095 default:
11096 break;
11097 }
11098 return;
11099
11100 } /* emlxs_swap_ct_pkt() */
11101
11102
11103 extern void
11104 emlxs_swap_els_ub(fc_unsol_buf_t *ubp)
11105 {
11106 emlxs_ub_priv_t *ub_priv;
11107 fc_rscn_t *rscn;
11108 uint32_t count;
11109 uint32_t i;
11110 uint32_t *lp;
11111 la_els_logi_t *logi;
11112
11113 ub_priv = ubp->ub_fca_private;
11114
11115 switch (ub_priv->cmd) {
11116 case ELS_CMD_RSCN:
11117 rscn = (fc_rscn_t *)ubp->ub_buffer;
11118
11119 rscn->rscn_payload_len = LE_SWAP16(rscn->rscn_payload_len);
11120
11121 count = ((rscn->rscn_payload_len - 4) / 4);
11122 lp = (uint32_t *)ubp->ub_buffer + 1;
11123 for (i = 0; i < count; i++, lp++) {
11124 *lp = LE_SWAP32(*lp);
11125 }
11126
11127 break;
11128
11129 case ELS_CMD_FLOGI:
11130 case ELS_CMD_PLOGI:
11131 case ELS_CMD_FDISC:
11132 case ELS_CMD_PDISC:
11133 logi = (la_els_logi_t *)ubp->ub_buffer;
11134 emlxs_swap_service_params(
11135 (SERV_PARM *)&logi->common_service);
11136 break;
11137
11138 /* ULP handles this */
11139 case ELS_CMD_LOGO:
11140 case ELS_CMD_PRLI:
11141 case ELS_CMD_PRLO:
11142 case ELS_CMD_ADISC:
11143 default:
11144 break;
11145 }
11146
11147 return;
11148
11149 } /* emlxs_swap_els_ub() */
11150
11151
11152 #endif /* EMLXS_MODREV2X */
11153
11154
11155 extern char *
11156 emlxs_mode_xlate(uint32_t mode)
11157 {
11158 static char buffer[32];
11159 uint32_t i;
11160 uint32_t count;
11161
11162 count = sizeof (emlxs_mode_table) / sizeof (emlxs_table_t);
11163 for (i = 0; i < count; i++) {
11164 if (mode == emlxs_mode_table[i].code) {
11165 return (emlxs_mode_table[i].string);
11166 }
11167 }
11168
11169 (void) snprintf(buffer, sizeof (buffer), "Unknown (%x)", mode);
11170 return (buffer);
11171
11172 } /* emlxs_mode_xlate() */
11173
11174
11175 extern char *
11176 emlxs_elscmd_xlate(uint32_t elscmd)
11177 {
11178 static char buffer[32];
11179 uint32_t i;
11180 uint32_t count;
11181
11182 count = sizeof (emlxs_elscmd_table) / sizeof (emlxs_table_t);
11183 for (i = 0; i < count; i++) {
11184 if (elscmd == emlxs_elscmd_table[i].code) {
11185 return (emlxs_elscmd_table[i].string);
11186 }
11187 }
11188
11189 (void) snprintf(buffer, sizeof (buffer), "ELS=0x%x", elscmd);
11190 return (buffer);
11191
11192 } /* emlxs_elscmd_xlate() */
11193
11194
11195 extern char *
11196 emlxs_ctcmd_xlate(uint32_t ctcmd)
11197 {
11198 static char buffer[32];
11199 uint32_t i;
11200 uint32_t count;
11201
11202 count = sizeof (emlxs_ctcmd_table) / sizeof (emlxs_table_t);
11203 for (i = 0; i < count; i++) {
11204 if (ctcmd == emlxs_ctcmd_table[i].code) {
11205 return (emlxs_ctcmd_table[i].string);
11206 }
11207 }
11208
11209 (void) snprintf(buffer, sizeof (buffer), "cmd=0x%x", ctcmd);
11210 return (buffer);
11211
11212 } /* emlxs_ctcmd_xlate() */
11213
11214
11215 #ifdef MENLO_SUPPORT
11216 extern char *
11217 emlxs_menlo_cmd_xlate(uint32_t cmd)
11218 {
11219 static char buffer[32];
11220 uint32_t i;
11221 uint32_t count;
11222
11223 count = sizeof (emlxs_menlo_cmd_table) / sizeof (emlxs_table_t);
11224 for (i = 0; i < count; i++) {
11225 if (cmd == emlxs_menlo_cmd_table[i].code) {
11226 return (emlxs_menlo_cmd_table[i].string);
11227 }
11228 }
11229
11230 (void) snprintf(buffer, sizeof (buffer), "Cmd=0x%x", cmd);
11231 return (buffer);
11232
11233 } /* emlxs_menlo_cmd_xlate() */
11234
11235 extern char *
11236 emlxs_menlo_rsp_xlate(uint32_t rsp)
11237 {
11238 static char buffer[32];
11239 uint32_t i;
11240 uint32_t count;
11241
11242 count = sizeof (emlxs_menlo_rsp_table) / sizeof (emlxs_table_t);
11243 for (i = 0; i < count; i++) {
11244 if (rsp == emlxs_menlo_rsp_table[i].code) {
11245 return (emlxs_menlo_rsp_table[i].string);
11246 }
11247 }
11248
11249 (void) snprintf(buffer, sizeof (buffer), "Rsp=0x%x", rsp);
11250 return (buffer);
11251
11252 } /* emlxs_menlo_rsp_xlate() */
11253
11254 #endif /* MENLO_SUPPORT */
11255
11256
11257 extern char *
11258 emlxs_rmcmd_xlate(uint32_t rmcmd)
11259 {
11260 static char buffer[32];
11261 uint32_t i;
11262 uint32_t count;
11263
11264 count = sizeof (emlxs_rmcmd_table) / sizeof (emlxs_table_t);
11265 for (i = 0; i < count; i++) {
11266 if (rmcmd == emlxs_rmcmd_table[i].code) {
11267 return (emlxs_rmcmd_table[i].string);
11268 }
11269 }
11270
11271 (void) snprintf(buffer, sizeof (buffer), "RM=0x%x", rmcmd);
11272 return (buffer);
11273
11274 } /* emlxs_rmcmd_xlate() */
11275
11276
11277
11278 extern char *
11279 emlxs_mscmd_xlate(uint16_t mscmd)
11280 {
11281 static char buffer[32];
11282 uint32_t i;
11283 uint32_t count;
11284
11285 count = sizeof (emlxs_mscmd_table) / sizeof (emlxs_table_t);
11286 for (i = 0; i < count; i++) {
11287 if (mscmd == emlxs_mscmd_table[i].code) {
11288 return (emlxs_mscmd_table[i].string);
11289 }
11290 }
11291
11292 (void) snprintf(buffer, sizeof (buffer), "Cmd=0x%x", mscmd);
11293 return (buffer);
11294
11295 } /* emlxs_mscmd_xlate() */
11296
11297
11298 extern char *
11299 emlxs_state_xlate(uint8_t state)
11300 {
11301 static char buffer[32];
11302 uint32_t i;
11303 uint32_t count;
11304
11305 count = sizeof (emlxs_state_table) / sizeof (emlxs_table_t);
11306 for (i = 0; i < count; i++) {
11307 if (state == emlxs_state_table[i].code) {
11308 return (emlxs_state_table[i].string);
11309 }
11310 }
11311
11312 (void) snprintf(buffer, sizeof (buffer), "State=0x%x", state);
11313 return (buffer);
11314
11315 } /* emlxs_state_xlate() */
11316
11317
11318 extern char *
11319 emlxs_error_xlate(uint8_t errno)
11320 {
11321 static char buffer[32];
11322 uint32_t i;
11323 uint32_t count;
11324
11325 count = sizeof (emlxs_error_table) / sizeof (emlxs_table_t);
11326 for (i = 0; i < count; i++) {
11327 if (errno == emlxs_error_table[i].code) {
11328 return (emlxs_error_table[i].string);
11329 }
11330 }
11331
11332 (void) snprintf(buffer, sizeof (buffer), "Errno=0x%x", errno);
11333 return (buffer);
11334
11335 } /* emlxs_error_xlate() */
11336
11337
11338 static int
11339 emlxs_pm_lower_power(dev_info_t *dip)
11340 {
11341 int ddiinst;
11342 int emlxinst;
11343 emlxs_config_t *cfg;
11344 int32_t rval;
11345 emlxs_hba_t *hba;
11346
11347 ddiinst = ddi_get_instance(dip);
11348 emlxinst = emlxs_get_instance(ddiinst);
11349 hba = emlxs_device.hba[emlxinst];
11350 cfg = &CFG;
11351
11352 rval = DDI_SUCCESS;
11353
11354 /* Lower the power level */
11355 if (cfg[CFG_PM_SUPPORT].current) {
11356 rval =
11357 pm_lower_power(dip, EMLXS_PM_ADAPTER,
11358 EMLXS_PM_ADAPTER_DOWN);
11359 } else {
11360 		/* Kernel power management support is not enabled, */
11361 		/* so call our power management routine directly. */
11362 rval =
11363 emlxs_power(dip, EMLXS_PM_ADAPTER, EMLXS_PM_ADAPTER_DOWN);
11364 }
11365
11366 return (rval);
11367
11368 } /* emlxs_pm_lower_power() */
11369
11370
11371 static int
11372 emlxs_pm_raise_power(dev_info_t *dip)
11373 {
11374 int ddiinst;
11375 int emlxinst;
11376 emlxs_config_t *cfg;
11377 int32_t rval;
11378 emlxs_hba_t *hba;
11379
11380 ddiinst = ddi_get_instance(dip);
11381 emlxinst = emlxs_get_instance(ddiinst);
11382 hba = emlxs_device.hba[emlxinst];
11383 cfg = &CFG;
11384
11385 /* Raise the power level */
11386 if (cfg[CFG_PM_SUPPORT].current) {
11387 rval =
11388 pm_raise_power(dip, EMLXS_PM_ADAPTER,
11389 EMLXS_PM_ADAPTER_UP);
11390 } else {
11391 		/* Kernel power management support is not enabled, */
11392 		/* so call our power management routine directly. */
11393 rval =
11394 emlxs_power(dip, EMLXS_PM_ADAPTER, EMLXS_PM_ADAPTER_UP);
11395 }
11396
11397 return (rval);
11398
11399 } /* emlxs_pm_raise_power() */
11400
11401
11402 #ifdef IDLE_TIMER
11403
11404 extern int
11405 emlxs_pm_busy_component(emlxs_hba_t *hba)
11406 {
11407 emlxs_config_t *cfg = &CFG;
11408 int rval;
11409
11410 hba->pm_active = 1;
11411
11412 if (hba->pm_busy) {
11413 return (DDI_SUCCESS);
11414 }
11415
11416 mutex_enter(&EMLXS_PM_LOCK);
11417
11418 if (hba->pm_busy) {
11419 mutex_exit(&EMLXS_PM_LOCK);
11420 return (DDI_SUCCESS);
11421 }
11422 hba->pm_busy = 1;
11423
11424 mutex_exit(&EMLXS_PM_LOCK);
11425
11426 /* Attempt to notify system that we are busy */
11427 if (cfg[CFG_PM_SUPPORT].current) {
11428 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
11429 "pm_busy_component.");
11430
11431 rval = pm_busy_component(dip, EMLXS_PM_ADAPTER);
11432
11433 if (rval != DDI_SUCCESS) {
11434 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
11435 "pm_busy_component failed. ret=%d", rval);
11436
11437 /* If this attempt failed then clear our flags */
11438 mutex_enter(&EMLXS_PM_LOCK);
11439 hba->pm_busy = 0;
11440 mutex_exit(&EMLXS_PM_LOCK);
11441
11442 return (rval);
11443 }
11444 }
11445
11446 return (DDI_SUCCESS);
11447
11448 } /* emlxs_pm_busy_component() */
11449
11450
11451 extern int
11452 emlxs_pm_idle_component(emlxs_hba_t *hba)
11453 {
11454 emlxs_config_t *cfg = &CFG;
11455 int rval;
11456
11457 if (!hba->pm_busy) {
11458 return (DDI_SUCCESS);
11459 }
11460
11461 mutex_enter(&EMLXS_PM_LOCK);
11462
11463 if (!hba->pm_busy) {
11464 mutex_exit(&EMLXS_PM_LOCK);
11465 return (DDI_SUCCESS);
11466 }
11467 hba->pm_busy = 0;
11468
11469 mutex_exit(&EMLXS_PM_LOCK);
11470
11471 if (cfg[CFG_PM_SUPPORT].current) {
11472 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
11473 "pm_idle_component.");
11474
11475 rval = pm_idle_component(dip, EMLXS_PM_ADAPTER);
11476
11477 if (rval != DDI_SUCCESS) {
11478 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
11479 "pm_idle_component failed. ret=%d", rval);
11480
11481 /* If this attempt failed then */
11482 /* reset our flags for another attempt */
11483 mutex_enter(&EMLXS_PM_LOCK);
11484 hba->pm_busy = 1;
11485 mutex_exit(&EMLXS_PM_LOCK);
11486
11487 return (rval);
11488 }
11489 }
11490
11491 return (DDI_SUCCESS);
11492
11493 } /* emlxs_pm_idle_component() */
11494
11495
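/*
 * emlxs_pm_idle_timer() is driven from the driver timer.  While I/O activity
 * keeps setting hba->pm_active, the idle deadline (pm_idle_timer) is pushed
 * out by cfg[CFG_PM_IDLE] ticks.  Once the deadline passes with no activity,
 * emlxs_pm_idle_component() is called to report the adapter idle.
 */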
11496 extern void
11497 emlxs_pm_idle_timer(emlxs_hba_t *hba)
11498 {
11499 emlxs_config_t *cfg = &CFG;
11500
11501 if (hba->pm_active) {
11502 /* Clear active flag and reset idle timer */
11503 mutex_enter(&EMLXS_PM_LOCK);
11504 hba->pm_active = 0;
11505 hba->pm_idle_timer =
11506 hba->timer_tics + cfg[CFG_PM_IDLE].current;
11507 mutex_exit(&EMLXS_PM_LOCK);
11508 }
11509
11510 /* Check for idle timeout */
11511 else if (hba->timer_tics >= hba->pm_idle_timer) {
11512 if (emlxs_pm_idle_component(hba) == DDI_SUCCESS) {
11513 mutex_enter(&EMLXS_PM_LOCK);
11514 hba->pm_idle_timer =
11515 hba->timer_tics + cfg[CFG_PM_IDLE].current;
11516 mutex_exit(&EMLXS_PM_LOCK);
11517 }
11518 }
11519
11520 return;
11521
11522 } /* emlxs_pm_idle_timer() */
11523
11524 #endif /* IDLE_TIMER */
11525
11526
11527 #if (EMLXS_MODREV >= EMLXS_MODREV3) && (EMLXS_MODREV <= EMLXS_MODREV4)
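/*
 * emlxs_read_vport_prop() parses the per-adapter "<DRIVER_NAME><inst>-vport"
 * property or, failing that, the global "vport" property.  Each string entry
 * has the form
 *
 *	<physical wwpn>:<vport wwnn>:<vport wwpn>:<vpi>
 *
 * where each WWN is 16 hex digits and the vpi is decimal, for example
 * (illustrative values only):
 *
 *	"10000000c9123456:20000000c9abcdef:10000000c9abcdef:1"
 *
 * Entries whose physical WWPN does not match this adapter, whose vpi is 0 or
 * out of range, or whose port is already configured are skipped.
 */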
11528 static void
11529 emlxs_read_vport_prop(emlxs_hba_t *hba)
11530 {
11531 emlxs_port_t *port = &PPORT;
11532 emlxs_config_t *cfg = &CFG;
11533 char **arrayp;
11534 uint8_t *s;
11535 uint8_t *np;
11536 NAME_TYPE pwwpn;
11537 NAME_TYPE wwnn;
11538 NAME_TYPE wwpn;
11539 uint32_t vpi;
11540 uint32_t cnt;
11541 uint32_t rval;
11542 uint32_t i;
11543 uint32_t j;
11544 uint32_t c1;
11545 uint32_t sum;
11546 uint32_t errors;
11547 char buffer[64];
11548
11549 /* Check for the per adapter vport setting */
11550 (void) snprintf(buffer, sizeof (buffer), "%s%d-vport", DRIVER_NAME,
11551 hba->ddiinst);
11552 cnt = 0;
11553 arrayp = NULL;
11554 rval =
11555 ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba->dip,
11556 (DDI_PROP_DONTPASS), buffer, &arrayp, &cnt);
11557
11558 if ((rval != DDI_PROP_SUCCESS) || !cnt || !arrayp) {
11559 /* Check for the global vport setting */
11560 cnt = 0;
11561 arrayp = NULL;
11562 rval =
11563 ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba->dip,
11564 (DDI_PROP_DONTPASS), "vport", &arrayp, &cnt);
11565 }
11566
11567 if ((rval != DDI_PROP_SUCCESS) || !cnt || !arrayp) {
11568 return;
11569 }
11570
11571 for (i = 0; i < cnt; i++) {
11572 errors = 0;
11573 s = (uint8_t *)arrayp[i];
11574
11575 if (!s) {
11576 break;
11577 }
11578
11579 np = (uint8_t *)&pwwpn;
11580 for (j = 0; j < sizeof (NAME_TYPE); j++) {
11581 c1 = *s++;
11582 if ((c1 >= '0') && (c1 <= '9')) {
11583 sum = ((c1 - '0') << 4);
11584 } else if ((c1 >= 'a') && (c1 <= 'f')) {
11585 sum = ((c1 - 'a' + 10) << 4);
11586 } else if ((c1 >= 'A') && (c1 <= 'F')) {
11587 sum = ((c1 - 'A' + 10) << 4);
11588 } else {
11589 EMLXS_MSGF(EMLXS_CONTEXT,
11590 &emlxs_attach_debug_msg,
11591 "Config error: Invalid PWWPN found. "
11592 "entry=%d byte=%d hi_nibble=%c",
11593 i, j, c1);
11594 errors++;
11595 }
11596
11597 c1 = *s++;
11598 if ((c1 >= '0') && (c1 <= '9')) {
11599 sum |= (c1 - '0');
11600 } else if ((c1 >= 'a') && (c1 <= 'f')) {
11601 sum |= (c1 - 'a' + 10);
11602 } else if ((c1 >= 'A') && (c1 <= 'F')) {
11603 sum |= (c1 - 'A' + 10);
11604 } else {
11605 EMLXS_MSGF(EMLXS_CONTEXT,
11606 &emlxs_attach_debug_msg,
11607 "Config error: Invalid PWWPN found. "
11608 "entry=%d byte=%d lo_nibble=%c",
11609 i, j, c1);
11610 errors++;
11611 }
11612
11613 *np++ = (uint8_t)sum;
11614 }
11615
11616 if (*s++ != ':') {
11617 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
11618 "Config error: Invalid delimiter after PWWPN. "
11619 "entry=%d", i);
11620 goto out;
11621 }
11622
11623 np = (uint8_t *)&wwnn;
11624 for (j = 0; j < sizeof (NAME_TYPE); j++) {
11625 c1 = *s++;
11626 if ((c1 >= '0') && (c1 <= '9')) {
11627 sum = ((c1 - '0') << 4);
11628 } else if ((c1 >= 'a') && (c1 <= 'f')) {
11629 sum = ((c1 - 'a' + 10) << 4);
11630 } else if ((c1 >= 'A') && (c1 <= 'F')) {
11631 sum = ((c1 - 'A' + 10) << 4);
11632 } else {
11633 EMLXS_MSGF(EMLXS_CONTEXT,
11634 &emlxs_attach_debug_msg,
11635 "Config error: Invalid WWNN found. "
11636 "entry=%d byte=%d hi_nibble=%c",
11637 i, j, c1);
11638 errors++;
11639 }
11640
11641 c1 = *s++;
11642 if ((c1 >= '0') && (c1 <= '9')) {
11643 sum |= (c1 - '0');
11644 } else if ((c1 >= 'a') && (c1 <= 'f')) {
11645 sum |= (c1 - 'a' + 10);
11646 } else if ((c1 >= 'A') && (c1 <= 'F')) {
11647 sum |= (c1 - 'A' + 10);
11648 } else {
11649 EMLXS_MSGF(EMLXS_CONTEXT,
11650 &emlxs_attach_debug_msg,
11651 "Config error: Invalid WWNN found. "
11652 "entry=%d byte=%d lo_nibble=%c",
11653 i, j, c1);
11654 errors++;
11655 }
11656
11657 *np++ = (uint8_t)sum;
11658 }
11659
11660 if (*s++ != ':') {
11661 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
11662 "Config error: Invalid delimiter after WWNN. "
11663 "entry=%d", i);
11664 goto out;
11665 }
11666
11667 np = (uint8_t *)&wwpn;
11668 for (j = 0; j < sizeof (NAME_TYPE); j++) {
11669 c1 = *s++;
11670 if ((c1 >= '0') && (c1 <= '9')) {
11671 sum = ((c1 - '0') << 4);
11672 } else if ((c1 >= 'a') && (c1 <= 'f')) {
11673 sum = ((c1 - 'a' + 10) << 4);
11674 } else if ((c1 >= 'A') && (c1 <= 'F')) {
11675 sum = ((c1 - 'A' + 10) << 4);
11676 } else {
11677 EMLXS_MSGF(EMLXS_CONTEXT,
11678 &emlxs_attach_debug_msg,
11679 "Config error: Invalid WWPN found. "
11680 "entry=%d byte=%d hi_nibble=%c",
11681 i, j, c1);
11682
11683 errors++;
11684 }
11685
11686 c1 = *s++;
11687 if ((c1 >= '0') && (c1 <= '9')) {
11688 sum |= (c1 - '0');
11689 } else if ((c1 >= 'a') && (c1 <= 'f')) {
11690 sum |= (c1 - 'a' + 10);
11691 } else if ((c1 >= 'A') && (c1 <= 'F')) {
11692 sum |= (c1 - 'A' + 10);
11693 } else {
11694 EMLXS_MSGF(EMLXS_CONTEXT,
11695 &emlxs_attach_debug_msg,
11696 "Config error: Invalid WWPN found. "
11697 "entry=%d byte=%d lo_nibble=%c",
11698 i, j, c1);
11699
11700 errors++;
11701 }
11702
11703 *np++ = (uint8_t)sum;
11704 }
11705
11706 if (*s++ != ':') {
11707 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
11708 "Config error: Invalid delimiter after WWPN. "
11709 "entry=%d", i);
11710
11711 goto out;
11712 }
11713
11714 sum = 0;
11715 do {
11716 c1 = *s++;
11717 if ((c1 < '0') || (c1 > '9')) {
11718 EMLXS_MSGF(EMLXS_CONTEXT,
11719 &emlxs_attach_debug_msg,
11720 "Config error: Invalid VPI found. "
11721 "entry=%d c=%c vpi=%d", i, c1, sum);
11722
11723 goto out;
11724 }
11725
11726 sum = (sum * 10) + (c1 - '0');
11727
11728 } while (*s != 0);
11729
11730 vpi = sum;
11731
11732 if (errors) {
11733 continue;
11734 }
11735
11736 /* Entry has been read */
11737
11738 /* Check if the physical port wwpn */
11739 /* matches our physical port wwpn */
11740 if (bcmp((caddr_t)&hba->wwpn, (caddr_t)&pwwpn, 8)) {
11741 continue;
11742 }
11743
11744 /* Check vpi range */
11745 if ((vpi == 0) || (vpi >= MAX_VPORTS)) {
11746 continue;
11747 }
11748
11749 /* Check if port has already been configured */
11750 if (hba->port[vpi].flag & EMLXS_PORT_CONFIG) {
11751 continue;
11752 }
11753
11754 /* Set the highest configured vpi */
11755 if (vpi > hba->vpi_high) {
11756 hba->vpi_high = vpi;
11757 }
11758
11759 bcopy((caddr_t)&wwnn, (caddr_t)&hba->port[vpi].wwnn,
11760 sizeof (NAME_TYPE));
11761 bcopy((caddr_t)&wwpn, (caddr_t)&hba->port[vpi].wwpn,
11762 sizeof (NAME_TYPE));
11763
11764 if (hba->port[vpi].snn[0] == 0) {
11765 (void) strncpy((caddr_t)hba->port[vpi].snn,
11766 (caddr_t)hba->snn,
11767 (sizeof (hba->port[vpi].snn)-1));
11768 }
11769
11770 if (hba->port[vpi].spn[0] == 0) {
11771 (void) snprintf((caddr_t)hba->port[vpi].spn,
11772 sizeof (hba->port[vpi].spn),
11773 "%s VPort-%d",
11774 (caddr_t)hba->spn, vpi);
11775 }
11776
11777 hba->port[vpi].flag |=
11778 (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLED);
11779
11780 if (cfg[CFG_VPORT_RESTRICTED].current) {
11781 hba->port[vpi].flag |= EMLXS_PORT_RESTRICTED;
11782 }
11783 }
11784
11785 out:
11786
11787 (void) ddi_prop_free((void *) arrayp);
11788 return;
11789
11790 } /* emlxs_read_vport_prop() */
11791 #endif /* EMLXS_MODREV3 || EMLXS_MODREV4 */
11792
11793
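/*
 * emlxs_wwn_xlate() formats an 8-byte WWN into the caller-supplied buffer as
 * 16 lower-case hex digits with no separators.  For example (illustrative
 * value only), a WWN of 10:00:00:00:c9:12:34:56 is rendered as
 * "10000000c9123456".  A buffer of at least 17 bytes holds the full string;
 * snprintf() truncates otherwise.
 */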
11794 extern char *
11795 emlxs_wwn_xlate(char *buffer, size_t len, uint8_t *wwn)
11796 {
11797 (void) snprintf(buffer, len, "%02x%02x%02x%02x%02x%02x%02x%02x",
11798 wwn[0] & 0xff, wwn[1] & 0xff, wwn[2] & 0xff, wwn[3] & 0xff,
11799 wwn[4] & 0xff, wwn[5] & 0xff, wwn[6] & 0xff, wwn[7] & 0xff);
11800
11801 return (buffer);
11802
11803 } /* emlxs_wwn_xlate() */
11804
11805
11806 extern int32_t
11807 emlxs_wwn_cmp(uint8_t *wwn1, uint8_t *wwn2)
11808 {
11809 uint32_t i;
11810
11811 for (i = 0; i < 8; i ++, wwn1 ++, wwn2 ++) {
11812 if (*wwn1 > *wwn2) {
11813 return (1);
11814 }
11815 if (*wwn1 < *wwn2) {
11816 return (-1);
11817 }
11818 }
11819
11820 return (0);
11821
11822 } /* emlxs_wwn_cmp() */
11823
11824
11825 /* This is called at port online and offline */
11826 extern void
11827 emlxs_ub_flush(emlxs_port_t *port)
11828 {
11829 emlxs_hba_t *hba = HBA;
11830 fc_unsol_buf_t *ubp;
11831 emlxs_ub_priv_t *ub_priv;
11832 emlxs_ub_priv_t *next;
11833
11834 /* Return if nothing to do */
11835 if (!port->ub_wait_head) {
11836 return;
11837 }
11838
11839 mutex_enter(&EMLXS_PORT_LOCK);
11840 ub_priv = port->ub_wait_head;
11841 port->ub_wait_head = NULL;
11842 port->ub_wait_tail = NULL;
11843 mutex_exit(&EMLXS_PORT_LOCK);
11844
11845 while (ub_priv) {
11846 next = ub_priv->next;
11847 ubp = ub_priv->ubp;
11848
11849 /* Check if ULP is online and we have a callback function */
11850 if (port->ulp_statec != FC_STATE_OFFLINE) {
11851 /* Send ULP the ub buffer */
11852 emlxs_ulp_unsol_cb(port, ubp);
11853 } else { /* Drop the buffer */
11854 (void) emlxs_fca_ub_release(port, 1, &ubp->ub_token);
11855 }
11856
11857 ub_priv = next;
11858
11859 } /* while () */
11860
11861 return;
11862
11863 } /* emlxs_ub_flush() */
11864
11865
11866 extern void
11867 emlxs_ub_callback(emlxs_port_t *port, fc_unsol_buf_t *ubp)
11868 {
11869 emlxs_hba_t *hba = HBA;
11870 emlxs_ub_priv_t *ub_priv;
11871
11872 ub_priv = ubp->ub_fca_private;
11873
11874 /* Check if ULP is online */
11875 if (port->ulp_statec != FC_STATE_OFFLINE) {
11876 emlxs_ulp_unsol_cb(port, ubp);
11877
11878 } else { /* ULP offline */
11879
11880 if (hba->state >= FC_LINK_UP) {
11881 /* Add buffer to queue tail */
11882 mutex_enter(&EMLXS_PORT_LOCK);
11883
11884 if (port->ub_wait_tail) {
11885 port->ub_wait_tail->next = ub_priv;
11886 }
11887 port->ub_wait_tail = ub_priv;
11888
11889 if (!port->ub_wait_head) {
11890 port->ub_wait_head = ub_priv;
11891 }
11892
11893 mutex_exit(&EMLXS_PORT_LOCK);
11894 } else {
11895 (void) emlxs_fca_ub_release(port, 1, &ubp->ub_token);
11896 }
11897 }
11898
11899 return;
11900
11901 } /* emlxs_ub_callback() */
11902
11903
11904 extern void
11905 emlxs_fca_link_up(emlxs_port_t *port)
11906 {
11907 emlxs_ulp_statec_cb(port, port->ulp_statec);
11908 return;
11909
11910 } /* emlxs_fca_link_up() */
11911
11912
11913 extern void
11914 emlxs_fca_link_down(emlxs_port_t *port)
11915 {
11916 emlxs_ulp_statec_cb(port, FC_STATE_OFFLINE);
11917 return;
11918
11919 } /* emlxs_fca_link_down() */
11920
11921
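/*
 * emlxs_integrity_check() is a build sanity check.  It verifies that the
 * compiled sizes of the SLI data structures (BDEs, HBQ entries, mailbox,
 * SLIM, IOCB, etc.) match the sizes the hardware interface expects, reports
 * each mismatch with cmn_err(), and returns the number of mismatches found.
 */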
11922 static uint32_t
11923 emlxs_integrity_check(emlxs_hba_t *hba)
11924 {
11925 uint32_t size;
11926 uint32_t errors = 0;
11927 int ddiinst = hba->ddiinst;
11928
11929 size = 16;
11930 if (sizeof (ULP_BDL) != size) {
11931 cmn_err(CE_WARN, "?%s%d: ULP_BDL size incorrect. %d != 16",
11932 DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDL));
11933
11934 errors++;
11935 }
11936 size = 8;
11937 if (sizeof (ULP_BDE) != size) {
11938 cmn_err(CE_WARN, "?%s%d: ULP_BDE size incorrect. %d != 8",
11939 DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDE));
11940
11941 errors++;
11942 }
11943 size = 12;
11944 if (sizeof (ULP_BDE64) != size) {
11945 cmn_err(CE_WARN, "?%s%d: ULP_BDE64 size incorrect. %d != 12",
11946 DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDE64));
11947
11948 errors++;
11949 }
11950 size = 16;
11951 if (sizeof (HBQE_t) != size) {
11952 cmn_err(CE_WARN, "?%s%d: HBQE size incorrect. %d != 16",
11953 DRIVER_NAME, ddiinst, (int)sizeof (HBQE_t));
11954
11955 errors++;
11956 }
11957 size = 8;
11958 if (sizeof (HGP) != size) {
11959 cmn_err(CE_WARN, "?%s%d: HGP size incorrect. %d != 8",
11960 DRIVER_NAME, ddiinst, (int)sizeof (HGP));
11961
11962 errors++;
11963 }
11964 if (sizeof (PGP) != size) {
11965 cmn_err(CE_WARN, "?%s%d: PGP size incorrect. %d != 8",
11966 DRIVER_NAME, ddiinst, (int)sizeof (PGP));
11967
11968 errors++;
11969 }
11970 size = 4;
11971 if (sizeof (WORD5) != size) {
11972 cmn_err(CE_WARN, "?%s%d: WORD5 size incorrect. %d != 4",
11973 DRIVER_NAME, ddiinst, (int)sizeof (WORD5));
11974
11975 errors++;
11976 }
11977 size = 124;
11978 if (sizeof (MAILVARIANTS) != size) {
11979 cmn_err(CE_WARN, "?%s%d: MAILVARIANTS size incorrect. "
11980 "%d != 124", DRIVER_NAME, ddiinst,
11981 (int)sizeof (MAILVARIANTS));
11982
11983 errors++;
11984 }
11985 size = 128;
11986 if (sizeof (SLI1_DESC) != size) {
11987 cmn_err(CE_WARN, "?%s%d: SLI1_DESC size incorrect. %d != 128",
11988 DRIVER_NAME, ddiinst, (int)sizeof (SLI1_DESC));
11989
11990 errors++;
11991 }
11992 if (sizeof (SLI2_DESC) != size) {
11993 cmn_err(CE_WARN, "?%s%d: SLI2_DESC size incorrect. %d != 128",
11994 DRIVER_NAME, ddiinst, (int)sizeof (SLI2_DESC));
11995
11996 errors++;
11997 }
11998 size = MBOX_SIZE;
11999 if (sizeof (MAILBOX) != size) {
12000 cmn_err(CE_WARN, "?%s%d: MAILBOX size incorrect. %d != %d",
12001 DRIVER_NAME, ddiinst, (int)sizeof (MAILBOX), MBOX_SIZE);
12002
12003 errors++;
12004 }
12005 size = PCB_SIZE;
12006 if (sizeof (PCB) != size) {
12007 cmn_err(CE_WARN, "?%s%d: PCB size incorrect. %d != %d",
12008 DRIVER_NAME, ddiinst, (int)sizeof (PCB), PCB_SIZE);
12009
12010 errors++;
12011 }
12012 size = 260;
12013 if (sizeof (ATTRIBUTE_ENTRY) != size) {
12014 cmn_err(CE_WARN, "?%s%d: ATTRIBUTE_ENTRY size incorrect. "
12015 "%d != 260", DRIVER_NAME, ddiinst,
12016 (int)sizeof (ATTRIBUTE_ENTRY));
12017
12018 errors++;
12019 }
12020 size = SLI_SLIM1_SIZE;
12021 if (sizeof (SLIM1) != size) {
12022 cmn_err(CE_WARN, "?%s%d: SLIM1 size incorrect. %d != %d",
12023 DRIVER_NAME, ddiinst, (int)sizeof (SLIM1), SLI_SLIM1_SIZE);
12024
12025 errors++;
12026 }
12027 size = SLI3_IOCB_CMD_SIZE;
12028 if (sizeof (IOCB) != size) {
12029 cmn_err(CE_WARN, "?%s%d: IOCB size incorrect. %d != %d",
12030 DRIVER_NAME, ddiinst, (int)sizeof (IOCB),
12031 SLI3_IOCB_CMD_SIZE);
12032
12033 errors++;
12034 }
12035
12036 size = SLI_SLIM2_SIZE;
12037 if (sizeof (SLIM2) != size) {
12038 cmn_err(CE_WARN, "?%s%d: SLIM2 size incorrect. %d != %d",
12039 DRIVER_NAME, ddiinst, (int)sizeof (SLIM2),
12040 SLI_SLIM2_SIZE);
12041
12042 errors++;
12043 }
12044 return (errors);
12045
12046 } /* emlxs_integrity_check() */
12047
12048
12049 #ifdef FMA_SUPPORT
12050 /*
12051 * FMA support
12052 */
12053
12054 extern void
12055 emlxs_fm_init(emlxs_hba_t *hba)
12056 {
12057 ddi_iblock_cookie_t iblk;
12058
12059 if (hba->fm_caps == DDI_FM_NOT_CAPABLE) {
12060 return;
12061 }
12062
12063 if (DDI_FM_ACC_ERR_CAP(hba->fm_caps)) {
12064 emlxs_dev_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
12065 emlxs_data_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
12066 }
12067
12068 if (DDI_FM_DMA_ERR_CAP(hba->fm_caps)) {
12069 hba->dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
12070 hba->dma_attr_ro.dma_attr_flags |= DDI_DMA_FLAGERR;
12071 hba->dma_attr_1sg.dma_attr_flags |= DDI_DMA_FLAGERR;
12072 hba->dma_attr_fcip_rsp.dma_attr_flags |= DDI_DMA_FLAGERR;
12073 } else {
12074 hba->dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12075 hba->dma_attr_ro.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12076 hba->dma_attr_1sg.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12077 hba->dma_attr_fcip_rsp.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12078 }
12079
12080 ddi_fm_init(hba->dip, &hba->fm_caps, &iblk);
12081
12082 if (DDI_FM_EREPORT_CAP(hba->fm_caps) ||
12083 DDI_FM_ERRCB_CAP(hba->fm_caps)) {
12084 pci_ereport_setup(hba->dip);
12085 }
12086
12087 if (DDI_FM_ERRCB_CAP(hba->fm_caps)) {
12088 ddi_fm_handler_register(hba->dip, emlxs_fm_error_cb,
12089 (void *)hba);
12090 }
12091
12092 } /* emlxs_fm_init() */
12093
12094
12095 extern void
12096 emlxs_fm_fini(emlxs_hba_t *hba)
12097 {
12098 if (hba->fm_caps == DDI_FM_NOT_CAPABLE) {
12099 return;
12100 }
12101
12102 if (DDI_FM_EREPORT_CAP(hba->fm_caps) ||
12103 DDI_FM_ERRCB_CAP(hba->fm_caps)) {
12104 pci_ereport_teardown(hba->dip);
12105 }
12106
12107 if (DDI_FM_ERRCB_CAP(hba->fm_caps)) {
12108 ddi_fm_handler_unregister(hba->dip);
12109 }
12110
12111 (void) ddi_fm_fini(hba->dip);
12112
12113 } /* emlxs_fm_fini() */
12114
12115
12116 extern int
12117 emlxs_fm_check_acc_handle(emlxs_hba_t *hba, ddi_acc_handle_t handle)
12118 {
12119 ddi_fm_error_t err;
12120
12121 if (!DDI_FM_ACC_ERR_CAP(hba->fm_caps)) {
12122 return (DDI_FM_OK);
12123 }
12124
12125 /* Some S10 versions do not define the ahi_err structure */
12126 if (((ddi_acc_impl_t *)handle)->ahi_err == NULL) {
12127 return (DDI_FM_OK);
12128 }
12129
12130 err.fme_status = DDI_FM_OK;
12131 (void) ddi_fm_acc_err_get(handle, &err, DDI_FME_VERSION);
12132
12133 /* Some S10 versions do not define the ddi_fm_acc_err_clear function */
12134 if ((void *)&ddi_fm_acc_err_clear != NULL) {
12135 (void) ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
12136 }
12137
12138 return (err.fme_status);
12139
12140 } /* emlxs_fm_check_acc_handle() */
12141
12142
12143 extern int
12144 emlxs_fm_check_dma_handle(emlxs_hba_t *hba, ddi_dma_handle_t handle)
12145 {
12146 ddi_fm_error_t err;
12147
12148 if (!DDI_FM_ACC_ERR_CAP(hba->fm_caps)) {
12149 return (DDI_FM_OK);
12150 }
12151
12152 err.fme_status = DDI_FM_OK;
12153 (void) ddi_fm_dma_err_get(handle, &err, DDI_FME_VERSION);
12154
12155 return (err.fme_status);
12156
12157 } /* emlxs_fm_check_dma_handle() */
12158
12159
12160 extern void
12161 emlxs_fm_ereport(emlxs_hba_t *hba, char *detail)
12162 {
12163 uint64_t ena;
12164 char buf[FM_MAX_CLASS];
12165
12166 if (!DDI_FM_EREPORT_CAP(hba->fm_caps)) {
12167 return;
12168 }
12169
12170 if (detail == NULL) {
12171 return;
12172 }
12173
12174 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
12175 ena = fm_ena_generate(0, FM_ENA_FMT1);
12176
12177 ddi_fm_ereport_post(hba->dip, buf, ena, DDI_NOSLEEP,
12178 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
12179
12180 } /* emlxs_fm_ereport() */
12181
12182
12183 extern void
12184 emlxs_fm_service_impact(emlxs_hba_t *hba, int impact)
12185 {
12186 if (!DDI_FM_EREPORT_CAP(hba->fm_caps)) {
12187 return;
12188 }
12189
12190 if (impact == 0) {
12191 return;
12192 }
12193
12194 if ((hba->pm_state & EMLXS_PM_IN_DETACH) &&
12195 (impact == DDI_SERVICE_DEGRADED)) {
12196 impact = DDI_SERVICE_UNAFFECTED;
12197 }
12198
12199 ddi_fm_service_impact(hba->dip, impact);
12200
12201 return;
12202
12203 } /* emlxs_fm_service_impact() */
12204
12205
12206 /*
12207 * The I/O fault service error handling callback function
12208 */
12209 /*ARGSUSED*/
12210 extern int
12211 emlxs_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
12212 const void *impl_data)
12213 {
12214 /*
12215 	 * As the driver can always deal with an error in any
12216 	 * DMA or access handle, we can just return the
12217 	 * fme_status value.
12218 */
12219 pci_ereport_post(dip, err, NULL);
12220 return (err->fme_status);
12221
12222 } /* emlxs_fm_error_cb() */
12223
12224 extern void
12225 emlxs_check_dma(emlxs_hba_t *hba, emlxs_buf_t *sbp)
12226 {
12227 emlxs_port_t *port = sbp->port;
12228 fc_packet_t *pkt = PRIV2PKT(sbp);
12229
12230 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
12231 if (emlxs_fm_check_dma_handle(hba,
12232 hba->sli.sli4.slim2.dma_handle)
12233 != DDI_FM_OK) {
12234 EMLXS_MSGF(EMLXS_CONTEXT,
12235 &emlxs_invalid_dma_handle_msg,
12236 "slim2: hdl=%p",
12237 hba->sli.sli4.slim2.dma_handle);
12238
12239 mutex_enter(&EMLXS_PORT_LOCK);
12240 hba->flag |= FC_DMA_CHECK_ERROR;
12241 mutex_exit(&EMLXS_PORT_LOCK);
12242 }
12243 } else {
12244 if (emlxs_fm_check_dma_handle(hba,
12245 hba->sli.sli3.slim2.dma_handle)
12246 != DDI_FM_OK) {
12247 EMLXS_MSGF(EMLXS_CONTEXT,
12248 &emlxs_invalid_dma_handle_msg,
12249 "slim2: hdl=%p",
12250 hba->sli.sli3.slim2.dma_handle);
12251
12252 mutex_enter(&EMLXS_PORT_LOCK);
12253 hba->flag |= FC_DMA_CHECK_ERROR;
12254 mutex_exit(&EMLXS_PORT_LOCK);
12255 }
12256 }
12257
12258 if (hba->flag & FC_DMA_CHECK_ERROR) {
12259 pkt->pkt_state = FC_PKT_TRAN_ERROR;
12260 pkt->pkt_reason = FC_REASON_DMA_ERROR;
12261 pkt->pkt_expln = FC_EXPLN_NONE;
12262 pkt->pkt_action = FC_ACTION_RETRYABLE;
12263 return;
12264 }
12265
12266 if (pkt->pkt_cmdlen) {
12267 if (emlxs_fm_check_dma_handle(hba, pkt->pkt_cmd_dma)
12268 != DDI_FM_OK) {
12269 EMLXS_MSGF(EMLXS_CONTEXT,
12270 &emlxs_invalid_dma_handle_msg,
12271 "pkt_cmd_dma: hdl=%p",
12272 pkt->pkt_cmd_dma);
12273
12274 pkt->pkt_state = FC_PKT_TRAN_ERROR;
12275 pkt->pkt_reason = FC_REASON_DMA_ERROR;
12276 pkt->pkt_expln = FC_EXPLN_NONE;
12277 pkt->pkt_action = FC_ACTION_RETRYABLE;
12278
12279 return;
12280 }
12281 }
12282
12283 if (pkt->pkt_rsplen) {
12284 if (emlxs_fm_check_dma_handle(hba, pkt->pkt_resp_dma)
12285 != DDI_FM_OK) {
12286 EMLXS_MSGF(EMLXS_CONTEXT,
12287 &emlxs_invalid_dma_handle_msg,
12288 "pkt_resp_dma: hdl=%p",
12289 pkt->pkt_resp_dma);
12290
12291 pkt->pkt_state = FC_PKT_TRAN_ERROR;
12292 pkt->pkt_reason = FC_REASON_DMA_ERROR;
12293 pkt->pkt_expln = FC_EXPLN_NONE;
12294 pkt->pkt_action = FC_ACTION_RETRYABLE;
12295
12296 return;
12297 }
12298 }
12299
12300 if (pkt->pkt_datalen) {
12301 if (emlxs_fm_check_dma_handle(hba, pkt->pkt_data_dma)
12302 != DDI_FM_OK) {
12303 EMLXS_MSGF(EMLXS_CONTEXT,
12304 &emlxs_invalid_dma_handle_msg,
12305 "pkt_data_dma: hdl=%p",
12306 pkt->pkt_data_dma);
12307
12308 pkt->pkt_state = FC_PKT_TRAN_ERROR;
12309 pkt->pkt_reason = FC_REASON_DMA_ERROR;
12310 pkt->pkt_expln = FC_EXPLN_NONE;
12311 pkt->pkt_action = FC_ACTION_RETRYABLE;
12312
12313 return;
12314 }
12315 }
12316
12317 return;
12318
12319 } /* emlxs_check_dma() */
12320 #endif /* FMA_SUPPORT */
12321
12322
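/*
 * emlxs_swap32_buffer() byte swaps a buffer in place, one 32-bit word at a
 * time; emlxs_swap32_bcopy() does the same while copying from src to dst.
 * Both routines VERIFY() that the size is a multiple of 4 bytes.
 */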
12323 extern void
12324 emlxs_swap32_buffer(uint8_t *buffer, uint32_t size)
12325 {
12326 uint32_t word;
12327 uint32_t *wptr;
12328 uint32_t i;
12329
12330 VERIFY((size % 4) == 0);
12331
12332 wptr = (uint32_t *)buffer;
12333
12334 for (i = 0; i < size / 4; i++) {
12335 word = *wptr;
12336 *wptr++ = SWAP32(word);
12337 }
12338
12339 return;
12340
12341 } /* emlxs_swap32_buffer() */
12342
12343
12344 extern void
12345 emlxs_swap32_bcopy(uint8_t *src, uint8_t *dst, uint32_t size)
12346 {
12347 uint32_t word;
12348 uint32_t *sptr;
12349 uint32_t *dptr;
12350 uint32_t i;
12351
12352 VERIFY((size % 4) == 0);
12353
12354 sptr = (uint32_t *)src;
12355 dptr = (uint32_t *)dst;
12356
12357 for (i = 0; i < size / 4; i++) {
12358 word = *sptr++;
12359 *dptr++ = SWAP32(word);
12360 }
12361
12362 return;
12363
12364 } /* emlxs_swap32_bcopy() */
12365
12366
12367 extern char *
12368 emlxs_strtoupper(char *str)
12369 {
12370 char *cptr = str;
12371
12372 while (*cptr) {
12373 if ((*cptr >= 'a') && (*cptr <= 'z')) {
12374 *cptr -= ('a' - 'A');
12375 }
12376 cptr++;
12377 }
12378
12379 return (str);
12380
12381 } /* emlxs_strtoupper() */
12382
12383
12384 extern void
12385 emlxs_ulp_statec_cb(emlxs_port_t *port, uint32_t statec)
12386 {
12387 emlxs_hba_t *hba = HBA;
12388
12389 /* This routine coordinates protection with emlxs_fca_unbind_port() */
12390
12391 mutex_enter(&EMLXS_PORT_LOCK);
12392 if (!(port->flag & EMLXS_INI_BOUND)) {
12393 mutex_exit(&EMLXS_PORT_LOCK);
12394 return;
12395 }
12396 port->ulp_busy++;
12397 mutex_exit(&EMLXS_PORT_LOCK);
12398
12399 port->ulp_statec_cb(port->ulp_handle, statec);
12400
12401 mutex_enter(&EMLXS_PORT_LOCK);
12402 port->ulp_busy--;
12403 mutex_exit(&EMLXS_PORT_LOCK);
12404
12405 return;
12406
12407 } /* emlxs_ulp_statec_cb() */
12408
12409
12410 extern void
12411 emlxs_ulp_unsol_cb(emlxs_port_t *port, fc_unsol_buf_t *ubp)
12412 {
12413 emlxs_hba_t *hba = HBA;
12414
12415 /* This routine coordinates protection with emlxs_fca_unbind_port() */
12416
12417 mutex_enter(&EMLXS_PORT_LOCK);
12418 if (!(port->flag & EMLXS_INI_BOUND)) {
12419 mutex_exit(&EMLXS_PORT_LOCK);
12420 return;
12421 }
12422 port->ulp_busy++;
12423 mutex_exit(&EMLXS_PORT_LOCK);
12424
12425 port->ulp_unsol_cb(port->ulp_handle, ubp, ubp->ub_frame.type);
12426
12427 mutex_enter(&EMLXS_PORT_LOCK);
12428 port->ulp_busy--;
12429 mutex_exit(&EMLXS_PORT_LOCK);
12430
12431 return;
12432
12433 } /* emlxs_ulp_unsol_cb() */
12434