1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at
9 * http://www.opensource.org/licenses/cddl1.txt.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2004-2012 Emulex. All rights reserved.
24 * Use is subject to license terms.
25 * Copyright 2020 RackTop Systems, Inc.
26 */
27
28 #define DEF_ICFG 1
29
30 #include <emlxs.h>
31 #include <emlxs_version.h>
32
33
34 static char emlxs_copyright[] = EMLXS_COPYRIGHT;
35 char emlxs_revision[] = EMLXS_REVISION;
36 char emlxs_version[] = EMLXS_VERSION;
37 char emlxs_name[] = EMLXS_NAME;
38 char emlxs_label[] = EMLXS_LABEL;
39
40 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
41 EMLXS_MSG_DEF(EMLXS_SOLARIS_C);
42
43 #ifdef MENLO_SUPPORT
44 static int32_t emlxs_send_menlo(emlxs_port_t *port, emlxs_buf_t *sbp);
45 #endif /* MENLO_SUPPORT */
46
47 static void emlxs_fca_attach(emlxs_hba_t *hba);
48 static void emlxs_fca_detach(emlxs_hba_t *hba);
49 static void emlxs_drv_banner(emlxs_hba_t *hba);
50
51 static int32_t emlxs_get_props(emlxs_hba_t *hba);
52 static int32_t emlxs_send_fcp_cmd(emlxs_port_t *port, emlxs_buf_t *sbp,
53 uint32_t *pkt_flags);
54 static int32_t emlxs_send_fct_status(emlxs_port_t *port, emlxs_buf_t *sbp);
55 static int32_t emlxs_send_fct_abort(emlxs_port_t *port, emlxs_buf_t *sbp);
56 static int32_t emlxs_send_ip(emlxs_port_t *port, emlxs_buf_t *sbp);
57 static int32_t emlxs_send_els(emlxs_port_t *port, emlxs_buf_t *sbp);
58 static int32_t emlxs_send_els_rsp(emlxs_port_t *port, emlxs_buf_t *sbp);
59 static int32_t emlxs_send_ct(emlxs_port_t *port, emlxs_buf_t *sbp);
60 static int32_t emlxs_send_ct_rsp(emlxs_port_t *port, emlxs_buf_t *sbp);
61 static uint32_t emlxs_add_instance(int32_t ddiinst);
62 static void emlxs_iodone(emlxs_buf_t *sbp);
63 static int emlxs_pm_lower_power(dev_info_t *dip);
64 static int emlxs_pm_raise_power(dev_info_t *dip);
65 static void emlxs_driver_remove(dev_info_t *dip, uint32_t init_flag,
66 uint32_t failed);
67 static void emlxs_iodone_server(void *arg1, void *arg2, void *arg3);
68 static uint32_t emlxs_integrity_check(emlxs_hba_t *hba);
69 static uint32_t emlxs_test(emlxs_hba_t *hba, uint32_t test_code,
70 uint32_t args, uint32_t *arg);
71
72 #if (EMLXS_MODREV >= EMLXS_MODREV3) && (EMLXS_MODREV <= EMLXS_MODREV4)
73 static void emlxs_read_vport_prop(emlxs_hba_t *hba);
74 #endif /* EMLXS_MODREV3 && EMLXS_MODREV4 */
75
76 static void emlxs_mode_init_masks(emlxs_hba_t *hba);
77
78
79 extern int
80 emlxs_msiid_to_chan(emlxs_hba_t *hba, int msi_id);
81 extern int
82 emlxs_select_msiid(emlxs_hba_t *hba);
83 extern void
84 emlxs_sli4_zero_queue_stat(emlxs_hba_t *hba);
85
86 /*
87 * Driver Entry Routines.
88 */
89 static int32_t emlxs_detach(dev_info_t *, ddi_detach_cmd_t);
90 static int32_t emlxs_attach(dev_info_t *, ddi_attach_cmd_t);
91 static int32_t emlxs_open(dev_t *, int32_t, int32_t, cred_t *);
92 static int32_t emlxs_close(dev_t, int32_t, int32_t, cred_t *);
93 static int32_t emlxs_ioctl(dev_t, int32_t, intptr_t, int32_t,
94 cred_t *, int32_t *);
95 static int32_t emlxs_info(dev_info_t *, ddi_info_cmd_t, void *, void **);
96
97
98 /*
99 * FC_AL Transport Functions.
100 */
101 static opaque_t emlxs_fca_bind_port(dev_info_t *, fc_fca_port_info_t *,
102 fc_fca_bind_info_t *);
103 static void emlxs_fca_unbind_port(opaque_t);
104 static void emlxs_initialize_pkt(emlxs_port_t *, emlxs_buf_t *);
105 static int32_t emlxs_fca_get_cap(opaque_t, char *, void *);
106 static int32_t emlxs_fca_set_cap(opaque_t, char *, void *);
107 static int32_t emlxs_fca_get_map(opaque_t, fc_lilpmap_t *);
108 static int32_t emlxs_fca_ub_alloc(opaque_t, uint64_t *, uint32_t,
109 uint32_t *, uint32_t);
110 static int32_t emlxs_fca_ub_free(opaque_t, uint32_t, uint64_t *);
111
112 static opaque_t emlxs_fca_get_device(opaque_t, fc_portid_t);
113 static int32_t emlxs_fca_notify(opaque_t, uint32_t);
114 static void emlxs_ub_els_reject(emlxs_port_t *, fc_unsol_buf_t *);
115
116 /*
117 * Driver Internal Functions.
118 */
119
120 static void emlxs_poll(emlxs_port_t *, emlxs_buf_t *);
121 static int32_t emlxs_power(dev_info_t *, int32_t, int32_t);
122 #ifdef EMLXS_I386
123 #ifdef S11
124 static int32_t emlxs_quiesce(dev_info_t *);
125 #endif /* S11 */
126 #endif /* EMLXS_I386 */
127 static int32_t emlxs_hba_resume(dev_info_t *);
128 static int32_t emlxs_hba_suspend(dev_info_t *);
129 static int32_t emlxs_hba_detach(dev_info_t *);
130 static int32_t emlxs_hba_attach(dev_info_t *);
131 static void emlxs_lock_destroy(emlxs_hba_t *);
132 static void emlxs_lock_init(emlxs_hba_t *);
133
134 char *emlxs_pm_components[] = {
135 "NAME=" DRIVER_NAME "000",
136 "0=Device D3 State",
137 "1=Device D0 State"
138 };
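/*
 * Illustrative sketch only (not part of the driver): the component
 * strings above would typically be published to the power management
 * framework as the "pm-components" property during attach, e.g.:
 *
 *	if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
 *	    "pm-components", emlxs_pm_components,
 *	    sizeof (emlxs_pm_components) /
 *	    sizeof (emlxs_pm_components[0])) != DDI_PROP_SUCCESS) {
 *		(power management will be unavailable for this instance)
 *	}
 */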
139
140
141 /*
142  * Default emlxs DMA limits
143 */
144 ddi_dma_lim_t emlxs_dma_lim = {
145 (uint32_t)0, /* dlim_addr_lo */
146 (uint32_t)0xffffffff, /* dlim_addr_hi */
147 (uint_t)0x00ffffff, /* dlim_cntr_max */
148 DEFAULT_BURSTSIZE | BURST32 | BURST64, /* dlim_burstsizes */
149 1, /* dlim_minxfer */
150 0x00ffffff /* dlim_dmaspeed */
151 };
152
153 /*
154 * Be careful when using these attributes; the defaults listed below are
155 * (almost) the most general case, permitting allocation in almost any
156  * way supported by the LightPulse family. The sole exception is the
157  * alignment, which is specified as requiring memory allocation on a
158  * 4-byte boundary; the LightPulse can DMA memory on any byte boundary.
159 *
160 * The LightPulse family currently is limited to 16M transfers;
161 * this restriction affects the dma_attr_count_max and dma_attr_maxxfer fields.
162 */
163 ddi_dma_attr_t emlxs_dma_attr = {
164 DMA_ATTR_V0, /* dma_attr_version */
165 (uint64_t)0, /* dma_attr_addr_lo */
166 (uint64_t)0xffffffffffffffff, /* dma_attr_addr_hi */
167 (uint64_t)0x00ffffff, /* dma_attr_count_max */
168 1, /* dma_attr_align */
169 DEFAULT_BURSTSIZE | BURST32 | BURST64, /* dma_attr_burstsizes */
170 1, /* dma_attr_minxfer */
171 (uint64_t)0x00ffffff, /* dma_attr_maxxfer */
172 (uint64_t)0xffffffff, /* dma_attr_seg */
173 1, /* dma_attr_sgllen */
174 1, /* dma_attr_granular */
175 0 /* dma_attr_flags */
176 };
177
178 ddi_dma_attr_t emlxs_dma_attr_ro = {
179 DMA_ATTR_V0, /* dma_attr_version */
180 (uint64_t)0, /* dma_attr_addr_lo */
181 (uint64_t)0xffffffffffffffff, /* dma_attr_addr_hi */
182 (uint64_t)0x00ffffff, /* dma_attr_count_max */
183 1, /* dma_attr_align */
184 DEFAULT_BURSTSIZE | BURST32 | BURST64, /* dma_attr_burstsizes */
185 1, /* dma_attr_minxfer */
186 (uint64_t)0x00ffffff, /* dma_attr_maxxfer */
187 (uint64_t)0xffffffff, /* dma_attr_seg */
188 1, /* dma_attr_sgllen */
189 1, /* dma_attr_granular */
190 DDI_DMA_RELAXED_ORDERING /* dma_attr_flags */
191 };
192
193 ddi_dma_attr_t emlxs_dma_attr_1sg = {
194 DMA_ATTR_V0, /* dma_attr_version */
195 (uint64_t)0, /* dma_attr_addr_lo */
196 (uint64_t)0xffffffffffffffff, /* dma_attr_addr_hi */
197 (uint64_t)0x00ffffff, /* dma_attr_count_max */
198 1, /* dma_attr_align */
199 DEFAULT_BURSTSIZE | BURST32 | BURST64, /* dma_attr_burstsizes */
200 1, /* dma_attr_minxfer */
201 (uint64_t)0x00ffffff, /* dma_attr_maxxfer */
202 (uint64_t)0xffffffff, /* dma_attr_seg */
203 1, /* dma_attr_sgllen */
204 1, /* dma_attr_granular */
205 0 /* dma_attr_flags */
206 };
207
208 #if (EMLXS_MODREV >= EMLXS_MODREV3)
209 ddi_dma_attr_t emlxs_dma_attr_fcip_rsp = {
210 DMA_ATTR_V0, /* dma_attr_version */
211 (uint64_t)0, /* dma_attr_addr_lo */
212 (uint64_t)0xffffffffffffffff, /* dma_attr_addr_hi */
213 (uint64_t)0x00ffffff, /* dma_attr_count_max */
214 1, /* dma_attr_align */
215 DEFAULT_BURSTSIZE | BURST32 | BURST64, /* dma_attr_burstsizes */
216 1, /* dma_attr_minxfer */
217 (uint64_t)0x00ffffff, /* dma_attr_maxxfer */
218 (uint64_t)0xffffffff, /* dma_attr_seg */
219 1, /* dma_attr_sgllen */
220 1, /* dma_attr_granular */
221 0 /* dma_attr_flags */
222 };
223 #endif /* >= EMLXS_MODREV3 */
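/*
 * Illustrative sketch only (not part of the driver): a DMA handle
 * governed by the attributes above would typically be allocated with
 * ddi_dma_alloc_handle(9F), e.g.:
 *
 *	ddi_dma_handle_t dma_handle;
 *
 *	if (ddi_dma_alloc_handle(dip, &emlxs_dma_attr, DDI_DMA_SLEEP,
 *	    NULL, &dma_handle) != DDI_SUCCESS) {
 *		(handle allocation failed)
 *	}
 *
 * The driver's allocation paths choose between emlxs_dma_attr,
 * emlxs_dma_attr_ro and emlxs_dma_attr_1sg depending on whether relaxed
 * ordering or a single scatter/gather entry is required.
 */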
224
225 /*
226 * DDI access attributes for device
227 */
228 ddi_device_acc_attr_t emlxs_dev_acc_attr = {
229 DDI_DEVICE_ATTR_V1, /* devacc_attr_version */
230 DDI_STRUCTURE_LE_ACC, /* PCI is Little Endian */
231 DDI_STRICTORDER_ACC, /* devacc_attr_dataorder */
232 DDI_DEFAULT_ACC /* devacc_attr_access */
233 };
234
235 /*
236 * DDI access attributes for data
237 */
238 ddi_device_acc_attr_t emlxs_data_acc_attr = {
239 DDI_DEVICE_ATTR_V1, /* devacc_attr_version */
240 DDI_NEVERSWAP_ACC, /* don't swap for Data */
241 DDI_STRICTORDER_ACC, /* devacc_attr_dataorder */
242 DDI_DEFAULT_ACC /* devacc_attr_access */
243 };
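/*
 * Illustrative sketch only (not part of the driver): emlxs_dev_acc_attr
 * is the style of attribute block passed when mapping the adapter's PCI
 * register space (the register set number below is arbitrary):
 *
 *	ddi_acc_handle_t acc_handle;
 *	caddr_t regs;
 *
 *	if (ddi_regs_map_setup(dip, 1, &regs, 0, 0,
 *	    &emlxs_dev_acc_attr, &acc_handle) != DDI_SUCCESS) {
 *		(register mapping failed)
 *	}
 *
 * emlxs_data_acc_attr (DDI_NEVERSWAP_ACC) is used instead for host
 * memory buffers whose contents the driver byte-swaps explicitly.
 */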
244
245 /*
246 * Fill in the FC Transport structure,
247  * as defined in the Fibre Channel Transport Programming Guide.
248 */
249 #if (EMLXS_MODREV == EMLXS_MODREV5)
250 static fc_fca_tran_t emlxs_fca_tran = {
251 FCTL_FCA_MODREV_5, /* fca_version, with SUN NPIV support */
252 	MAX_VPORTS,			/* fca number of ports */
253 sizeof (emlxs_buf_t), /* fca pkt size */
254 2048, /* fca cmd max */
255 &emlxs_dma_lim, /* fca dma limits */
256 0, /* fca iblock, to be filled in later */
257 &emlxs_dma_attr, /* fca dma attributes */
258 &emlxs_dma_attr_1sg, /* fca dma fcp cmd attributes */
259 &emlxs_dma_attr_1sg, /* fca dma fcp rsp attributes */
260 &emlxs_dma_attr_ro, /* fca dma fcp data attributes */
261 &emlxs_dma_attr_1sg, /* fca dma fcip cmd attributes */
262 &emlxs_dma_attr_fcip_rsp, /* fca dma fcip rsp attributes */
263 &emlxs_dma_attr_1sg, /* fca dma fcsm cmd attributes */
264 &emlxs_dma_attr, /* fca dma fcsm rsp attributes */
265 	&emlxs_data_acc_attr,		/* fca access attributes */
266 0, /* fca_num_npivports */
267 {0, 0, 0, 0, 0, 0, 0, 0}, /* Physical port WWPN */
268 emlxs_fca_bind_port,
269 emlxs_fca_unbind_port,
270 emlxs_fca_pkt_init,
271 emlxs_fca_pkt_uninit,
272 emlxs_fca_transport,
273 emlxs_fca_get_cap,
274 emlxs_fca_set_cap,
275 emlxs_fca_get_map,
276 emlxs_fca_transport,
277 emlxs_fca_ub_alloc,
278 emlxs_fca_ub_free,
279 emlxs_fca_ub_release,
280 emlxs_fca_pkt_abort,
281 emlxs_fca_reset,
282 emlxs_fca_port_manage,
283 emlxs_fca_get_device,
284 emlxs_fca_notify
285 };
286 #endif /* EMLXS_MODREV5 */
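/*
 * Illustrative sketch only (not part of the driver): on a successful
 * hba attach, a structure like the one above is registered with the
 * Leadville transport, conceptually:
 *
 *	if (MODSYM(fc_fca_attach)(dip, &emlxs_fca_tran) != DDI_SUCCESS) {
 *		(transport registration failed; fail the attach)
 *	}
 *
 * The MODSYM() indirection is used because the misc/fctl symbols are
 * resolved at runtime via ddi_modsym() when MODSYM_SUPPORT is defined
 * (see emlxs_fca_modopen() below).
 */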
287
288
289 #if (EMLXS_MODREV == EMLXS_MODREV4)
290 static fc_fca_tran_t emlxs_fca_tran = {
291 FCTL_FCA_MODREV_4, /* fca_version */
292 	MAX_VPORTS,			/* fca number of ports */
293 sizeof (emlxs_buf_t), /* fca pkt size */
294 2048, /* fca cmd max */
295 &emlxs_dma_lim, /* fca dma limits */
296 0, /* fca iblock, to be filled in later */
297 &emlxs_dma_attr, /* fca dma attributes */
298 &emlxs_dma_attr_1sg, /* fca dma fcp cmd attributes */
299 &emlxs_dma_attr_1sg, /* fca dma fcp rsp attributes */
300 &emlxs_dma_attr_ro, /* fca dma fcp data attributes */
301 &emlxs_dma_attr_1sg, /* fca dma fcip cmd attributes */
302 &emlxs_dma_attr_fcip_rsp, /* fca dma fcip rsp attributes */
303 &emlxs_dma_attr_1sg, /* fca dma fcsm cmd attributes */
304 &emlxs_dma_attr, /* fca dma fcsm rsp attributes */
305 	&emlxs_data_acc_attr,		/* fca access attributes */
306 emlxs_fca_bind_port,
307 emlxs_fca_unbind_port,
308 emlxs_fca_pkt_init,
309 emlxs_fca_pkt_uninit,
310 emlxs_fca_transport,
311 emlxs_fca_get_cap,
312 emlxs_fca_set_cap,
313 emlxs_fca_get_map,
314 emlxs_fca_transport,
315 emlxs_fca_ub_alloc,
316 emlxs_fca_ub_free,
317 emlxs_fca_ub_release,
318 emlxs_fca_pkt_abort,
319 emlxs_fca_reset,
320 emlxs_fca_port_manage,
321 emlxs_fca_get_device,
322 emlxs_fca_notify
323 };
324 #endif	/* EMLXS_MODREV4 */
325
326
327 #if (EMLXS_MODREV == EMLXS_MODREV3)
328 static fc_fca_tran_t emlxs_fca_tran = {
329 FCTL_FCA_MODREV_3, /* fca_version */
330 	MAX_VPORTS,			/* fca number of ports */
331 sizeof (emlxs_buf_t), /* fca pkt size */
332 2048, /* fca cmd max */
333 &emlxs_dma_lim, /* fca dma limits */
334 0, /* fca iblock, to be filled in later */
335 &emlxs_dma_attr, /* fca dma attributes */
336 &emlxs_dma_attr_1sg, /* fca dma fcp cmd attributes */
337 &emlxs_dma_attr_1sg, /* fca dma fcp rsp attributes */
338 &emlxs_dma_attr_ro, /* fca dma fcp data attributes */
339 &emlxs_dma_attr_1sg, /* fca dma fcip cmd attributes */
340 &emlxs_dma_attr_fcip_rsp, /* fca dma fcip rsp attributes */
341 &emlxs_dma_attr_1sg, /* fca dma fcsm cmd attributes */
342 &emlxs_dma_attr, /* fca dma fcsm rsp attributes */
343 	&emlxs_data_acc_attr,		/* fca access attributes */
344 emlxs_fca_bind_port,
345 emlxs_fca_unbind_port,
346 emlxs_fca_pkt_init,
347 emlxs_fca_pkt_uninit,
348 emlxs_fca_transport,
349 emlxs_fca_get_cap,
350 emlxs_fca_set_cap,
351 emlxs_fca_get_map,
352 emlxs_fca_transport,
353 emlxs_fca_ub_alloc,
354 emlxs_fca_ub_free,
355 emlxs_fca_ub_release,
356 emlxs_fca_pkt_abort,
357 emlxs_fca_reset,
358 emlxs_fca_port_manage,
359 emlxs_fca_get_device,
360 emlxs_fca_notify
361 };
362 #endif /* EMLXS_MODREV3 */
363
364
365 #if (EMLXS_MODREV == EMLXS_MODREV2)
366 static fc_fca_tran_t emlxs_fca_tran = {
367 FCTL_FCA_MODREV_2, /* fca_version */
368 MAX_VPORTS, /* number of ports */
369 sizeof (emlxs_buf_t), /* pkt size */
370 2048, /* max cmds */
371 &emlxs_dma_lim, /* DMA limits */
372 0, /* iblock, to be filled in later */
373 &emlxs_dma_attr, /* dma attributes */
374 	&emlxs_data_acc_attr,	/* access attributes */
375 emlxs_fca_bind_port,
376 emlxs_fca_unbind_port,
377 emlxs_fca_pkt_init,
378 emlxs_fca_pkt_uninit,
379 emlxs_fca_transport,
380 emlxs_fca_get_cap,
381 emlxs_fca_set_cap,
382 emlxs_fca_get_map,
383 emlxs_fca_transport,
384 emlxs_fca_ub_alloc,
385 emlxs_fca_ub_free,
386 emlxs_fca_ub_release,
387 emlxs_fca_pkt_abort,
388 emlxs_fca_reset,
389 emlxs_fca_port_manage,
390 emlxs_fca_get_device,
391 emlxs_fca_notify
392 };
393 #endif /* EMLXS_MODREV2 */
394
395
396 /*
397  * State pointer which the implementation uses as a place to
398  * hang a set of per-driver structures.
399 *
400 */
401 void *emlxs_soft_state = NULL;
402
403 /*
404 * Driver Global variables.
405 */
406 int32_t emlxs_scsi_reset_delay = 3000; /* milliseconds */
407
408 emlxs_device_t emlxs_device;
409
410 uint32_t emlxs_instance[MAX_FC_BRDS]; /* uses emlxs_device.lock */
411 uint32_t emlxs_instance_count = 0; /* uses emlxs_device.lock */
412 uint32_t emlxs_instance_flag = 0; /* uses emlxs_device.lock */
413 #define EMLXS_FW_SHOW 0x00000001
414
415
416 /*
417 * CB ops vector. Used for administration only.
418 */
419 static struct cb_ops emlxs_cb_ops = {
420 emlxs_open, /* cb_open */
421 emlxs_close, /* cb_close */
422 nodev, /* cb_strategy */
423 nodev, /* cb_print */
424 nodev, /* cb_dump */
425 nodev, /* cb_read */
426 nodev, /* cb_write */
427 emlxs_ioctl, /* cb_ioctl */
428 nodev, /* cb_devmap */
429 nodev, /* cb_mmap */
430 nodev, /* cb_segmap */
431 nochpoll, /* cb_chpoll */
432 ddi_prop_op, /* cb_prop_op */
433 0, /* cb_stream */
434 #ifdef _LP64
435 D_64BIT | D_HOTPLUG | D_MP | D_NEW, /* cb_flag */
436 #else
437 D_HOTPLUG | D_MP | D_NEW, /* cb_flag */
438 #endif
439 CB_REV, /* rev */
440 nodev, /* cb_aread */
441 nodev /* cb_awrite */
442 };
443
444 static struct dev_ops emlxs_ops = {
445 DEVO_REV, /* rev */
446 0, /* refcnt */
447 emlxs_info, /* getinfo */
448 nulldev, /* identify */
449 nulldev, /* probe */
450 emlxs_attach, /* attach */
451 emlxs_detach, /* detach */
452 nodev, /* reset */
453 &emlxs_cb_ops, /* devo_cb_ops */
454 NULL, /* devo_bus_ops */
455 emlxs_power, /* power ops */
456 #ifdef EMLXS_I386
457 #ifdef S11
458 emlxs_quiesce, /* quiesce */
459 #endif /* S11 */
460 #endif /* EMLXS_I386 */
461 };
462
463 #include <sys/modctl.h>
464 extern struct mod_ops mod_driverops;
465
466 #ifdef SAN_DIAG_SUPPORT
467 extern kmutex_t emlxs_sd_bucket_mutex;
468 extern sd_bucket_info_t emlxs_sd_bucket;
469 #endif /* SAN_DIAG_SUPPORT */
470
471 /*
472 * Module linkage information for the kernel.
473 */
474 static struct modldrv emlxs_modldrv = {
475 &mod_driverops, /* module type - driver */
476 emlxs_name, /* module name */
477 &emlxs_ops, /* driver ops */
478 };
479
480
481 /*
482 * Driver module linkage structure
483 */
484 static struct modlinkage emlxs_modlinkage = {
485 MODREV_1, /* ml_rev - must be MODREV_1 */
486 &emlxs_modldrv, /* ml_linkage */
487 NULL /* end of driver linkage */
488 };
489
490
491 /* We only need to add entries for non-default return codes. */
492 /* Entries do not need to be in order. */
493 /* Default: FC_PKT_TRAN_ERROR, FC_REASON_ABORTED, */
494 /* FC_EXPLN_NONE, FC_ACTION_RETRYABLE */
495
496 emlxs_xlat_err_t emlxs_iostat_tbl[] = {
497 /* {f/w code, pkt_state, pkt_reason, */
498 /* pkt_expln, pkt_action} */
499
500 /* 0x00 - Do not remove */
501 {IOSTAT_SUCCESS, FC_PKT_SUCCESS, FC_REASON_NONE,
502 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
503
504 /* 0x01 - Do not remove */
505 {IOSTAT_FCP_RSP_ERROR, FC_PKT_SUCCESS, FC_REASON_NONE,
506 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
507
508 /* 0x02 */
509 {IOSTAT_REMOTE_STOP, FC_PKT_REMOTE_STOP, FC_REASON_ABTS,
510 FC_EXPLN_NONE, FC_ACTION_NON_RETRYABLE},
511
512 /*
513 * This is a default entry.
514 * The real codes are written dynamically in emlxs_els.c
515 */
516 /* 0x09 */
517 {IOSTAT_LS_RJT, FC_PKT_LS_RJT, FC_REASON_CMD_UNABLE,
518 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
519
520 /* Special error code */
521 /* 0x10 */
522 {IOSTAT_DATA_OVERRUN, FC_PKT_TRAN_ERROR, FC_REASON_OVERRUN,
523 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
524
525 /* Special error code */
526 /* 0x11 */
527 {IOSTAT_DATA_UNDERRUN, FC_PKT_TRAN_ERROR, FC_REASON_ABORTED,
528 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
529
530 /* Special error code */
531 /* 0x12 */
532 {IOSTAT_RSP_INVALID, FC_PKT_TRAN_ERROR, FC_REASON_ABORTED,
533 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
534
535 /* CLASS 2 only */
536 /* 0x04 */
537 {IOSTAT_NPORT_RJT, FC_PKT_NPORT_RJT, FC_REASON_PROTOCOL_ERROR,
538 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
539
540 /* CLASS 2 only */
541 /* 0x05 */
542 {IOSTAT_FABRIC_RJT, FC_PKT_FABRIC_RJT, FC_REASON_PROTOCOL_ERROR,
543 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
544
545 /* CLASS 2 only */
546 /* 0x06 */
547 {IOSTAT_NPORT_BSY, FC_PKT_NPORT_BSY, FC_REASON_PHYSICAL_BUSY,
548 FC_EXPLN_NONE, FC_ACTION_SEQ_TERM_RETRY},
549
550 /* CLASS 2 only */
551 /* 0x07 */
552 {IOSTAT_FABRIC_BSY, FC_PKT_FABRIC_BSY, FC_REASON_FABRIC_BSY,
553 FC_EXPLN_NONE, FC_ACTION_SEQ_TERM_RETRY},
554 };
555
556 #define IOSTAT_MAX (sizeof (emlxs_iostat_tbl)/sizeof (emlxs_xlat_err_t))
557
558
559 /* We only need to add entries for non-default return codes. */
560 /* Entries do not need to be in order. */
561 /* Default: FC_PKT_TRAN_ERROR, FC_REASON_ABORTED, */
562 /*	FC_EXPLN_NONE, FC_ACTION_RETRYABLE */
563
564 emlxs_xlat_err_t emlxs_ioerr_tbl[] = {
565 /* {f/w code, pkt_state, pkt_reason, */
566 /* pkt_expln, pkt_action} */
567
568 /* 0x01 */
569 {IOERR_MISSING_CONTINUE, FC_PKT_TRAN_ERROR, FC_REASON_OVERRUN,
570 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
571
572 /* 0x02 */
573 {IOERR_SEQUENCE_TIMEOUT, FC_PKT_TIMEOUT, FC_REASON_SEQ_TIMEOUT,
574 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
575
576 /* 0x04 */
577 {IOERR_INVALID_RPI, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
578 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
579
580 /* 0x05 */
581 {IOERR_NO_XRI, FC_PKT_LOCAL_RJT, FC_REASON_XCHG_DROPPED,
582 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
583
584 /* 0x06 */
585 {IOERR_ILLEGAL_COMMAND, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_REQ,
586 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
587
588 /* 0x07 */
589 {IOERR_XCHG_DROPPED, FC_PKT_LOCAL_RJT, FC_REASON_XCHG_DROPPED,
590 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
591
592 /* 0x08 */
593 {IOERR_ILLEGAL_FIELD, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_REQ,
594 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
595
596 /* 0x0B */
597 {IOERR_RCV_BUFFER_WAITING, FC_PKT_LOCAL_RJT, FC_REASON_NOMEM,
598 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
599
600 /* 0x0D */
601 {IOERR_TX_DMA_FAILED, FC_PKT_LOCAL_RJT, FC_REASON_DMA_ERROR,
602 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
603
604 /* 0x0E */
605 {IOERR_RX_DMA_FAILED, FC_PKT_LOCAL_RJT, FC_REASON_DMA_ERROR,
606 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
607
608 /* 0x0F */
609 {IOERR_ILLEGAL_FRAME, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_FRAME,
610 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
611
612 /* 0x11 */
613 {IOERR_NO_RESOURCES, FC_PKT_LOCAL_RJT, FC_REASON_NOMEM,
614 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
615
616 /* 0x13 */
617 {IOERR_ILLEGAL_LENGTH, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_LENGTH,
618 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
619
620 /* 0x14 */
621 {IOERR_UNSUPPORTED_FEATURE, FC_PKT_LOCAL_RJT, FC_REASON_UNSUPPORTED,
622 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
623
624 /* 0x15 */
625 {IOERR_ABORT_IN_PROGRESS, FC_PKT_LOCAL_RJT, FC_REASON_ABORTED,
626 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
627
628 /* 0x16 */
629 {IOERR_ABORT_REQUESTED, FC_PKT_LOCAL_RJT, FC_REASON_ABORTED,
630 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
631
632 /* 0x17 */
633 {IOERR_RCV_BUFFER_TIMEOUT, FC_PKT_LOCAL_RJT, FC_REASON_RX_BUF_TIMEOUT,
634 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
635
636 /* 0x18 */
637 {IOERR_LOOP_OPEN_FAILURE, FC_PKT_LOCAL_RJT, FC_REASON_FCAL_OPN_FAIL,
638 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
639
640 /* 0x1A */
641 {IOERR_LINK_DOWN, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
642 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
643
644 /* 0x21 */
645 {IOERR_BAD_HOST_ADDRESS, FC_PKT_LOCAL_RJT, FC_REASON_BAD_SID,
646 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
647
648 /* Occurs at link down */
649 /* 0x28 */
650 {IOERR_BUFFER_SHORTAGE, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
651 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
652
653 /* 0xF0 */
654 {IOERR_ABORT_TIMEOUT, FC_PKT_TIMEOUT, FC_REASON_SEQ_TIMEOUT,
655 FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
656 };
657
658 #define IOERR_MAX (sizeof (emlxs_ioerr_tbl)/sizeof (emlxs_xlat_err_t))
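/*
 * Illustrative sketch only (not part of the driver): translating a
 * firmware local-reject code with the table above amounts to a linear
 * scan, falling back to the default state/reason noted earlier when no
 * entry matches:
 *
 *	emlxs_xlat_err_t *entry = emlxs_ioerr_tbl;
 *	uint32_t i;
 *
 *	for (i = 0; i < IOERR_MAX; i++, entry++) {
 *		if (entry->emlxs_status == ioerr) {
 *			break;
 *		}
 *	}
 *	(if i reaches IOERR_MAX, use the FC_PKT_TRAN_ERROR defaults)
 *
 * The member name emlxs_status is an assumption for this sketch; the
 * actual emlxs_xlat_err_t field names are defined in the driver headers.
 */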
659
660
661
662 emlxs_table_t emlxs_error_table[] = {
663 {IOERR_SUCCESS, "No error."},
664 {IOERR_MISSING_CONTINUE, "Missing continue."},
665 {IOERR_SEQUENCE_TIMEOUT, "Sequence timeout."},
666 {IOERR_INTERNAL_ERROR, "Internal error."},
667 {IOERR_INVALID_RPI, "Invalid RPI."},
668 {IOERR_NO_XRI, "No XRI."},
669 {IOERR_ILLEGAL_COMMAND, "Illegal command."},
670 {IOERR_XCHG_DROPPED, "Exchange dropped."},
671 {IOERR_ILLEGAL_FIELD, "Illegal field."},
672 {IOERR_RCV_BUFFER_WAITING, "RX buffer waiting."},
673 {IOERR_TX_DMA_FAILED, "TX DMA failed."},
674 {IOERR_RX_DMA_FAILED, "RX DMA failed."},
675 {IOERR_ILLEGAL_FRAME, "Illegal frame."},
676 {IOERR_NO_RESOURCES, "No resources."},
677 {IOERR_ILLEGAL_LENGTH, "Illegal length."},
678 {IOERR_UNSUPPORTED_FEATURE, "Unsupported feature."},
679 	{IOERR_ABORT_IN_PROGRESS, "Abort in progress."},
680 {IOERR_ABORT_REQUESTED, "Abort requested."},
681 {IOERR_RCV_BUFFER_TIMEOUT, "RX buffer timeout."},
682 {IOERR_LOOP_OPEN_FAILURE, "Loop open failed."},
683 {IOERR_RING_RESET, "Ring reset."},
684 {IOERR_LINK_DOWN, "Link down."},
685 {IOERR_CORRUPTED_DATA, "Corrupted data."},
686 {IOERR_CORRUPTED_RPI, "Corrupted RPI."},
687 {IOERR_OUT_OF_ORDER_DATA, "Out-of-order data."},
688 {IOERR_OUT_OF_ORDER_ACK, "Out-of-order ack."},
689 {IOERR_DUP_FRAME, "Duplicate frame."},
690 {IOERR_LINK_CONTROL_FRAME, "Link control frame."},
691 {IOERR_BAD_HOST_ADDRESS, "Bad host address."},
692 {IOERR_RCV_HDRBUF_WAITING, "RX header buffer waiting."},
693 {IOERR_MISSING_HDR_BUFFER, "Missing header buffer."},
694 {IOERR_MSEQ_CHAIN_CORRUPTED, "MSEQ chain corrupted."},
695 {IOERR_ABORTMULT_REQUESTED, "Abort multiple requested."},
696 {IOERR_BUFFER_SHORTAGE, "Buffer shortage."},
697 {IOERR_XRIBUF_WAITING, "XRI buffer shortage"},
698 {IOERR_XRIBUF_MISSING, "XRI buffer missing"},
699 {IOERR_ROFFSET_INVAL, "Relative offset invalid."},
700 {IOERR_ROFFSET_MISSING, "Relative offset missing."},
701 {IOERR_INSUF_BUFFER, "Buffer too small."},
702 {IOERR_MISSING_SI, "ELS frame missing SI"},
703 {IOERR_MISSING_ES, "Exhausted burst without ES"},
704 {IOERR_INCOMP_XFER, "Transfer incomplete."},
705 {IOERR_ABORT_TIMEOUT, "Abort timeout."}
706
707 }; /* emlxs_error_table */
708
709
710 emlxs_table_t emlxs_state_table[] = {
711 {IOSTAT_SUCCESS, "Success."},
712 {IOSTAT_FCP_RSP_ERROR, "FCP response error."},
713 {IOSTAT_REMOTE_STOP, "Remote stop."},
714 {IOSTAT_LOCAL_REJECT, "Local reject."},
715 {IOSTAT_NPORT_RJT, "NPort reject."},
716 {IOSTAT_FABRIC_RJT, "Fabric reject."},
717 {IOSTAT_NPORT_BSY, "Nport busy."},
718 {IOSTAT_FABRIC_BSY, "Fabric busy."},
719 {IOSTAT_INTERMED_RSP, "Intermediate response."},
720 {IOSTAT_LS_RJT, "LS reject."},
721 {IOSTAT_CMD_REJECT, "Cmd reject."},
722 {IOSTAT_FCP_TGT_LENCHK, "TGT length check."},
723 {IOSTAT_NEED_BUFF_ENTRY, "Need buffer entry."},
724 {IOSTAT_DATA_UNDERRUN, "Data underrun."},
725 {IOSTAT_DATA_OVERRUN, "Data overrun."},
726 {IOSTAT_RSP_INVALID, "Response Invalid."},
727
728 }; /* emlxs_state_table */
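/*
 * Illustrative sketch only (not part of the driver): the emlxs_table_t
 * arrays above map numeric codes to message strings and are typically
 * searched with a simple loop, e.g.:
 *
 *	const char *name = "Unknown";
 *	uint32_t i;
 *	uint32_t count = sizeof (emlxs_state_table) /
 *	    sizeof (emlxs_table_t);
 *
 *	for (i = 0; i < count; i++) {
 *		if (emlxs_state_table[i].code == iostat) {
 *			name = emlxs_state_table[i].string;
 *			break;
 *		}
 *	}
 *
 * The field names (code, string) are assumptions for this sketch; the
 * real emlxs_table_t layout is defined in the driver headers.
 */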
729
730
731 #ifdef MENLO_SUPPORT
732 emlxs_table_t emlxs_menlo_cmd_table[] = {
733 {MENLO_CMD_INITIALIZE, "MENLO_INIT"},
734 {MENLO_CMD_FW_DOWNLOAD, "MENLO_FW_DOWNLOAD"},
735 {MENLO_CMD_READ_MEMORY, "MENLO_READ_MEM"},
736 {MENLO_CMD_WRITE_MEMORY, "MENLO_WRITE_MEM"},
737 {MENLO_CMD_FTE_INSERT, "MENLO_FTE_INSERT"},
738 {MENLO_CMD_FTE_DELETE, "MENLO_FTE_DELETE"},
739
740 {MENLO_CMD_GET_INIT, "MENLO_GET_INIT"},
741 {MENLO_CMD_GET_CONFIG, "MENLO_GET_CONFIG"},
742 {MENLO_CMD_GET_PORT_STATS, "MENLO_GET_PORT_STATS"},
743 {MENLO_CMD_GET_LIF_STATS, "MENLO_GET_LIF_STATS"},
744 {MENLO_CMD_GET_ASIC_STATS, "MENLO_GET_ASIC_STATS"},
745 {MENLO_CMD_GET_LOG_CONFIG, "MENLO_GET_LOG_CFG"},
746 {MENLO_CMD_GET_LOG_DATA, "MENLO_GET_LOG_DATA"},
747 {MENLO_CMD_GET_PANIC_LOG, "MENLO_GET_PANIC_LOG"},
748 {MENLO_CMD_GET_LB_MODE, "MENLO_GET_LB_MODE"},
749
750 {MENLO_CMD_SET_PAUSE, "MENLO_SET_PAUSE"},
751 {MENLO_CMD_SET_FCOE_COS, "MENLO_SET_FCOE_COS"},
752 {MENLO_CMD_SET_UIF_PORT_TYPE, "MENLO_SET_UIF_TYPE"},
753
754 {MENLO_CMD_DIAGNOSTICS, "MENLO_DIAGNOSTICS"},
755 {MENLO_CMD_LOOPBACK, "MENLO_LOOPBACK"},
756
757 {MENLO_CMD_RESET, "MENLO_RESET"},
758 {MENLO_CMD_SET_MODE, "MENLO_SET_MODE"}
759
760 }; /* emlxs_menlo_cmd_table */
761
762 emlxs_table_t emlxs_menlo_rsp_table[] = {
763 {MENLO_RSP_SUCCESS, "SUCCESS"},
764 {MENLO_ERR_FAILED, "FAILED"},
765 {MENLO_ERR_INVALID_CMD, "INVALID_CMD"},
766 {MENLO_ERR_INVALID_CREDIT, "INVALID_CREDIT"},
767 {MENLO_ERR_INVALID_SIZE, "INVALID_SIZE"},
768 {MENLO_ERR_INVALID_ADDRESS, "INVALID_ADDRESS"},
769 {MENLO_ERR_INVALID_CONTEXT, "INVALID_CONTEXT"},
770 {MENLO_ERR_INVALID_LENGTH, "INVALID_LENGTH"},
771 {MENLO_ERR_INVALID_TYPE, "INVALID_TYPE"},
772 {MENLO_ERR_INVALID_DATA, "INVALID_DATA"},
773 {MENLO_ERR_INVALID_VALUE1, "INVALID_VALUE1"},
774 {MENLO_ERR_INVALID_VALUE2, "INVALID_VALUE2"},
775 {MENLO_ERR_INVALID_MASK, "INVALID_MASK"},
776 {MENLO_ERR_CHECKSUM, "CHECKSUM_ERROR"},
777 {MENLO_ERR_UNKNOWN_FCID, "UNKNOWN_FCID"},
778 {MENLO_ERR_UNKNOWN_WWN, "UNKNOWN_WWN"},
779 {MENLO_ERR_BUSY, "BUSY"},
780
781 }; /* emlxs_menlo_rsp_table */
782
783 #endif /* MENLO_SUPPORT */
784
785
786 emlxs_table_t emlxs_mscmd_table[] = {
787 {SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
788 {SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
789 {MS_GTIN, "MS_GTIN"},
790 {MS_GIEL, "MS_GIEL"},
791 {MS_GIET, "MS_GIET"},
792 {MS_GDID, "MS_GDID"},
793 {MS_GMID, "MS_GMID"},
794 {MS_GFN, "MS_GFN"},
795 {MS_GIELN, "MS_GIELN"},
796 {MS_GMAL, "MS_GMAL"},
797 {MS_GIEIL, "MS_GIEIL"},
798 {MS_GPL, "MS_GPL"},
799 {MS_GPT, "MS_GPT"},
800 {MS_GPPN, "MS_GPPN"},
801 {MS_GAPNL, "MS_GAPNL"},
802 {MS_GPS, "MS_GPS"},
803 {MS_GPSC, "MS_GPSC"},
804 {MS_GATIN, "MS_GATIN"},
805 {MS_GSES, "MS_GSES"},
806 {MS_GPLNL, "MS_GPLNL"},
807 {MS_GPLT, "MS_GPLT"},
808 {MS_GPLML, "MS_GPLML"},
809 {MS_GPAB, "MS_GPAB"},
810 {MS_GNPL, "MS_GNPL"},
811 {MS_GPNL, "MS_GPNL"},
812 {MS_GPFCP, "MS_GPFCP"},
813 {MS_GPLI, "MS_GPLI"},
814 {MS_GNID, "MS_GNID"},
815 {MS_RIELN, "MS_RIELN"},
816 {MS_RPL, "MS_RPL"},
817 {MS_RPLN, "MS_RPLN"},
818 {MS_RPLT, "MS_RPLT"},
819 {MS_RPLM, "MS_RPLM"},
820 {MS_RPAB, "MS_RPAB"},
821 {MS_RPFCP, "MS_RPFCP"},
822 {MS_RPLI, "MS_RPLI"},
823 {MS_DPL, "MS_DPL"},
824 {MS_DPLN, "MS_DPLN"},
825 {MS_DPLM, "MS_DPLM"},
826 {MS_DPLML, "MS_DPLML"},
827 {MS_DPLI, "MS_DPLI"},
828 {MS_DPAB, "MS_DPAB"},
829 {MS_DPALL, "MS_DPALL"}
830
831 }; /* emlxs_mscmd_table */
832
833
834 emlxs_table_t emlxs_ctcmd_table[] = {
835 {SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
836 {SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
837 {SLI_CTNS_GA_NXT, "GA_NXT"},
838 {SLI_CTNS_GPN_ID, "GPN_ID"},
839 {SLI_CTNS_GNN_ID, "GNN_ID"},
840 {SLI_CTNS_GCS_ID, "GCS_ID"},
841 {SLI_CTNS_GFT_ID, "GFT_ID"},
842 {SLI_CTNS_GSPN_ID, "GSPN_ID"},
843 {SLI_CTNS_GPT_ID, "GPT_ID"},
844 {SLI_CTNS_GID_PN, "GID_PN"},
845 {SLI_CTNS_GID_NN, "GID_NN"},
846 {SLI_CTNS_GIP_NN, "GIP_NN"},
847 {SLI_CTNS_GIPA_NN, "GIPA_NN"},
848 {SLI_CTNS_GSNN_NN, "GSNN_NN"},
849 {SLI_CTNS_GNN_IP, "GNN_IP"},
850 {SLI_CTNS_GIPA_IP, "GIPA_IP"},
851 {SLI_CTNS_GID_FT, "GID_FT"},
852 {SLI_CTNS_GID_PT, "GID_PT"},
853 {SLI_CTNS_RPN_ID, "RPN_ID"},
854 {SLI_CTNS_RNN_ID, "RNN_ID"},
855 {SLI_CTNS_RCS_ID, "RCS_ID"},
856 {SLI_CTNS_RFT_ID, "RFT_ID"},
857 {SLI_CTNS_RSPN_ID, "RSPN_ID"},
858 {SLI_CTNS_RPT_ID, "RPT_ID"},
859 {SLI_CTNS_RIP_NN, "RIP_NN"},
860 {SLI_CTNS_RIPA_NN, "RIPA_NN"},
861 {SLI_CTNS_RSNN_NN, "RSNN_NN"},
862 {SLI_CTNS_DA_ID, "DA_ID"},
863 {SLI_CT_LOOPBACK, "LOOPBACK"} /* Driver special */
864
865 }; /* emlxs_ctcmd_table */
866
867
868
869 emlxs_table_t emlxs_rmcmd_table[] = {
870 {SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
871 {SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
872 {CT_OP_GSAT, "RM_GSAT"},
873 {CT_OP_GHAT, "RM_GHAT"},
874 {CT_OP_GPAT, "RM_GPAT"},
875 {CT_OP_GDAT, "RM_GDAT"},
876 {CT_OP_GPST, "RM_GPST"},
877 {CT_OP_GDP, "RM_GDP"},
878 {CT_OP_GDPG, "RM_GDPG"},
879 {CT_OP_GEPS, "RM_GEPS"},
880 {CT_OP_GLAT, "RM_GLAT"},
881 {CT_OP_SSAT, "RM_SSAT"},
882 {CT_OP_SHAT, "RM_SHAT"},
883 {CT_OP_SPAT, "RM_SPAT"},
884 {CT_OP_SDAT, "RM_SDAT"},
885 {CT_OP_SDP, "RM_SDP"},
886 {CT_OP_SBBS, "RM_SBBS"},
887 {CT_OP_RPST, "RM_RPST"},
888 {CT_OP_VFW, "RM_VFW"},
889 {CT_OP_DFW, "RM_DFW"},
890 {CT_OP_RES, "RM_RES"},
891 {CT_OP_RHD, "RM_RHD"},
892 {CT_OP_UFW, "RM_UFW"},
893 {CT_OP_RDP, "RM_RDP"},
894 {CT_OP_GHDR, "RM_GHDR"},
895 {CT_OP_CHD, "RM_CHD"},
896 {CT_OP_SSR, "RM_SSR"},
897 {CT_OP_RSAT, "RM_RSAT"},
898 {CT_OP_WSAT, "RM_WSAT"},
899 {CT_OP_RSAH, "RM_RSAH"},
900 {CT_OP_WSAH, "RM_WSAH"},
901 {CT_OP_RACT, "RM_RACT"},
902 {CT_OP_WACT, "RM_WACT"},
903 {CT_OP_RKT, "RM_RKT"},
904 {CT_OP_WKT, "RM_WKT"},
905 {CT_OP_SSC, "RM_SSC"},
906 {CT_OP_QHBA, "RM_QHBA"},
907 {CT_OP_GST, "RM_GST"},
908 {CT_OP_GFTM, "RM_GFTM"},
909 {CT_OP_SRL, "RM_SRL"},
910 {CT_OP_SI, "RM_SI"},
911 {CT_OP_SRC, "RM_SRC"},
912 {CT_OP_GPB, "RM_GPB"},
913 {CT_OP_SPB, "RM_SPB"},
914 {CT_OP_RPB, "RM_RPB"},
915 {CT_OP_RAPB, "RM_RAPB"},
916 {CT_OP_GBC, "RM_GBC"},
917 {CT_OP_GBS, "RM_GBS"},
918 {CT_OP_SBS, "RM_SBS"},
919 {CT_OP_GANI, "RM_GANI"},
920 {CT_OP_GRV, "RM_GRV"},
921 {CT_OP_GAPBS, "RM_GAPBS"},
922 {CT_OP_APBC, "RM_APBC"},
923 {CT_OP_GDT, "RM_GDT"},
924 {CT_OP_GDLMI, "RM_GDLMI"},
925 {CT_OP_GANA, "RM_GANA"},
926 {CT_OP_GDLV, "RM_GDLV"},
927 {CT_OP_GWUP, "RM_GWUP"},
928 {CT_OP_GLM, "RM_GLM"},
929 {CT_OP_GABS, "RM_GABS"},
930 {CT_OP_SABS, "RM_SABS"},
931 {CT_OP_RPR, "RM_RPR"},
932 {SLI_CT_LOOPBACK, "LOOPBACK"} /* Driver special */
933
934 }; /* emlxs_rmcmd_table */
935
936
937 emlxs_table_t emlxs_elscmd_table[] = {
938 {ELS_CMD_ACC, "ACC"},
939 {ELS_CMD_LS_RJT, "LS_RJT"},
940 {ELS_CMD_PLOGI, "PLOGI"},
941 {ELS_CMD_FLOGI, "FLOGI"},
942 {ELS_CMD_LOGO, "LOGO"},
943 {ELS_CMD_ABTX, "ABTX"},
944 {ELS_CMD_RCS, "RCS"},
945 {ELS_CMD_RES, "RES"},
946 {ELS_CMD_RSS, "RSS"},
947 {ELS_CMD_RSI, "RSI"},
948 {ELS_CMD_ESTS, "ESTS"},
949 {ELS_CMD_ESTC, "ESTC"},
950 {ELS_CMD_ADVC, "ADVC"},
951 {ELS_CMD_RTV, "RTV"},
952 {ELS_CMD_RLS, "RLS"},
953 {ELS_CMD_ECHO, "ECHO"},
954 {ELS_CMD_TEST, "TEST"},
955 {ELS_CMD_RRQ, "RRQ"},
956 {ELS_CMD_REC, "REC"},
957 {ELS_CMD_PRLI, "PRLI"},
958 {ELS_CMD_PRLO, "PRLO"},
959 {ELS_CMD_SCN, "SCN"},
960 {ELS_CMD_TPLS, "TPLS"},
961 {ELS_CMD_GPRLO, "GPRLO"},
962 {ELS_CMD_GAID, "GAID"},
963 {ELS_CMD_FACT, "FACT"},
964 {ELS_CMD_FDACT, "FDACT"},
965 {ELS_CMD_NACT, "NACT"},
966 {ELS_CMD_NDACT, "NDACT"},
967 {ELS_CMD_QoSR, "QoSR"},
968 {ELS_CMD_RVCS, "RVCS"},
969 {ELS_CMD_PDISC, "PDISC"},
970 {ELS_CMD_FDISC, "FDISC"},
971 {ELS_CMD_ADISC, "ADISC"},
972 {ELS_CMD_FARP, "FARP"},
973 {ELS_CMD_FARPR, "FARPR"},
974 {ELS_CMD_FAN, "FAN"},
975 {ELS_CMD_RSCN, "RSCN"},
976 {ELS_CMD_SCR, "SCR"},
977 {ELS_CMD_LINIT, "LINIT"},
978 {ELS_CMD_RNID, "RNID"},
979 {ELS_CMD_AUTH, "AUTH"}
980
981 }; /* emlxs_elscmd_table */
982
983
984 emlxs_table_t emlxs_mode_table[] = {
985 {MODE_NONE, "NONE"},
986 {MODE_INITIATOR, "INITIATOR"},
987 {MODE_TARGET, "TARGET"},
988 {MODE_ALL, "INITIATOR | TARGET"}
989 }; /* emlxs_mode_table */
990
991 /*
992 *
993 * Device Driver Entry Routines
994 *
995 */
996
997 #ifdef MODSYM_SUPPORT
998 static void emlxs_fca_modclose();
999 static int emlxs_fca_modopen();
1000 emlxs_modsym_t emlxs_modsym; /* uses emlxs_device.lock */
1001
1002 static int
1003 emlxs_fca_modopen()
1004 {
1005 int err;
1006
1007 if (emlxs_modsym.mod_fctl) {
1008 return (0);
1009 }
1010
1011 /* Leadville (fctl) */
1012 err = 0;
1013 emlxs_modsym.mod_fctl =
1014 ddi_modopen("misc/fctl", KRTLD_MODE_FIRST, &err);
1015 if (!emlxs_modsym.mod_fctl) {
1016 cmn_err(CE_WARN,
1017 "?%s: misc/fctl: ddi_modopen misc/fctl failed: error=%d",
1018 DRIVER_NAME, err);
1019
1020 goto failed;
1021 }
1022
1023 err = 0;
1024 /* Check if the fctl fc_fca_attach is present */
1025 emlxs_modsym.fc_fca_attach =
1026 (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_attach",
1027 &err);
1028 if ((void *)emlxs_modsym.fc_fca_attach == NULL) {
1029 cmn_err(CE_WARN,
1030 "?%s: misc/fctl: fc_fca_attach not present", DRIVER_NAME);
1031 goto failed;
1032 }
1033
1034 err = 0;
1035 /* Check if the fctl fc_fca_detach is present */
1036 emlxs_modsym.fc_fca_detach =
1037 (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_detach",
1038 &err);
1039 if ((void *)emlxs_modsym.fc_fca_detach == NULL) {
1040 cmn_err(CE_WARN,
1041 "?%s: misc/fctl: fc_fca_detach not present", DRIVER_NAME);
1042 goto failed;
1043 }
1044
1045 err = 0;
1046 /* Check if the fctl fc_fca_init is present */
1047 emlxs_modsym.fc_fca_init =
1048 (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_init", &err);
1049 if ((void *)emlxs_modsym.fc_fca_init == NULL) {
1050 cmn_err(CE_WARN,
1051 "?%s: misc/fctl: fc_fca_init not present", DRIVER_NAME);
1052 goto failed;
1053 }
1054
1055 return (0);
1056
1057 failed:
1058
1059 emlxs_fca_modclose();
1060
1061 return (1);
1062
1063
1064 } /* emlxs_fca_modopen() */
1065
1066
1067 static void
1068 emlxs_fca_modclose()
1069 {
1070 if (emlxs_modsym.mod_fctl) {
1071 (void) ddi_modclose(emlxs_modsym.mod_fctl);
1072 emlxs_modsym.mod_fctl = 0;
1073 }
1074
1075 emlxs_modsym.fc_fca_attach = NULL;
1076 emlxs_modsym.fc_fca_detach = NULL;
1077 emlxs_modsym.fc_fca_init = NULL;
1078
1079 return;
1080
1081 } /* emlxs_fca_modclose() */
1082
1083 #endif /* MODSYM_SUPPORT */
1084
1085
1086
1087 /*
1088 * Global driver initialization, called once when driver is loaded
1089 */
1090 int
1091 _init(void)
1092 {
1093 int ret;
1094
1095 /*
1096 * First init call for this driver,
1097 	 * so initialize the global emlxs_device structure.
1098 */
1099 bzero(&emlxs_device, sizeof (emlxs_device));
1100
1101 #ifdef MODSYM_SUPPORT
1102 bzero(&emlxs_modsym, sizeof (emlxs_modsym_t));
1103 #endif /* MODSYM_SUPPORT */
1104
1105 mutex_init(&emlxs_device.lock, NULL, MUTEX_DRIVER, NULL);
1106
1107 (void) drv_getparm(LBOLT, &emlxs_device.log_timestamp);
1108 emlxs_device.drv_timestamp = ddi_get_time();
1109
1110 for (ret = 0; ret < MAX_FC_BRDS; ret++) {
1111 emlxs_instance[ret] = (uint32_t)-1;
1112 }
1113
1114 /*
1115 	 * Provide for one soft state structure (emlxs_hba_t)
1116 	 * for each possible board in the system.
1117 */
1118 if ((ret = ddi_soft_state_init(&emlxs_soft_state,
1119 sizeof (emlxs_hba_t), MAX_FC_BRDS)) != 0) {
1120 cmn_err(CE_WARN,
1121 "?%s: _init: ddi_soft_state_init failed. rval=%x",
1122 DRIVER_NAME, ret);
1123
1124 return (ret);
1125 }
1126
1127 #ifdef MODSYM_SUPPORT
1128 /* Open SFS */
1129 (void) emlxs_fca_modopen();
1130 #endif /* MODSYM_SUPPORT */
1131
1132 /* Setup devops for SFS */
1133 MODSYM(fc_fca_init)(&emlxs_ops);
1134
1135 if ((ret = mod_install(&emlxs_modlinkage)) != 0) {
1136 (void) ddi_soft_state_fini(&emlxs_soft_state);
1137 #ifdef MODSYM_SUPPORT
1138 /* Close SFS */
1139 emlxs_fca_modclose();
1140 #endif /* MODSYM_SUPPORT */
1141
1142 return (ret);
1143 }
1144
1145 #ifdef SAN_DIAG_SUPPORT
1146 mutex_init(&emlxs_sd_bucket_mutex, NULL, MUTEX_DRIVER, NULL);
1147 #endif /* SAN_DIAG_SUPPORT */
1148
1149 return (ret);
1150
1151 } /* _init() */
1152
1153
1154 /*
1155 * Called when driver is unloaded.
1156 */
1157 int
1158 _fini(void)
1159 {
1160 int ret;
1161
1162 if ((ret = mod_remove(&emlxs_modlinkage)) != 0) {
1163 return (ret);
1164 }
1165 #ifdef MODSYM_SUPPORT
1166 /* Close SFS */
1167 emlxs_fca_modclose();
1168 #endif /* MODSYM_SUPPORT */
1169
1170 /*
1171 * Destroy the soft state structure
1172 */
1173 (void) ddi_soft_state_fini(&emlxs_soft_state);
1174
1175 /* Destroy the global device lock */
1176 mutex_destroy(&emlxs_device.lock);
1177
1178 #ifdef SAN_DIAG_SUPPORT
1179 mutex_destroy(&emlxs_sd_bucket_mutex);
1180 #endif /* SAN_DIAG_SUPPORT */
1181
1182 return (ret);
1183
1184 } /* _fini() */
1185
1186
1187
1188 int
1189 _info(struct modinfo *modinfop)
1190 {
1191
1192 return (mod_info(&emlxs_modlinkage, modinfop));
1193
1194 } /* _info() */
1195
1196
1197 /*
1198  * Attach a ddiinst of an emlx host adapter.
1199 * Allocate data structures, initialize the adapter and we're ready to fly.
1200 */
1201 static int
1202 emlxs_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
1203 {
1204 emlxs_hba_t *hba;
1205 int ddiinst;
1206 int emlxinst;
1207 int rval;
1208
1209 switch (cmd) {
1210 case DDI_ATTACH:
1211 /* If successful this will set EMLXS_PM_IN_ATTACH */
1212 rval = emlxs_hba_attach(dip);
1213 break;
1214
1215 case DDI_RESUME:
1216 /* This will resume the driver */
1217 rval = emlxs_hba_resume(dip);
1218 break;
1219
1220 default:
1221 rval = DDI_FAILURE;
1222 }
1223
1224 if (rval == DDI_SUCCESS) {
1225 ddiinst = ddi_get_instance(dip);
1226 emlxinst = emlxs_get_instance(ddiinst);
1227 hba = emlxs_device.hba[emlxinst];
1228
1229 if ((hba != NULL) && (hba != (emlxs_hba_t *)-1)) {
1230
1231 /* Enable driver dump feature */
1232 mutex_enter(&EMLXS_PORT_LOCK);
1233 hba->flag |= FC_DUMP_SAFE;
1234 mutex_exit(&EMLXS_PORT_LOCK);
1235 }
1236 }
1237
1238 return (rval);
1239
1240 } /* emlxs_attach() */
1241
1242
1243 /*
1244 * Detach/prepare driver to unload (see detach(9E)).
1245 */
1246 static int
1247 emlxs_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1248 {
1249 emlxs_hba_t *hba;
1250 emlxs_port_t *port;
1251 int ddiinst;
1252 int emlxinst;
1253 int rval;
1254
1255 ddiinst = ddi_get_instance(dip);
1256 emlxinst = emlxs_get_instance(ddiinst);
1257 hba = emlxs_device.hba[emlxinst];
1258
1259 if (hba == NULL) {
1260 cmn_err(CE_WARN, "?%s: Detach: NULL device.", DRIVER_NAME);
1261
1262 return (DDI_FAILURE);
1263 }
1264
1265 if (hba == (emlxs_hba_t *)-1) {
1266 cmn_err(CE_WARN, "?%s: Detach: Device attach failed.",
1267 DRIVER_NAME);
1268
1269 return (DDI_FAILURE);
1270 }
1271
1272 port = &PPORT;
1273 rval = DDI_SUCCESS;
1274
1275 /* Check driver dump */
1276 mutex_enter(&EMLXS_PORT_LOCK);
1277
1278 if (hba->flag & FC_DUMP_ACTIVE) {
1279 mutex_exit(&EMLXS_PORT_LOCK);
1280
1281 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1282 "detach: Driver busy. Driver dump active.");
1283
1284 return (DDI_FAILURE);
1285 }
1286
1287 #ifdef SFCT_SUPPORT
1288 if ((port->flag & EMLXS_TGT_BOUND) &&
1289 ((port->fct_flags & FCT_STATE_PORT_ONLINE) ||
1290 (port->fct_flags & FCT_STATE_NOT_ACKED))) {
1291 mutex_exit(&EMLXS_PORT_LOCK);
1292
1293 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1294 "detach: Driver busy. Target mode active.");
1295
1296 return (DDI_FAILURE);
1297 }
1298 #endif /* SFCT_SUPPORT */
1299
1300 if (port->flag & EMLXS_INI_BOUND) {
1301 mutex_exit(&EMLXS_PORT_LOCK);
1302
1303 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1304 "detach: Driver busy. Initiator mode active.");
1305
1306 return (DDI_FAILURE);
1307 }
1308
1309 hba->flag &= ~FC_DUMP_SAFE;
1310
1311 mutex_exit(&EMLXS_PORT_LOCK);
1312
1313 switch (cmd) {
1314 case DDI_DETACH:
1315
1316 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
1317 "DDI_DETACH");
1318
1319 rval = emlxs_hba_detach(dip);
1320
1321 if (rval != DDI_SUCCESS) {
1322 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1323 "Unable to detach.");
1324 }
1325 break;
1326
1327 case DDI_SUSPEND:
1328
1329 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
1330 "DDI_SUSPEND");
1331
1332 /* Suspend the driver */
1333 rval = emlxs_hba_suspend(dip);
1334
1335 if (rval != DDI_SUCCESS) {
1336 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
1337 "Unable to suspend driver.");
1338 }
1339 break;
1340
1341 default:
1342 cmn_err(CE_WARN, "?%s: Detach: Unknown cmd received. cmd=%x",
1343 DRIVER_NAME, cmd);
1344 rval = DDI_FAILURE;
1345 }
1346
1347 if (rval == DDI_FAILURE) {
1348 /* Re-Enable driver dump feature */
1349 mutex_enter(&EMLXS_PORT_LOCK);
1350 hba->flag |= FC_DUMP_SAFE;
1351 mutex_exit(&EMLXS_PORT_LOCK);
1352 }
1353
1354 return (rval);
1355
1356 } /* emlxs_detach() */
1357
1358
1359 /* EMLXS_PORT_LOCK must be held when calling this */
1360 extern void
1361 emlxs_port_init(emlxs_port_t *port)
1362 {
1363 emlxs_hba_t *hba = HBA;
1364
1365 /* Initialize the base node */
1366 bzero((caddr_t)&port->node_base, sizeof (NODELIST));
1367 port->node_base.nlp_Rpi = 0;
1368 port->node_base.nlp_DID = 0xffffff;
1369 port->node_base.nlp_list_next = NULL;
1370 port->node_base.nlp_list_prev = NULL;
1371 port->node_base.nlp_active = 1;
1372 port->node_base.nlp_base = 1;
1373 port->node_count = 0;
1374
1375 if (!(port->flag & EMLXS_PORT_ENABLED)) {
1376 uint8_t dummy_wwn[8] =
1377 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
1378
1379 bcopy((caddr_t)dummy_wwn, (caddr_t)&port->wwnn,
1380 sizeof (NAME_TYPE));
1381 bcopy((caddr_t)dummy_wwn, (caddr_t)&port->wwpn,
1382 sizeof (NAME_TYPE));
1383 }
1384
1385 if (!(port->flag & EMLXS_PORT_CONFIG)) {
1386 (void) strncpy((caddr_t)port->snn, (caddr_t)hba->snn,
1387 (sizeof (port->snn)-1));
1388 (void) strncpy((caddr_t)port->spn, (caddr_t)hba->spn,
1389 (sizeof (port->spn)-1));
1390 }
1391
1392 bcopy((caddr_t)&hba->sparam, (caddr_t)&port->sparam,
1393 sizeof (SERV_PARM));
1394 bcopy((caddr_t)&port->wwnn, (caddr_t)&port->sparam.nodeName,
1395 sizeof (NAME_TYPE));
1396 bcopy((caddr_t)&port->wwpn, (caddr_t)&port->sparam.portName,
1397 sizeof (NAME_TYPE));
1398
1399 return;
1400
1401 } /* emlxs_port_init() */
1402
1403
1404 void
1405 emlxs_disable_pcie_ce_err(emlxs_hba_t *hba)
1406 {
1407 uint16_t reg;
1408
1409 if (!hba->pci_cap_offset[PCI_CAP_ID_PCI_E]) {
1410 return;
1411 }
1412
1413 /* Turn off the Correctable Error Reporting */
1414 /* (the Device Control Register, bit 0). */
1415 reg = ddi_get16(hba->pci_acc_handle,
1416 (uint16_t *)(hba->pci_addr +
1417 hba->pci_cap_offset[PCI_CAP_ID_PCI_E] +
1418 PCIE_DEVCTL));
1419
1420 reg &= ~1;
1421
1422 (void) ddi_put16(hba->pci_acc_handle,
1423 (uint16_t *)(hba->pci_addr +
1424 hba->pci_cap_offset[PCI_CAP_ID_PCI_E] +
1425 PCIE_DEVCTL),
1426 reg);
1427
1428 return;
1429
1430 } /* emlxs_disable_pcie_ce_err() */
1431
1432
1433 /*
1434 * emlxs_fca_bind_port
1435 *
1436 * Arguments:
1437 *
1438 * dip: the dev_info pointer for the ddiinst
1439 * port_info: pointer to info handed back to the transport
1440 * bind_info: pointer to info from the transport
1441 *
1442 * Return values: a port handle for this port, NULL for failure
1443 *
1444 */
1445 static opaque_t
1446 emlxs_fca_bind_port(dev_info_t *dip, fc_fca_port_info_t *port_info,
1447 fc_fca_bind_info_t *bind_info)
1448 {
1449 emlxs_hba_t *hba;
1450 emlxs_port_t *port;
1451 emlxs_port_t *pport;
1452 emlxs_port_t *vport;
1453 int ddiinst;
1454 emlxs_vpd_t *vpd;
1455 emlxs_config_t *cfg;
1456 char *dptr;
1457 char buffer[16];
1458 uint32_t length;
1459 uint32_t len;
1460 char topology[32];
1461 char linkspeed[32];
1462 uint32_t linkstate;
1463
1464 ddiinst = ddi_get_instance(dip);
1465 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
1466 port = &PPORT;
1467 pport = &PPORT;
1468
1469 ddiinst = hba->ddiinst;
1470 vpd = &VPD;
1471 cfg = &CFG;
1472
1473 mutex_enter(&EMLXS_PORT_LOCK);
1474
1475 if (bind_info->port_num > 0) {
1476 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1477 if (!(hba->flag & FC_NPIV_ENABLED) ||
1478 !(bind_info->port_npiv) ||
1479 (bind_info->port_num > hba->vpi_max))
1480 #elif (EMLXS_MODREV >= EMLXS_MODREV3)
1481 if (!(hba->flag & FC_NPIV_ENABLED) ||
1482 (bind_info->port_num > hba->vpi_high))
1483 #endif
1484 {
1485 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1486 "fca_bind_port: Port %d not supported.",
1487 bind_info->port_num);
1488
1489 mutex_exit(&EMLXS_PORT_LOCK);
1490
1491 port_info->pi_error = FC_OUTOFBOUNDS;
1492 return (NULL);
1493 }
1494 }
1495
1496 /* Get true port pointer */
1497 port = &VPORT(bind_info->port_num);
1498
1499 /* Make sure the port is not already bound to the transport */
1500 if (port->flag & EMLXS_INI_BOUND) {
1501
1502 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1503 "fca_bind_port: Port %d already bound. flag=%x",
1504 bind_info->port_num, port->flag);
1505
1506 mutex_exit(&EMLXS_PORT_LOCK);
1507
1508 port_info->pi_error = FC_ALREADY;
1509 return (NULL);
1510 }
1511
1512 if (!(pport->flag & EMLXS_INI_ENABLED)) {
1513 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1514 "fca_bind_port: Physical port does not support "
1515 "initiator mode.");
1516
1517 mutex_exit(&EMLXS_PORT_LOCK);
1518
1519 port_info->pi_error = FC_OUTOFBOUNDS;
1520 return (NULL);
1521 }
1522
1523 /* Make sure port enable flag is set */
1524 /* Just in case fca_port_unbind is called just prior to fca_port_bind */
1525 /* without a driver attach or resume operation */
1526 port->flag |= EMLXS_PORT_ENABLED;
1527
1528 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
1529 "fca_bind_port: Port %d: port_info=%p bind_info=%p",
1530 bind_info->port_num, port_info, bind_info);
1531
1532 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1533 if (bind_info->port_npiv) {
1534 /* Leadville is telling us about a new virtual port */
1535 bcopy((caddr_t)&bind_info->port_nwwn, (caddr_t)&port->wwnn,
1536 sizeof (NAME_TYPE));
1537 bcopy((caddr_t)&bind_info->port_pwwn, (caddr_t)&port->wwpn,
1538 sizeof (NAME_TYPE));
1539 if (port->snn[0] == 0) {
1540 (void) strncpy((caddr_t)port->snn, (caddr_t)hba->snn,
1541 (sizeof (port->snn)-1));
1542
1543 }
1544
1545 if (port->spn[0] == 0) {
1546 (void) snprintf((caddr_t)port->spn,
1547 (sizeof (port->spn)-1), "%s VPort-%d",
1548 (caddr_t)hba->spn, port->vpi);
1549 }
1550 port->flag |= EMLXS_PORT_CONFIG;
1551 }
1552 #endif /* >= EMLXS_MODREV5 */
1553
1554 /*
1555 	 * Restricted login should apply to both physical and
1556 * virtual ports.
1557 */
1558 if (cfg[CFG_VPORT_RESTRICTED].current) {
1559 port->flag |= EMLXS_PORT_RESTRICTED;
1560 }
1561
1562 /* Perform generic port initialization */
1563 emlxs_port_init(port);
1564
1565 /* Perform SFS specific initialization */
1566 port->ulp_handle = bind_info->port_handle;
1567 port->ulp_statec_cb = bind_info->port_statec_cb;
1568 port->ulp_unsol_cb = bind_info->port_unsol_cb;
1569
1570 /* Set the bound flag */
1571 port->flag |= EMLXS_INI_BOUND;
1572 hba->num_of_ports++;
1573
1574 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
1575 mutex_exit(&EMLXS_PORT_LOCK);
1576 (void) emlxs_vpi_port_bind_notify(port);
1577 mutex_enter(&EMLXS_PORT_LOCK);
1578
1579 linkstate = (port->vpip->state == VPI_STATE_PORT_ONLINE) ?
1580 FC_LINK_UP : FC_LINK_DOWN;
1581 } else {
1582 linkstate = hba->state;
1583 }
1584
1585 /* Update the port info structure */
1586
1587 /* Set the topology and state */
1588 if (port->mode == MODE_TARGET) {
1589 port_info->pi_port_state = FC_STATE_OFFLINE;
1590 port_info->pi_topology = FC_TOP_UNKNOWN;
1591 } else if ((linkstate < FC_LINK_UP) ||
1592 ((port->vpi > 0) && (!(port->flag & EMLXS_PORT_ENABLED) ||
1593 !(hba->flag & FC_NPIV_SUPPORTED)))) {
1594 port_info->pi_port_state = FC_STATE_OFFLINE;
1595 port_info->pi_topology = FC_TOP_UNKNOWN;
1596 }
1597 #ifdef MENLO_SUPPORT
1598 else if (hba->flag & FC_MENLO_MODE) {
1599 port_info->pi_port_state = FC_STATE_OFFLINE;
1600 port_info->pi_topology = FC_TOP_UNKNOWN;
1601 }
1602 #endif /* MENLO_SUPPORT */
1603 else {
1604 /* Check for loop topology */
1605 if (hba->topology == TOPOLOGY_LOOP) {
1606 port_info->pi_port_state = FC_STATE_LOOP;
1607 (void) strlcpy(topology, ", loop", sizeof (topology));
1608
1609 if (hba->flag & FC_FABRIC_ATTACHED) {
1610 port_info->pi_topology = FC_TOP_PUBLIC_LOOP;
1611 } else {
1612 port_info->pi_topology = FC_TOP_PRIVATE_LOOP;
1613 }
1614 } else {
1615 port_info->pi_topology = FC_TOP_FABRIC;
1616 port_info->pi_port_state = FC_STATE_ONLINE;
1617 (void) strlcpy(topology, ", fabric", sizeof (topology));
1618 }
1619
1620 /* Set the link speed */
1621 switch (hba->linkspeed) {
1622 case 0:
1623 (void) strlcpy(linkspeed, "Gb", sizeof (linkspeed));
1624 port_info->pi_port_state |= FC_STATE_1GBIT_SPEED;
1625 break;
1626
1627 case LA_1GHZ_LINK:
1628 (void) strlcpy(linkspeed, "1Gb", sizeof (linkspeed));
1629 port_info->pi_port_state |= FC_STATE_1GBIT_SPEED;
1630 break;
1631 case LA_2GHZ_LINK:
1632 (void) strlcpy(linkspeed, "2Gb", sizeof (linkspeed));
1633 port_info->pi_port_state |= FC_STATE_2GBIT_SPEED;
1634 break;
1635 case LA_4GHZ_LINK:
1636 (void) strlcpy(linkspeed, "4Gb", sizeof (linkspeed));
1637 port_info->pi_port_state |= FC_STATE_4GBIT_SPEED;
1638 break;
1639 case LA_8GHZ_LINK:
1640 (void) strlcpy(linkspeed, "8Gb", sizeof (linkspeed));
1641 port_info->pi_port_state |= FC_STATE_8GBIT_SPEED;
1642 break;
1643 case LA_10GHZ_LINK:
1644 (void) strlcpy(linkspeed, "10Gb", sizeof (linkspeed));
1645 port_info->pi_port_state |= FC_STATE_10GBIT_SPEED;
1646 break;
1647 case LA_16GHZ_LINK:
1648 (void) strlcpy(linkspeed, "16Gb", sizeof (linkspeed));
1649 port_info->pi_port_state |= FC_STATE_16GBIT_SPEED;
1650 break;
1651 case LA_32GHZ_LINK:
1652 (void) strlcpy(linkspeed, "32Gb", sizeof (linkspeed));
1653 port_info->pi_port_state |= FC_STATE_32GBIT_SPEED;
1654 break;
1655 default:
1656 (void) snprintf(linkspeed, sizeof (linkspeed),
1657 "unknown(0x%x)", hba->linkspeed);
1658 break;
1659 }
1660
1661 if (hba->sli_mode <= EMLXS_HBA_SLI3_MODE) {
1662 /* Adjusting port context for link up messages */
1663 vport = port;
1664 port = &PPORT;
1665 if (vport->vpi == 0) {
1666 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
1667 "%s%s, initiator",
1668 linkspeed, topology);
1669 } else if (!(hba->flag & FC_NPIV_LINKUP)) {
1670 hba->flag |= FC_NPIV_LINKUP;
1671 EMLXS_MSGF(EMLXS_CONTEXT,
1672 &emlxs_npiv_link_up_msg,
1673 "%s%s, initiator", linkspeed, topology);
1674 }
1675 port = vport;
1676 }
1677 }
1678
1679 /* PCIE Correctable Error Reporting workaround */
1680 if (((hba->model_info.chip == EMLXS_BE2_CHIP) ||
1681 (hba->model_info.chip == EMLXS_BE3_CHIP)) &&
1682 (bind_info->port_num == 0)) {
1683 emlxs_disable_pcie_ce_err(hba);
1684 }
1685
1686 /* Save initial state */
1687 port->ulp_statec = port_info->pi_port_state;
1688
1689 /*
1690 * The transport needs a copy of the common service parameters
1691 * for this port. The transport can get any updates through
1692 * the getcap entry point.
1693 */
1694 bcopy((void *) &port->sparam,
1695 (void *) &port_info->pi_login_params.common_service,
1696 sizeof (SERV_PARM));
1697
1698 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
1699 /* Swap the service parameters for ULP */
1700 emlxs_swap_service_params((SERV_PARM *)&port_info->pi_login_params.
1701 common_service);
1702 #endif /* EMLXS_MODREV2X */
1703
1704 port_info->pi_login_params.common_service.btob_credit = 0xffff;
1705
1706 bcopy((void *) &port->wwnn,
1707 (void *) &port_info->pi_login_params.node_ww_name,
1708 sizeof (NAME_TYPE));
1709
1710 bcopy((void *) &port->wwpn,
1711 (void *) &port_info->pi_login_params.nport_ww_name,
1712 sizeof (NAME_TYPE));
1713
1714 /*
1715 * We need to turn off CLASS2 support.
1716 	 * Otherwise, the FC transport will use CLASS2 as the default
1717 	 * class and never try CLASS3.
1718 */
1719 #if (EMLXS_MODREV >= EMLXS_MODREV3)
1720 #if (EMLXS_MODREVX >= EMLXS_MODREV3X)
1721 if ((port_info->pi_login_params.class_1.class_opt) & 0x0080) {
1722 port_info->pi_login_params.class_1.class_opt &= ~0x0080;
1723 }
1724
1725 if ((port_info->pi_login_params.class_2.class_opt) & 0x0080) {
1726 port_info->pi_login_params.class_2.class_opt &= ~0x0080;
1727 }
1728 #else /* EMLXS_SPARC or EMLXS_MODREV2X */
1729 if ((port_info->pi_login_params.class_1.class_opt) & 0x8000) {
1730 port_info->pi_login_params.class_1.class_opt &= ~0x8000;
1731 }
1732
1733 if ((port_info->pi_login_params.class_2.class_opt) & 0x8000) {
1734 port_info->pi_login_params.class_2.class_opt &= ~0x8000;
1735 }
1736 #endif /* >= EMLXS_MODREV3X */
1737 #endif /* >= EMLXS_MODREV3 */
1738
1739
1740 #if (EMLXS_MODREV <= EMLXS_MODREV2)
1741 if ((port_info->pi_login_params.class_1.data[0]) & 0x80) {
1742 port_info->pi_login_params.class_1.data[0] &= ~0x80;
1743 }
1744
1745 if ((port_info->pi_login_params.class_2.data[0]) & 0x80) {
1746 port_info->pi_login_params.class_2.data[0] &= ~0x80;
1747 }
1748 #endif /* <= EMLXS_MODREV2 */
1749
1750 /* Additional parameters */
1751 port_info->pi_s_id.port_id = port->did;
1752 port_info->pi_s_id.priv_lilp_posit = 0;
1753 port_info->pi_hard_addr.hard_addr = cfg[CFG_ASSIGN_ALPA].current;
1754
1755 /* Initialize the RNID parameters */
1756 bzero(&port_info->pi_rnid_params, sizeof (port_info->pi_rnid_params));
1757
1758 (void) snprintf((char *)port_info->pi_rnid_params.params.global_id,
1759 (sizeof (port_info->pi_rnid_params.params.global_id)-1),
1760 "%01x%01x%02x%02x%02x%02x%02x%02x%02x", hba->wwpn.nameType,
1761 hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb, hba->wwpn.IEEE[0],
1762 hba->wwpn.IEEE[1], hba->wwpn.IEEE[2], hba->wwpn.IEEE[3],
1763 hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
1764
1765 port_info->pi_rnid_params.params.unit_type = RNID_HBA;
1766 port_info->pi_rnid_params.params.port_id = port->did;
1767 port_info->pi_rnid_params.params.ip_version = RNID_IPV4;
1768
1769 /* Initialize the port attributes */
1770 bzero(&port_info->pi_attrs, sizeof (port_info->pi_attrs));
1771
1772 (void) strncpy(port_info->pi_attrs.manufacturer,
1773 hba->model_info.manufacturer,
1774 (sizeof (port_info->pi_attrs.manufacturer)-1));
1775
1776 port_info->pi_rnid_params.status = FC_SUCCESS;
1777
1778 (void) strncpy(port_info->pi_attrs.serial_number, vpd->serial_num,
1779 (sizeof (port_info->pi_attrs.serial_number)-1));
1780
1781 (void) snprintf(port_info->pi_attrs.firmware_version,
1782 (sizeof (port_info->pi_attrs.firmware_version)-1), "%s (%s)",
1783 vpd->fw_version, vpd->fw_label);
1784
1785 #ifdef EMLXS_I386
1786 (void) snprintf(port_info->pi_attrs.option_rom_version,
1787 (sizeof (port_info->pi_attrs.option_rom_version)-1),
1788 "Boot:%s", vpd->boot_version);
1789 #else /* EMLXS_SPARC */
1790 (void) snprintf(port_info->pi_attrs.option_rom_version,
1791 (sizeof (port_info->pi_attrs.option_rom_version)-1),
1792 "Boot:%s Fcode:%s", vpd->boot_version, vpd->fcode_version);
1793 #endif /* EMLXS_I386 */
1794
1795 (void) snprintf(port_info->pi_attrs.driver_version,
1796 (sizeof (port_info->pi_attrs.driver_version)-1), "%s (%s)",
1797 emlxs_version, emlxs_revision);
1798
1799 (void) strncpy(port_info->pi_attrs.driver_name, DRIVER_NAME,
1800 (sizeof (port_info->pi_attrs.driver_name)-1));
1801
1802 port_info->pi_attrs.vendor_specific_id =
1803 (hba->model_info.device_id << 16) | hba->model_info.vendor_id;
1804
1805 port_info->pi_attrs.supported_cos = LE_SWAP32(FC_NS_CLASS3);
1806
1807 port_info->pi_attrs.max_frame_size = FF_FRAME_SIZE;
1808
1809 #if (EMLXS_MODREV >= EMLXS_MODREV3)
1810 port_info->pi_rnid_params.params.num_attached = 0;
1811
1812 if ((hba->model_info.chip & EMLXS_LANCER_CHIPS) != 0) {
1813 uint8_t byte;
1814 uint8_t *wwpn;
1815 uint32_t i;
1816 uint32_t j;
1817
1818 /* Copy the WWPN as a string into the local buffer */
1819 wwpn = (uint8_t *)&hba->wwpn;
1820 for (i = 0; i < 16; i++) {
1821 byte = *wwpn++;
1822 j = ((byte & 0xf0) >> 4);
1823 if (j <= 9) {
1824 buffer[i] =
1825 (char)((uint8_t)'0' + (uint8_t)j);
1826 } else {
1827 buffer[i] =
1828 (char)((uint8_t)'A' + (uint8_t)(j -
1829 10));
1830 }
1831
1832 i++;
1833 j = (byte & 0xf);
1834 if (j <= 9) {
1835 buffer[i] =
1836 (char)((uint8_t)'0' + (uint8_t)j);
1837 } else {
1838 buffer[i] =
1839 (char)((uint8_t)'A' + (uint8_t)(j -
1840 10));
1841 }
1842 }
1843
1844 port_info->pi_attrs.hba_fru_details.port_index = 0;
1845 #if ((EMLXS_MODREV == EMLXS_MODREV3) || (EMLXS_MODREV == EMLXS_MODREV4))
1846
1847 } else if (hba->flag & FC_NPIV_ENABLED) {
1848 uint8_t byte;
1849 uint8_t *wwpn;
1850 uint32_t i;
1851 uint32_t j;
1852
1853 /* Copy the WWPN as a string into the local buffer */
1854 wwpn = (uint8_t *)&hba->wwpn;
1855 for (i = 0; i < 16; i++) {
1856 byte = *wwpn++;
1857 j = ((byte & 0xf0) >> 4);
1858 if (j <= 9) {
1859 buffer[i] =
1860 (char)((uint8_t)'0' + (uint8_t)j);
1861 } else {
1862 buffer[i] =
1863 (char)((uint8_t)'A' + (uint8_t)(j -
1864 10));
1865 }
1866
1867 i++;
1868 j = (byte & 0xf);
1869 if (j <= 9) {
1870 buffer[i] =
1871 (char)((uint8_t)'0' + (uint8_t)j);
1872 } else {
1873 buffer[i] =
1874 (char)((uint8_t)'A' + (uint8_t)(j -
1875 10));
1876 }
1877 }
1878
1879 port_info->pi_attrs.hba_fru_details.port_index = port->vpi;
1880 #endif /* == EMLXS_MODREV3 || EMLXS_MODREV4 */
1881
1882 } else {
1883 /* Copy the serial number string (right most 16 chars) */
1884 /* into the right justified local buffer */
1885 bzero(buffer, sizeof (buffer));
1886 length = strlen(vpd->serial_num);
1887 len = (length > 16) ? 16 : length;
1888 bcopy(&vpd->serial_num[(length - len)],
1889 &buffer[(sizeof (buffer) - len)], len);
1890
1891 port_info->pi_attrs.hba_fru_details.port_index =
1892 vpd->port_index;
1893 }
1894
1895 dptr = (char *)&port_info->pi_attrs.hba_fru_details.high;
1896 dptr[0] = buffer[0];
1897 dptr[1] = buffer[1];
1898 dptr[2] = buffer[2];
1899 dptr[3] = buffer[3];
1900 dptr[4] = buffer[4];
1901 dptr[5] = buffer[5];
1902 dptr[6] = buffer[6];
1903 dptr[7] = buffer[7];
1904 port_info->pi_attrs.hba_fru_details.high =
1905 LE_SWAP64(port_info->pi_attrs.hba_fru_details.high);
1906
1907 dptr = (char *)&port_info->pi_attrs.hba_fru_details.low;
1908 dptr[0] = buffer[8];
1909 dptr[1] = buffer[9];
1910 dptr[2] = buffer[10];
1911 dptr[3] = buffer[11];
1912 dptr[4] = buffer[12];
1913 dptr[5] = buffer[13];
1914 dptr[6] = buffer[14];
1915 dptr[7] = buffer[15];
1916 port_info->pi_attrs.hba_fru_details.low =
1917 LE_SWAP64(port_info->pi_attrs.hba_fru_details.low);
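	/*
	 * The 16-character buffer above (hex WWPN for the Lancer/NPIV
	 * cases, or the right-justified serial number otherwise) is
	 * packed into the two 64-bit FRU detail fields, high and low.
	 */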
1918
1919 #endif /* >= EMLXS_MODREV3 */
1920
1921 #if (EMLXS_MODREV >= EMLXS_MODREV4)
1922 (void) strncpy((caddr_t)port_info->pi_attrs.sym_node_name,
1923 (caddr_t)port->snn, FCHBA_SYMB_NAME_LEN);
1924 (void) strncpy((caddr_t)port_info->pi_attrs.sym_port_name,
1925 (caddr_t)port->spn, FCHBA_SYMB_NAME_LEN);
1926 #endif /* >= EMLXS_MODREV4 */
1927
1928 (void) snprintf(port_info->pi_attrs.hardware_version,
1929 (sizeof (port_info->pi_attrs.hardware_version)-1),
1930 "%x", vpd->biuRev);
1931
1932 /* Set the hba speed limit */
1933 if (vpd->link_speed & LMT_32GB_CAPABLE) {
1934 port_info->pi_attrs.supported_speed |=
1935 FC_HBA_PORTSPEED_32GBIT;
1936 }
1937 if (vpd->link_speed & LMT_16GB_CAPABLE) {
1938 port_info->pi_attrs.supported_speed |=
1939 FC_HBA_PORTSPEED_16GBIT;
1940 }
1941 if (vpd->link_speed & LMT_10GB_CAPABLE) {
1942 port_info->pi_attrs.supported_speed |=
1943 FC_HBA_PORTSPEED_10GBIT;
1944 }
1945 if (vpd->link_speed & LMT_8GB_CAPABLE) {
1946 port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_8GBIT;
1947 }
1948 if (vpd->link_speed & LMT_4GB_CAPABLE) {
1949 port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_4GBIT;
1950 }
1951 if (vpd->link_speed & LMT_2GB_CAPABLE) {
1952 port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_2GBIT;
1953 }
1954 if (vpd->link_speed & LMT_1GB_CAPABLE) {
1955 port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_1GBIT;
1956 }
1957
1958 /* Set the hba model info */
1959 (void) strncpy(port_info->pi_attrs.model, hba->model_info.model,
1960 (sizeof (port_info->pi_attrs.model)-1));
1961 (void) strncpy(port_info->pi_attrs.model_description,
1962 hba->model_info.model_desc,
1963 (sizeof (port_info->pi_attrs.model_description)-1));
1964
1965
1966 /* Log information */
1967 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1968 "Bind info: port_num = %d", bind_info->port_num);
1969 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1970 "Bind info: port_handle = %p", bind_info->port_handle);
1971
1972 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1973 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1974 "Bind info: port_npiv = %d", bind_info->port_npiv);
1975 #endif /* >= EMLXS_MODREV5 */
1976
1977 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1978 "Port info: pi_topology = %x", port_info->pi_topology);
1979 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1980 "Port info: pi_error = %x", port_info->pi_error);
1981 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1982 "Port info: pi_port_state = %x", port_info->pi_port_state);
1983
1984 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1985 "Port info: port_id = %x", port_info->pi_s_id.port_id);
1986 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1987 "Port info: priv_lilp_posit = %x",
1988 port_info->pi_s_id.priv_lilp_posit);
1989
1990 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1991 "Port info: hard_addr = %x",
1992 port_info->pi_hard_addr.hard_addr);
1993
1994 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1995 "Port info: rnid.status = %x",
1996 port_info->pi_rnid_params.status);
1997 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
1998 "Port info: rnid.global_id = %16s",
1999 port_info->pi_rnid_params.params.global_id);
2000 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2001 "Port info: rnid.unit_type = %x",
2002 port_info->pi_rnid_params.params.unit_type);
2003 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2004 "Port info: rnid.port_id = %x",
2005 port_info->pi_rnid_params.params.port_id);
2006 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2007 "Port info: rnid.num_attached = %x",
2008 port_info->pi_rnid_params.params.num_attached);
2009 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2010 "Port info: rnid.ip_version = %x",
2011 port_info->pi_rnid_params.params.ip_version);
2012 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2013 "Port info: rnid.udp_port = %x",
2014 port_info->pi_rnid_params.params.udp_port);
2015 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2016 "Port info: rnid.ip_addr = %16s",
2017 port_info->pi_rnid_params.params.ip_addr);
2018 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2019 "Port info: rnid.spec_id_resv = %x",
2020 port_info->pi_rnid_params.params.specific_id_resv);
2021 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2022 "Port info: rnid.topo_flags = %x",
2023 port_info->pi_rnid_params.params.topo_flags);
2024
2025 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2026 "Port info: manufacturer = %s",
2027 port_info->pi_attrs.manufacturer);
2028 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2029 "Port info: serial_num = %s",
2030 port_info->pi_attrs.serial_number);
2031 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2032 "Port info: model = %s", port_info->pi_attrs.model);
2033 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2034 "Port info: model_description = %s",
2035 port_info->pi_attrs.model_description);
2036 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2037 "Port info: hardware_version = %s",
2038 port_info->pi_attrs.hardware_version);
2039 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2040 "Port info: driver_version = %s",
2041 port_info->pi_attrs.driver_version);
2042 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2043 "Port info: option_rom_version = %s",
2044 port_info->pi_attrs.option_rom_version);
2045 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2046 "Port info: firmware_version = %s",
2047 port_info->pi_attrs.firmware_version);
2048 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2049 "Port info: driver_name = %s",
2050 port_info->pi_attrs.driver_name);
2051 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2052 "Port info: vendor_specific_id = %x",
2053 port_info->pi_attrs.vendor_specific_id);
2054 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2055 "Port info: supported_cos = %x",
2056 port_info->pi_attrs.supported_cos);
2057 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2058 "Port info: supported_speed = %x",
2059 port_info->pi_attrs.supported_speed);
2060 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2061 "Port info: max_frame_size = %x",
2062 port_info->pi_attrs.max_frame_size);
2063
2064 #if (EMLXS_MODREV >= EMLXS_MODREV3)
2065 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2066 "Port info: fru_port_index = %x",
2067 port_info->pi_attrs.hba_fru_details.port_index);
2068 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2069 "Port info: fru_high = %llx",
2070 port_info->pi_attrs.hba_fru_details.high);
2071 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2072 "Port info: fru_low = %llx",
2073 port_info->pi_attrs.hba_fru_details.low);
2074 #endif /* >= EMLXS_MODREV3 */
2075
2076 #if (EMLXS_MODREV >= EMLXS_MODREV4)
2077 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2078 "Port info: sym_node_name = %s",
2079 port_info->pi_attrs.sym_node_name);
2080 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
2081 "Port info: sym_port_name = %s",
2082 port_info->pi_attrs.sym_port_name);
2083 #endif /* >= EMLXS_MODREV4 */
2084
2085 mutex_exit(&EMLXS_PORT_LOCK);
2086
2087 #ifdef SFCT_SUPPORT
2088 if (port->flag & EMLXS_TGT_ENABLED) {
2089 emlxs_fct_bind_port(port);
2090 }
2091 #endif /* SFCT_SUPPORT */
2092
2093 return ((opaque_t)port);
2094
2095 } /* emlxs_fca_bind_port() */
2096
2097
2098 static void
2099 emlxs_fca_unbind_port(opaque_t fca_port_handle)
2100 {
2101 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2102 emlxs_hba_t *hba = HBA;
2103
2104 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2105 "fca_unbind_port: port=%p", port);
2106
2107 if (!(port->flag & EMLXS_PORT_BOUND)) {
2108 return;
2109 }
2110
2111 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
2112 (void) emlxs_vpi_port_unbind_notify(port, 1);
2113 }
2114
2115 /* Destroy & flush all port nodes, if they exist */
2116 if (port->node_count) {
2117 (void) EMLXS_SLI_UNREG_NODE(port, 0, 0, 0, 0);
2118 }
2119
2120 #if (EMLXS_MODREV >= EMLXS_MODREV5)
2121 if ((hba->sli_mode <= EMLXS_HBA_SLI3_MODE) &&
2122 (hba->flag & FC_NPIV_ENABLED) &&
2123 (port->flag & (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLED))) {
2124 (void) emlxs_mb_unreg_vpi(port);
2125 }
2126 #endif
2127
2128 mutex_enter(&EMLXS_PORT_LOCK);
2129 if (port->flag & EMLXS_INI_BOUND) {
2130 #if (EMLXS_MODREV >= EMLXS_MODREV5)
2131 port->flag &= ~(EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLED);
2132 #endif
2133 port->flag &= ~EMLXS_INI_BOUND;
2134 hba->num_of_ports--;
2135
2136 /* Wait until ulp callback interface is idle */
2137 while (port->ulp_busy) {
2138 mutex_exit(&EMLXS_PORT_LOCK);
2139 delay(drv_usectohz(500000));
2140 mutex_enter(&EMLXS_PORT_LOCK);
2141 }
2142
2143 port->ulp_handle = 0;
2144 port->ulp_statec = FC_STATE_OFFLINE;
2145 port->ulp_statec_cb = NULL;
2146 port->ulp_unsol_cb = NULL;
2147 }
2148 mutex_exit(&EMLXS_PORT_LOCK);
2149
2150 #ifdef SFCT_SUPPORT
2151 /* Check if port was target bound */
2152 if (port->flag & EMLXS_TGT_BOUND) {
2153 emlxs_fct_unbind_port(port);
2154 }
2155 #endif /* SFCT_SUPPORT */
2156
2157 return;
2158
2159 } /* emlxs_fca_unbind_port() */
2160
2161
2162 /*ARGSUSED*/
2163 extern int
2164 emlxs_fca_pkt_init(opaque_t fca_port_handle, fc_packet_t *pkt, int32_t sleep)
2165 {
2166 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2167 emlxs_hba_t *hba = HBA;
2168 emlxs_buf_t *sbp = (emlxs_buf_t *)pkt->pkt_fca_private;
2169
2170 if (!sbp) {
2171 return (FC_FAILURE);
2172 }
2173 bzero((void *)sbp, sizeof (emlxs_buf_t));
2174
2175 mutex_init(&sbp->mtx, NULL, MUTEX_DRIVER, DDI_INTR_PRI(hba->intr_arg));
2176 sbp->pkt_flags =
2177 PACKET_VALID | PACKET_ULP_OWNED;
2178 sbp->port = port;
2179 sbp->pkt = pkt;
2180 sbp->iocbq.sbp = sbp;
2181
2182 return (FC_SUCCESS);
2183
2184 } /* emlxs_fca_pkt_init() */
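/*
 * Note: the sbp->mtx initialized in emlxs_fca_pkt_init() above is
 * destroyed in emlxs_fca_pkt_uninit() when the ULP retires the packet.
 */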
2185
2186
2187
2188 static void
2189 emlxs_initialize_pkt(emlxs_port_t *port, emlxs_buf_t *sbp)
2190 {
2191 emlxs_hba_t *hba = HBA;
2192 emlxs_config_t *cfg = &CFG;
2193 fc_packet_t *pkt = PRIV2PKT(sbp);
2194
2195 mutex_enter(&sbp->mtx);
2196
2197 /* Reinitialize */
2198 sbp->pkt = pkt;
2199 sbp->port = port;
2200 sbp->bmp = NULL;
2201 sbp->pkt_flags &= (PACKET_VALID | PACKET_ALLOCATED);
2202 sbp->iotag = 0;
2203 sbp->ticks = 0;
2204 sbp->abort_attempts = 0;
2205 sbp->fpkt = NULL;
2206 sbp->flush_count = 0;
2207 sbp->next = NULL;
2208
2209 if (port->mode == MODE_INITIATOR) {
2210 sbp->node = NULL;
2211 sbp->did = 0;
2212 sbp->lun = EMLXS_LUN_NONE;
2213 sbp->class = 0;
2214 sbp->channel = NULL;
2215 }
2216
2217 bzero((void *)&sbp->iocbq, sizeof (IOCBQ));
2218 sbp->iocbq.sbp = sbp;
2219
2220 if ((pkt->pkt_tran_flags & FC_TRAN_NO_INTR) || !pkt->pkt_comp ||
2221 ddi_in_panic()) {
2222 sbp->pkt_flags |= PACKET_POLLED;
2223 }
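	/*
	 * The check above selects polled completion when the ULP requests
	 * no interrupt (FC_TRAN_NO_INTR), supplies no completion routine,
	 * or the system is panicking.
	 */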
2224
2225 /* Prepare the fc packet */
2226 pkt->pkt_state = FC_PKT_SUCCESS;
2227 pkt->pkt_reason = 0;
2228 pkt->pkt_action = 0;
2229 pkt->pkt_expln = 0;
2230 pkt->pkt_data_resid = 0;
2231 pkt->pkt_resp_resid = 0;
2232
2233 /* Make sure all pkt's have a proper timeout */
2234 if (!cfg[CFG_TIMEOUT_ENABLE].current) {
2235 /* This disables all IOCB on chip timeouts */
2236 pkt->pkt_timeout = 0x80000000;
2237 } else if (pkt->pkt_timeout == 0 || pkt->pkt_timeout == 0xffffffff) {
2238 pkt->pkt_timeout = 60;
2239 }
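	/*
	 * With timeouts enabled, a ULP pkt_timeout of 0 or 0xffffffff is
	 * treated as unspecified and defaults to 60 seconds.
	 */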
2240
2241 /* Clear the response buffer */
2242 if (pkt->pkt_rsplen) {
2243 bzero(pkt->pkt_resp, pkt->pkt_rsplen);
2244 }
2245
2246 mutex_exit(&sbp->mtx);
2247
2248 return;
2249
2250 } /* emlxs_initialize_pkt() */
2251
2252
2253
2254 /*
2255 * We may not need this routine
2256 */
2257 /*ARGSUSED*/
2258 extern int
2259 emlxs_fca_pkt_uninit(opaque_t fca_port_handle, fc_packet_t *pkt)
2260 {
2261 emlxs_buf_t *sbp = PKT2PRIV(pkt);
2262
2263 if (!sbp) {
2264 return (FC_FAILURE);
2265 }
2266
2267 if (!(sbp->pkt_flags & PACKET_VALID)) {
2268 return (FC_FAILURE);
2269 }
2270 sbp->pkt_flags &= ~PACKET_VALID;
2271 mutex_destroy(&sbp->mtx);
2272
2273 return (FC_SUCCESS);
2274
2275 } /* emlxs_fca_pkt_uninit() */
2276
2277
2278 static int
2279 emlxs_fca_get_cap(opaque_t fca_port_handle, char *cap, void *ptr)
2280 {
2281 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2282 emlxs_hba_t *hba = HBA;
2283 int32_t rval;
2284 emlxs_config_t *cfg = &CFG;
2285
2286 if (!(port->flag & EMLXS_INI_BOUND)) {
2287 return (FC_CAP_ERROR);
2288 }
2289
2290 if (strcmp(cap, FC_NODE_WWN) == 0) {
2291 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2292 "fca_get_cap: FC_NODE_WWN");
2293
2294 bcopy((void *)&hba->wwnn, (void *)ptr, sizeof (NAME_TYPE));
2295 rval = FC_CAP_FOUND;
2296
2297 } else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
2298 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2299 "fca_get_cap: FC_LOGIN_PARAMS");
2300
2301 /*
2302 * We need to turn off CLASS2 support.
2303 * Otherwise, FC transport will use CLASS2 as default class
2304 * and never try with CLASS3.
2305 */
2306 hba->sparam.cls2.classValid = 0;
2307
2308 bcopy((void *)&hba->sparam, (void *)ptr, sizeof (SERV_PARM));
2309
2310 rval = FC_CAP_FOUND;
2311
2312 } else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
2313 int32_t *num_bufs;
2314
2315 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2316 "fca_get_cap: FC_CAP_UNSOL_BUF (%d)",
2317 cfg[CFG_UB_BUFS].current);
2318
2319 num_bufs = (int32_t *)ptr;
2320
2321 /* We multiply by MAX_VPORTS because ULP uses a */
2322 /* formula to calculate ub bufs from this */
2323 *num_bufs = (cfg[CFG_UB_BUFS].current * MAX_VPORTS);
2324
2325 rval = FC_CAP_FOUND;
2326
2327 } else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
2328 int32_t *size;
2329
2330 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2331 "fca_get_cap: FC_CAP_PAYLOAD_SIZE");
2332
2333 size = (int32_t *)ptr;
2334 *size = -1;
2335 rval = FC_CAP_FOUND;
2336
2337 } else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
2338 fc_reset_action_t *action;
2339
2340 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2341 "fca_get_cap: FC_CAP_POST_RESET_BEHAVIOR");
2342
2343 action = (fc_reset_action_t *)ptr;
2344 *action = FC_RESET_RETURN_ALL;
2345 rval = FC_CAP_FOUND;
2346
2347 } else if (strcmp(cap, FC_CAP_NOSTREAM_ON_UNALIGN_BUF) == 0) {
2348 fc_dma_behavior_t *behavior;
2349
2350 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2351 "fca_get_cap: FC_CAP_NOSTREAM_ON_UNALIGN_BUF");
2352
2353 behavior = (fc_dma_behavior_t *)ptr;
2354 *behavior = FC_ALLOW_STREAMING;
2355 rval = FC_CAP_FOUND;
2356
2357 } else if (strcmp(cap, FC_CAP_FCP_DMA) == 0) {
2358 fc_fcp_dma_t *fcp_dma;
2359
2360 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2361 "fca_get_cap: FC_CAP_FCP_DMA");
2362
2363 fcp_dma = (fc_fcp_dma_t *)ptr;
2364 *fcp_dma = FC_DVMA_SPACE;
2365 rval = FC_CAP_FOUND;
2366
2367 } else {
2368 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2369 "fca_get_cap: Unknown capability. [%s]", cap);
2370
2371 rval = FC_CAP_ERROR;
2372
2373 }
2374
2375 return (rval);
2376
2377 } /* emlxs_fca_get_cap() */
2378
2379
2380
2381 static int
2382 emlxs_fca_set_cap(opaque_t fca_port_handle, char *cap, void *ptr)
2383 {
2384 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2385
2386 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2387 "fca_set_cap: cap=[%s] arg=%p", cap, ptr);
2388
2389 return (FC_CAP_ERROR);
2390
2391 } /* emlxs_fca_set_cap() */
2392
2393
2394 static opaque_t
2395 emlxs_fca_get_device(opaque_t fca_port_handle, fc_portid_t d_id)
2396 {
2397 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2398
2399 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2400 "fca_get_device: did=%x", d_id.port_id);
2401
2402 return (NULL);
2403
2404 } /* emlxs_fca_get_device() */
2405
2406
2407 static int32_t
2408 emlxs_fca_notify(opaque_t fca_port_handle, uint32_t cmd)
2409 {
2410 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2411
2412 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, "fca_notify: cmd=%x",
2413 cmd);
2414
2415 return (FC_SUCCESS);
2416
2417 } /* emlxs_fca_notify */
2418
2419
2420
2421 static int
2422 emlxs_fca_get_map(opaque_t fca_port_handle, fc_lilpmap_t *mapbuf)
2423 {
2424 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2425 emlxs_hba_t *hba = HBA;
2426 uint32_t lilp_length;
2427
2428 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2429 "fca_get_map: mapbuf=%p length=%d (%X,%X,%X,%X)", mapbuf,
2430 port->alpa_map[0], port->alpa_map[1], port->alpa_map[2],
2431 port->alpa_map[3], port->alpa_map[4]);
2432
2433 if (!(port->flag & EMLXS_INI_BOUND)) {
2434 return (FC_NOMAP);
2435 }
2436
2437 if (hba->topology != TOPOLOGY_LOOP) {
2438 return (FC_NOMAP);
2439 }
2440
2441 /* Check if alpa map is available */
2442 if (port->alpa_map[0] != 0) {
2443 mapbuf->lilp_magic = MAGIC_LILP;
2444 } else { /* No LILP map available */
2445
2446 /* Set lilp_magic to MAGIC_LISA and this will */
2447 /* trigger an ALPA scan in ULP */
2448 mapbuf->lilp_magic = MAGIC_LISA;
2449 }
2450
2451 mapbuf->lilp_myalpa = port->did;
2452
2453 /* The first byte of the alpa_map is the lilp map length */
2454 /* Add one to include the lilp length byte itself */
2455 lilp_length = (uint32_t)port->alpa_map[0] + 1;
2456
2457 /* Make sure the max transfer is 128 bytes */
2458 if (lilp_length > 128) {
2459 lilp_length = 128;
2460 }
2461
2462 /* We start copying from the lilp_length field */
2463 /* in order to get a word aligned address */
2464 bcopy((void *)&port->alpa_map, (void *)&mapbuf->lilp_length,
2465 lilp_length);
2466
2467 return (FC_SUCCESS);
2468
2469 } /* emlxs_fca_get_map() */
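/*
 * Illustrative sketch only: alpa_map[0] holds the number of ALPAs that
 * follow it, so a hypothetical three-port loop might look like
 *
 *	alpa_map[] = { 0x03, 0x01, 0x02, 0xE8, ... }
 *
 * The bcopy in emlxs_fca_get_map() above copies that count into
 * mapbuf->lilp_length with the ALPA list immediately after it.
 */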
2470
2471
2472
2473 extern int
2474 emlxs_fca_transport(opaque_t fca_port_handle, fc_packet_t *pkt)
2475 {
2476 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2477 emlxs_hba_t *hba = HBA;
2478 emlxs_buf_t *sbp;
2479 uint32_t rval;
2480 uint32_t pkt_flags;
2481
2482 /* Validate packet */
2483 sbp = PKT2PRIV(pkt);
2484
2485 /* Make sure adapter is online */
2486 if (!(hba->flag & FC_ONLINE_MODE) &&
2487 !(sbp->pkt_flags & PACKET_ALLOCATED)) {
2488 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
2489 "Adapter offline.");
2490
2491 rval = (hba->flag & FC_ONLINING_MODE) ?
2492 FC_TRAN_BUSY : FC_OFFLINE;
2493 return (rval);
2494 }
2495
2496 /* Make sure ULP was told that the port was online */
2497 if ((port->ulp_statec == FC_STATE_OFFLINE) &&
2498 !(sbp->pkt_flags & PACKET_ALLOCATED)) {
2499 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
2500 "Port offline.");
2501
2502 return (FC_OFFLINE);
2503 }
2504
2505 if (sbp->port != port) {
2506 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2507 "Invalid port handle. sbp=%p port=%p flags=%x", sbp,
2508 sbp->port, sbp->pkt_flags);
2509 return (FC_BADPACKET);
2510 }
2511
2512 if (!(sbp->pkt_flags & (PACKET_VALID | PACKET_ULP_OWNED))) {
2513 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2514 "Invalid packet flags. sbp=%p port=%p flags=%x", sbp,
2515 sbp->port, sbp->pkt_flags);
2516 return (FC_BADPACKET);
2517 }
2518
2519 #ifdef SFCT_SUPPORT
2520 if ((port->mode == MODE_TARGET) && !sbp->fct_cmd &&
2521 !(sbp->pkt_flags & PACKET_ALLOCATED)) {
2522 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2523 "Packet blocked. Target mode.");
2524 return (FC_TRANSPORT_ERROR);
2525 }
2526 #endif /* SFCT_SUPPORT */
2527
2528 #ifdef IDLE_TIMER
2529 emlxs_pm_busy_component(hba);
2530 #endif /* IDLE_TIMER */
2531
2532 /* Prepare the packet for transport */
2533 emlxs_initialize_pkt(port, sbp);
2534
2535 /* Save a copy of the pkt flags. */
2536 /* We will check the polling flag later */
2537 pkt_flags = sbp->pkt_flags;
2538
2539 /* Send the packet */
2540 switch (pkt->pkt_tran_type) {
2541 case FC_PKT_FCP_READ:
2542 case FC_PKT_FCP_WRITE:
2543 rval = emlxs_send_fcp_cmd(port, sbp, &pkt_flags);
2544 break;
2545
2546 case FC_PKT_IP_WRITE:
2547 case FC_PKT_BROADCAST:
2548 rval = emlxs_send_ip(port, sbp);
2549 break;
2550
2551 case FC_PKT_EXCHANGE:
2552 switch (pkt->pkt_cmd_fhdr.type) {
2553 case FC_TYPE_SCSI_FCP:
2554 rval = emlxs_send_fcp_cmd(port, sbp, &pkt_flags);
2555 break;
2556
2557 case FC_TYPE_FC_SERVICES:
2558 rval = emlxs_send_ct(port, sbp);
2559 break;
2560
2561 #ifdef MENLO_SUPPORT
2562 case EMLXS_MENLO_TYPE:
2563 rval = emlxs_send_menlo(port, sbp);
2564 break;
2565 #endif /* MENLO_SUPPORT */
2566
2567 default:
2568 rval = emlxs_send_els(port, sbp);
2569 }
2570 break;
2571
2572 case FC_PKT_OUTBOUND:
2573 switch (pkt->pkt_cmd_fhdr.type) {
2574 #ifdef SFCT_SUPPORT
2575 case FC_TYPE_SCSI_FCP:
2576 rval = emlxs_send_fct_status(port, sbp);
2577 break;
2578
2579 case FC_TYPE_BASIC_LS:
2580 rval = emlxs_send_fct_abort(port, sbp);
2581 break;
2582 #endif /* SFCT_SUPPORT */
2583
2584 case FC_TYPE_FC_SERVICES:
2585 rval = emlxs_send_ct_rsp(port, sbp);
2586 break;
2587 #ifdef MENLO_SUPPORT
2588 case EMLXS_MENLO_TYPE:
2589 rval = emlxs_send_menlo(port, sbp);
2590 break;
2591 #endif /* MENLO_SUPPORT */
2592
2593 default:
2594 rval = emlxs_send_els_rsp(port, sbp);
2595 }
2596 break;
2597
2598 default:
2599 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
2600 "Unsupported pkt_tran_type. type=%x", pkt->pkt_tran_type);
2601 rval = FC_TRANSPORT_ERROR;
2602 break;
2603 }
2604
2605 /* Check if send was not successful */
2606 if (rval != FC_SUCCESS) {
2607 /* Return packet to ULP */
2608 mutex_enter(&sbp->mtx);
2609 sbp->pkt_flags |= PACKET_ULP_OWNED;
2610 mutex_exit(&sbp->mtx);
2611
2612 return (rval);
2613 }
2614
2615 /* Check if this packet should be polled for completion before */
2616 /* returning. This check must be done with a saved copy of the */
2617 /* pkt_flags because the packet itself could already be freed from */
2618 /* memory if it was not polled. */
2619 if (pkt_flags & PACKET_POLLED) {
2620 emlxs_poll(port, sbp);
2621 }
2622
2623 return (FC_SUCCESS);
2624
2625 } /* emlxs_fca_transport() */
2626
2627
2628
2629 static void
2630 emlxs_poll(emlxs_port_t *port, emlxs_buf_t *sbp)
2631 {
2632 emlxs_hba_t *hba = HBA;
2633 fc_packet_t *pkt = PRIV2PKT(sbp);
2634 clock_t timeout;
2635 clock_t time;
2636 CHANNEL *cp;
2637 int in_panic = 0;
2638
2639 mutex_enter(&EMLXS_PORT_LOCK);
2640 hba->io_poll_count++;
2641 mutex_exit(&EMLXS_PORT_LOCK);
2642
2643 /* Check for panic situation */
2644 cp = (CHANNEL *)sbp->channel;
2645
2646 if (ddi_in_panic()) {
2647 in_panic = 1;
2648 /*
2649 * In panic situations there will be one thread with
2650 		 * no interrupts (hard or soft) and no timers
2651 */
2652
2653 /*
2654 * We must manually poll everything in this thread
2655 * to keep the driver going.
2656 */
2657
2658 /* Keep polling the chip until our IO is completed */
2659 /* Driver's timer will not function during panics. */
2660 /* Therefore, timer checks must be performed manually. */
2661 (void) drv_getparm(LBOLT, &time);
2662 timeout = time + drv_usectohz(1000000);
2663 while (!(sbp->pkt_flags & PACKET_COMPLETED)) {
2664 EMLXS_SLI_POLL_INTR(hba);
2665 (void) drv_getparm(LBOLT, &time);
2666
2667 /* Trigger timer checks periodically */
2668 if (time >= timeout) {
2669 emlxs_timer_checks(hba);
2670 timeout = time + drv_usectohz(1000000);
2671 }
2672 }
2673 } else {
2674 /* Wait for IO completion */
2675 /* The driver's timer will detect */
2676 /* any timeout and abort the I/O. */
2677 mutex_enter(&EMLXS_PKT_LOCK);
2678 while (!(sbp->pkt_flags & PACKET_COMPLETED)) {
2679 cv_wait(&EMLXS_PKT_CV, &EMLXS_PKT_LOCK);
2680 }
2681 mutex_exit(&EMLXS_PKT_LOCK);
2682 }
2683
2684 /* Check for fcp reset pkt */
2685 if (sbp->pkt_flags & PACKET_FCP_RESET) {
2686 if (sbp->pkt_flags & PACKET_FCP_TGT_RESET) {
2687 /* Flush the IO's on the chipq */
2688 (void) emlxs_chipq_node_flush(port,
2689 &hba->chan[hba->channel_fcp],
2690 sbp->node, sbp);
2691 } else {
2692 /* Flush the IO's on the chipq for this lun */
2693 (void) emlxs_chipq_lun_flush(port,
2694 sbp->node, sbp->lun, sbp);
2695 }
2696
2697 if (sbp->flush_count == 0) {
2698 emlxs_node_open(port, sbp->node, hba->channel_fcp);
2699 goto done;
2700 }
2701
2702 /* Set the timeout so the flush has time to complete */
2703 timeout = emlxs_timeout(hba, 60);
2704 (void) drv_getparm(LBOLT, &time);
2705 while ((time < timeout) && sbp->flush_count > 0) {
2706 delay(drv_usectohz(500000));
2707 (void) drv_getparm(LBOLT, &time);
2708 }
2709
2710 if (sbp->flush_count == 0) {
2711 emlxs_node_open(port, sbp->node, hba->channel_fcp);
2712 goto done;
2713 }
2714
2715 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2716 "sbp=%p flush_count=%d. Waiting...", sbp,
2717 sbp->flush_count);
2718
2719 /* Let's try this one more time */
2720
2721 if (sbp->pkt_flags & PACKET_FCP_TGT_RESET) {
2722 /* Flush the IO's on the chipq */
2723 (void) emlxs_chipq_node_flush(port,
2724 &hba->chan[hba->channel_fcp],
2725 sbp->node, sbp);
2726 } else {
2727 /* Flush the IO's on the chipq for this lun */
2728 (void) emlxs_chipq_lun_flush(port,
2729 sbp->node, sbp->lun, sbp);
2730 }
2731
2732 /* Reset the timeout so the flush has time to complete */
2733 timeout = emlxs_timeout(hba, 60);
2734 (void) drv_getparm(LBOLT, &time);
2735 while ((time < timeout) && sbp->flush_count > 0) {
2736 delay(drv_usectohz(500000));
2737 (void) drv_getparm(LBOLT, &time);
2738 }
2739
2740 if (sbp->flush_count == 0) {
2741 emlxs_node_open(port, sbp->node, hba->channel_fcp);
2742 goto done;
2743 }
2744
2745 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2746 "sbp=%p flush_count=%d. Resetting link.", sbp,
2747 sbp->flush_count);
2748
2749 /* Let's first try to reset the link */
2750 (void) emlxs_reset(port, FC_FCA_LINK_RESET);
2751
2752 if (sbp->flush_count == 0) {
2753 goto done;
2754 }
2755
2756 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2757 "sbp=%p flush_count=%d. Resetting HBA.", sbp,
2758 sbp->flush_count);
2759
2760 /* If that doesn't work, reset the adapter */
2761 (void) emlxs_reset(port, FC_FCA_RESET);
2762
2763 if (sbp->flush_count != 0) {
2764 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
2765 "sbp=%p flush_count=%d. Giving up.", sbp,
2766 sbp->flush_count);
2767 }
2768
2769 }
2770 /* PACKET_FCP_RESET */
2771 done:
2772
2773 /* Packet has been declared completed and is now ready to be returned */
2774
2775 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
2776 emlxs_unswap_pkt(sbp);
2777 #endif /* EMLXS_MODREV2X */
2778
2779 mutex_enter(&sbp->mtx);
2780 sbp->pkt_flags |= PACKET_ULP_OWNED;
2781 mutex_exit(&sbp->mtx);
2782
2783 mutex_enter(&EMLXS_PORT_LOCK);
2784 hba->io_poll_count--;
2785 mutex_exit(&EMLXS_PORT_LOCK);
2786
2787 #ifdef FMA_SUPPORT
2788 if (!in_panic) {
2789 emlxs_check_dma(hba, sbp);
2790 }
2791 #endif
2792
2793 /* Make ULP completion callback if required */
2794 if (pkt->pkt_comp) {
2795 cp->ulpCmplCmd++;
2796 (*pkt->pkt_comp) (pkt);
2797 }
2798
2799 #ifdef FMA_SUPPORT
2800 if (hba->flag & FC_DMA_CHECK_ERROR) {
2801 emlxs_thread_spawn(hba, emlxs_restart_thread,
2802 NULL, NULL);
2803 }
2804 #endif
2805
2806 return;
2807
2808 } /* emlxs_poll() */
2809
2810
2811 static int
2812 emlxs_fca_ub_alloc(opaque_t fca_port_handle, uint64_t tokens[], uint32_t size,
2813 uint32_t *count, uint32_t type)
2814 {
2815 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2816 emlxs_hba_t *hba = HBA;
2817 char *err = NULL;
2818 emlxs_unsol_buf_t *pool = NULL;
2819 emlxs_unsol_buf_t *new_pool = NULL;
2820 emlxs_config_t *cfg = &CFG;
2821 int32_t i;
2822 int result;
2823 uint32_t free_resv;
2824 uint32_t free;
2825 fc_unsol_buf_t *ubp;
2826 emlxs_ub_priv_t *ub_priv;
2827 int rc;
2828
2829 if (!(port->flag & EMLXS_INI_ENABLED)) {
2830 if (tokens && count) {
2831 bzero(tokens, (sizeof (uint64_t) * (*count)));
2832 }
2833 return (FC_SUCCESS);
2834 }
2835
2836 if (!(port->flag & EMLXS_INI_BOUND)) {
2837 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2838 "fca_ub_alloc failed: Port not bound! size=%x count=%d "
2839 "type=%x", size, *count, type);
2840
2841 return (FC_FAILURE);
2842 }
2843
2844 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2845 "fca_ub_alloc: size=%x count=%d type=%x", size, *count, type);
2846
2847 if (count && (*count > EMLXS_MAX_UBUFS)) {
2848 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2849 		    "fca_ub_alloc failed: Too many unsolicited buffers "
2850 "requested. count=%x", *count);
2851
2852 return (FC_FAILURE);
2853
2854 }
2855
2856 if (tokens == NULL) {
2857 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2858 "fca_ub_alloc failed: Token array is NULL.");
2859
2860 return (FC_FAILURE);
2861 }
2862
2863 /* Clear the token array */
2864 bzero(tokens, (sizeof (uint64_t) * (*count)));
2865
2866 free_resv = 0;
2867 free = *count;
2868 switch (type) {
2869 case FC_TYPE_BASIC_LS:
2870 err = "BASIC_LS";
2871 break;
2872 case FC_TYPE_EXTENDED_LS:
2873 err = "EXTENDED_LS";
2874 free = *count / 2; /* Hold 50% for normal use */
2875 free_resv = *count - free; /* Reserve 50% for RSCN use */
2876 break;
2877 case FC_TYPE_IS8802:
2878 err = "IS8802";
2879 break;
2880 case FC_TYPE_IS8802_SNAP:
2881 err = "IS8802_SNAP";
2882
2883 if (cfg[CFG_NETWORK_ON].current == 0) {
2884 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2885 "fca_ub_alloc failed: IP support is disabled.");
2886
2887 return (FC_FAILURE);
2888 }
2889 break;
2890 case FC_TYPE_SCSI_FCP:
2891 err = "SCSI_FCP";
2892 break;
2893 case FC_TYPE_SCSI_GPP:
2894 err = "SCSI_GPP";
2895 break;
2896 case FC_TYPE_HIPP_FP:
2897 err = "HIPP_FP";
2898 break;
2899 case FC_TYPE_IPI3_MASTER:
2900 err = "IPI3_MASTER";
2901 break;
2902 case FC_TYPE_IPI3_SLAVE:
2903 err = "IPI3_SLAVE";
2904 break;
2905 case FC_TYPE_IPI3_PEER:
2906 err = "IPI3_PEER";
2907 break;
2908 case FC_TYPE_FC_SERVICES:
2909 err = "FC_SERVICES";
2910 break;
2911 }
2912
2913 mutex_enter(&EMLXS_UB_LOCK);
2914
2915 /*
2916 * Walk through the list of the unsolicited buffers
2917 * for this ddiinst of emlx.
2918 */
2919
2920 pool = port->ub_pool;
2921
2922 /*
2923 	 * emlxs_fca_ub_alloc() can be called more than once with different
2924 	 * sizes. Reject the call if a pool with the same size and
2925 	 * FC-4 type already exists.
2926 */
2927 while (pool) {
2928 if ((pool->pool_type == type) &&
2929 (pool->pool_buf_size == size)) {
2930 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
2931 "fca_ub_alloc failed: Unsolicited buffer pool "
2932 "for %s of size 0x%x bytes already exists.",
2933 err, size);
2934
2935 result = FC_FAILURE;
2936 goto fail;
2937 }
2938
2939 pool = pool->pool_next;
2940 }
2941
2942 mutex_exit(&EMLXS_UB_LOCK);
2943
2944 new_pool = (emlxs_unsol_buf_t *)kmem_zalloc(sizeof (emlxs_unsol_buf_t),
2945 KM_SLEEP);
2946
2947 new_pool->pool_next = NULL;
2948 new_pool->pool_type = type;
2949 new_pool->pool_buf_size = size;
2950 new_pool->pool_nentries = *count;
2951 new_pool->pool_available = new_pool->pool_nentries;
2952 new_pool->pool_free = free;
2953 new_pool->pool_free_resv = free_resv;
2954 new_pool->fc_ubufs =
2955 kmem_zalloc((sizeof (fc_unsol_buf_t) * (*count)), KM_SLEEP);
2956
2957 new_pool->pool_first_token = port->ub_count;
2958 new_pool->pool_last_token = port->ub_count + new_pool->pool_nentries;
2959
2960 for (i = 0; i < new_pool->pool_nentries; i++) {
2961 ubp = (fc_unsol_buf_t *)&new_pool->fc_ubufs[i];
2962 ubp->ub_port_handle = port->ulp_handle;
2963 ubp->ub_token = (uint64_t)((unsigned long)ubp);
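		/*
		 * The token handed back to the ULP is simply the kernel
		 * address of the buffer object, so it can later be cast
		 * back to an fc_unsol_buf_t pointer in ub_release/ub_free.
		 */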
2964 ubp->ub_bufsize = size;
2965 ubp->ub_class = FC_TRAN_CLASS3;
2966 ubp->ub_port_private = NULL;
2967 ubp->ub_fca_private =
2968 (emlxs_ub_priv_t *)kmem_zalloc(sizeof (emlxs_ub_priv_t),
2969 KM_SLEEP);
2970
2971 /*
2972 * Initialize emlxs_ub_priv_t
2973 */
2974 ub_priv = ubp->ub_fca_private;
2975 ub_priv->ubp = ubp;
2976 ub_priv->port = port;
2977 ub_priv->flags = EMLXS_UB_FREE;
2978 ub_priv->available = 1;
2979 ub_priv->pool = new_pool;
2980 ub_priv->time = 0;
2981 ub_priv->timeout = 0;
2982 ub_priv->token = port->ub_count;
2983 ub_priv->cmd = 0;
2984
2985 /* Allocate the actual buffer */
2986 ubp->ub_buffer = (caddr_t)kmem_zalloc(size, KM_SLEEP);
2987
2988
2989 tokens[i] = (uint64_t)((unsigned long)ubp);
2990 port->ub_count++;
2991 }
2992
2993 mutex_enter(&EMLXS_UB_LOCK);
2994
2995 /* Add the pool to the top of the pool list */
2996 new_pool->pool_prev = NULL;
2997 new_pool->pool_next = port->ub_pool;
2998
2999 if (port->ub_pool) {
3000 port->ub_pool->pool_prev = new_pool;
3001 }
3002 port->ub_pool = new_pool;
3003
3004 /* Set the post counts */
3005 if (type == FC_TYPE_IS8802_SNAP) {
3006 MAILBOXQ *mbox;
3007
3008 port->ub_post[hba->channel_ip] += new_pool->pool_nentries;
3009
3010 if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
3011 MEM_MBOX))) {
3012 emlxs_mb_config_farp(hba, mbox);
3013 rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba,
3014 mbox, MBX_NOWAIT, 0);
3015 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
3016 emlxs_mem_put(hba, MEM_MBOX, (void *)mbox);
3017 }
3018 }
3019 port->flag |= EMLXS_PORT_IP_UP;
3020 } else if (type == FC_TYPE_EXTENDED_LS) {
3021 port->ub_post[hba->channel_els] += new_pool->pool_nentries;
3022 } else if (type == FC_TYPE_FC_SERVICES) {
3023 port->ub_post[hba->channel_ct] += new_pool->pool_nentries;
3024 }
3025
3026 mutex_exit(&EMLXS_UB_LOCK);
3027
3028 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
3029 "%d unsolicited buffers allocated for %s of size 0x%x bytes.",
3030 *count, err, size);
3031
3032 return (FC_SUCCESS);
3033
3034 fail:
3035
3036 /* Clean the pool */
3037 for (i = 0; tokens[i] != 0; i++) {
3038 /* Get the buffer object */
3039 ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]);
3040 ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3041
3042 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
3043 "fca_ub_alloc failed: Freed buffer=%p token=%x size=%x "
3044 "type=%x ", ubp, ub_priv->token, ubp->ub_bufsize, type);
3045
3046 /* Free the actual buffer */
3047 kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
3048
3049 /* Free the private area of the buffer object */
3050 kmem_free(ubp->ub_fca_private, sizeof (emlxs_ub_priv_t));
3051
3052 tokens[i] = 0;
3053 port->ub_count--;
3054 }
3055
3056 if (new_pool) {
3057 /* Free the array of buffer objects in the pool */
3058 kmem_free((caddr_t)new_pool->fc_ubufs,
3059 (sizeof (fc_unsol_buf_t) * new_pool->pool_nentries));
3060
3061 /* Free the pool object */
3062 kmem_free((caddr_t)new_pool, sizeof (emlxs_unsol_buf_t));
3063 }
3064
3065 mutex_exit(&EMLXS_UB_LOCK);
3066
3067 return (result);
3068
3069 } /* emlxs_fca_ub_alloc() */
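/*
 * Illustrative sketch only (not part of the driver): a ULP might
 * allocate and later release an unsolicited buffer pool roughly as
 * follows. The fca_tran field names below are assumptions used for
 * illustration, not a statement of the actual fc_fca_tran_t layout.
 *
 *	uint64_t tokens[32];
 *	uint32_t count = 32;
 *
 *	if (fca_tran->fca_ub_alloc(port_handle, tokens, 2048, &count,
 *	    FC_TYPE_EXTENDED_LS) == FC_SUCCESS) {
 *		... unsolicited ELS frames are delivered via ub callbacks ...
 *		(void) fca_tran->fca_ub_release(port_handle, count, tokens);
 *	}
 */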
3070
3071
3072 static void
3073 emlxs_ub_els_reject(emlxs_port_t *port, fc_unsol_buf_t *ubp)
3074 {
3075 emlxs_hba_t *hba = HBA;
3076 emlxs_ub_priv_t *ub_priv;
3077 fc_packet_t *pkt;
3078 ELS_PKT *els;
3079 uint32_t sid;
3080
3081 ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3082
3083 if (hba->state <= FC_LINK_DOWN) {
3084 emlxs_abort_els_exchange(hba, port, ubp->ub_frame.rx_id);
3085 return;
3086 }
3087
3088 if (!(pkt = emlxs_pkt_alloc(port, sizeof (uint32_t) +
3089 sizeof (LS_RJT), 0, 0, KM_NOSLEEP))) {
3090 emlxs_abort_els_exchange(hba, port, ubp->ub_frame.rx_id);
3091 return;
3092 }
3093
3094 sid = LE_SWAP24_LO(ubp->ub_frame.s_id);
3095
3096 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
3097 "%s dropped: sid=%x. Rejecting.",
3098 emlxs_elscmd_xlate(ub_priv->cmd), sid);
3099
3100 pkt->pkt_tran_type = FC_PKT_OUTBOUND;
3101 pkt->pkt_timeout = (2 * hba->fc_ratov);
3102
3103 if ((uint32_t)ubp->ub_class == FC_TRAN_CLASS2) {
3104 pkt->pkt_tran_flags &= ~FC_TRAN_CLASS3;
3105 pkt->pkt_tran_flags |= FC_TRAN_CLASS2;
3106 }
3107
3108 /* Build the fc header */
3109 pkt->pkt_cmd_fhdr.d_id = ubp->ub_frame.s_id;
3110 pkt->pkt_cmd_fhdr.r_ctl =
3111 R_CTL_EXTENDED_SVC | R_CTL_SOLICITED_CONTROL;
3112 pkt->pkt_cmd_fhdr.s_id = LE_SWAP24_LO(port->did);
3113 pkt->pkt_cmd_fhdr.type = FC_TYPE_EXTENDED_LS;
3114 pkt->pkt_cmd_fhdr.f_ctl =
3115 F_CTL_XCHG_CONTEXT | F_CTL_LAST_SEQ | F_CTL_END_SEQ;
3116 pkt->pkt_cmd_fhdr.seq_id = 0;
3117 pkt->pkt_cmd_fhdr.df_ctl = 0;
3118 pkt->pkt_cmd_fhdr.seq_cnt = 0;
3119 pkt->pkt_cmd_fhdr.ox_id = (ub_priv->cmd >> ELS_CMD_SHIFT) & 0xff;
3120 pkt->pkt_cmd_fhdr.rx_id = ubp->ub_frame.rx_id;
3121 pkt->pkt_cmd_fhdr.ro = 0;
3122
3123 /* Build the command */
3124 els = (ELS_PKT *) pkt->pkt_cmd;
3125 els->elsCode = 0x01;
3126 els->un.lsRjt.un.b.lsRjtRsvd0 = 0;
3127 els->un.lsRjt.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
3128 els->un.lsRjt.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
3129 els->un.lsRjt.un.b.vendorUnique = 0x02;
3130
3131 /* Send the pkt later in another thread */
3132 (void) emlxs_pkt_send(pkt, 0);
3133
3134 return;
3135
3136 } /* emlxs_ub_els_reject() */
3137
3138 extern int
3139 emlxs_fca_ub_release(opaque_t fca_port_handle, uint32_t count,
3140 uint64_t tokens[])
3141 {
3142 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
3143 emlxs_hba_t *hba = HBA;
3144 fc_unsol_buf_t *ubp;
3145 emlxs_ub_priv_t *ub_priv;
3146 uint32_t i;
3147 uint32_t time;
3148 emlxs_unsol_buf_t *pool;
3149
3150 if (count == 0) {
3151 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3152 "fca_ub_release: Nothing to do. count=%d", count);
3153
3154 return (FC_SUCCESS);
3155 }
3156
3157 if (!(port->flag & EMLXS_INI_BOUND)) {
3158 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3159 "fca_ub_release failed: Port not bound. count=%d "
3160 "token[0]=%p",
3161 count, tokens[0]);
3162
3163 return (FC_UNBOUND);
3164 }
3165
3166 mutex_enter(&EMLXS_UB_LOCK);
3167
3168 if (!port->ub_pool) {
3169 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3170 "fca_ub_release failed: No pools! count=%d token[0]=%p",
3171 count, tokens[0]);
3172
3173 mutex_exit(&EMLXS_UB_LOCK);
3174 return (FC_UB_BADTOKEN);
3175 }
3176
3177 for (i = 0; i < count; i++) {
3178 ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]);
3179
3180 if (!ubp) {
3181 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3182 "fca_ub_release failed: count=%d tokens[%d]=0",
3183 count, i);
3184
3185 mutex_exit(&EMLXS_UB_LOCK);
3186 return (FC_UB_BADTOKEN);
3187 }
3188
3189 ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3190
3191 if (!ub_priv || (ub_priv == (emlxs_ub_priv_t *)DEAD_PTR)) {
3192 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3193 "fca_ub_release failed: Dead buffer found. ubp=%p",
3194 ubp);
3195
3196 mutex_exit(&EMLXS_UB_LOCK);
3197 return (FC_UB_BADTOKEN);
3198 }
3199
3200 if (ub_priv->flags == EMLXS_UB_FREE) {
3201 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3202 "fca_ub_release: Buffer already free! ubp=%p "
3203 "token=%x",
3204 ubp, ub_priv->token);
3205
3206 continue;
3207 }
3208
3209 /* Check for dropped els buffer */
3210 /* ULP will do this sometimes without sending a reply */
3211 if ((ubp->ub_frame.r_ctl == FC_ELS_REQ) &&
3212 !(ub_priv->flags & EMLXS_UB_REPLY)) {
3213 emlxs_ub_els_reject(port, ubp);
3214 }
3215
3216 /* Mark the buffer free */
3217 ub_priv->flags = EMLXS_UB_FREE;
3218 bzero(ubp->ub_buffer, ubp->ub_bufsize);
3219
3220 time = hba->timer_tics - ub_priv->time;
3221 ub_priv->time = 0;
3222 ub_priv->timeout = 0;
3223
3224 pool = ub_priv->pool;
3225
3226 if (ub_priv->flags & EMLXS_UB_RESV) {
3227 pool->pool_free_resv++;
3228 } else {
3229 pool->pool_free++;
3230 }
3231
3232 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
3233 "fca_ub_release: ubp=%p token=%x time=%d av=%d "
3234 "(%d,%d,%d,%d)",
3235 ubp, ub_priv->token, time, ub_priv->available,
3236 pool->pool_nentries, pool->pool_available,
3237 pool->pool_free, pool->pool_free_resv);
3238
3239 /* Check if pool can be destroyed now */
3240 if ((pool->pool_available == 0) &&
3241 (pool->pool_free + pool->pool_free_resv ==
3242 pool->pool_nentries)) {
3243 emlxs_ub_destroy(port, pool);
3244 }
3245 }
3246
3247 mutex_exit(&EMLXS_UB_LOCK);
3248
3249 return (FC_SUCCESS);
3250
3251 } /* emlxs_fca_ub_release() */
3252
3253
3254 static int
3255 emlxs_fca_ub_free(opaque_t fca_port_handle, uint32_t count, uint64_t tokens[])
3256 {
3257 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
3258 emlxs_unsol_buf_t *pool;
3259 fc_unsol_buf_t *ubp;
3260 emlxs_ub_priv_t *ub_priv;
3261 uint32_t i;
3262
3263 if (!(port->flag & EMLXS_INI_ENABLED)) {
3264 return (FC_SUCCESS);
3265 }
3266
3267 if (count == 0) {
3268 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3269 "fca_ub_free: Nothing to do. count=%d token[0]=%p", count,
3270 tokens[0]);
3271
3272 return (FC_SUCCESS);
3273 }
3274
3275 if (!(port->flag & EMLXS_INI_BOUND)) {
3276 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3277 "fca_ub_free: Port not bound. count=%d token[0]=%p", count,
3278 tokens[0]);
3279
3280 return (FC_SUCCESS);
3281 }
3282
3283 mutex_enter(&EMLXS_UB_LOCK);
3284
3285 if (!port->ub_pool) {
3286 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3287 "fca_ub_free failed: No pools! count=%d token[0]=%p", count,
3288 tokens[0]);
3289
3290 mutex_exit(&EMLXS_UB_LOCK);
3291 return (FC_UB_BADTOKEN);
3292 }
3293
3294 /* Process buffer list */
3295 for (i = 0; i < count; i++) {
3296 ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]);
3297
3298 if (!ubp) {
3299 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3300 "fca_ub_free failed: count=%d tokens[%d]=0", count,
3301 i);
3302
3303 mutex_exit(&EMLXS_UB_LOCK);
3304 return (FC_UB_BADTOKEN);
3305 }
3306
3307 /* Mark buffer unavailable */
3308 ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3309
3310 if (!ub_priv || (ub_priv == (emlxs_ub_priv_t *)DEAD_PTR)) {
3311 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3312 "fca_ub_free failed: Dead buffer found. ubp=%p",
3313 ubp);
3314
3315 mutex_exit(&EMLXS_UB_LOCK);
3316 return (FC_UB_BADTOKEN);
3317 }
3318
3319 ub_priv->available = 0;
3320
3321 /* Mark one less buffer available in the parent pool */
3322 pool = ub_priv->pool;
3323
3324 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
3325 "fca_ub_free: ubp=%p token=%x (%d,%d,%d,%d)", ubp,
3326 ub_priv->token, pool->pool_nentries,
3327 pool->pool_available - 1, pool->pool_free,
3328 pool->pool_free_resv);
3329
3330 if (pool->pool_available) {
3331 pool->pool_available--;
3332
3333 /* Check if pool can be destroyed */
3334 if ((pool->pool_available == 0) &&
3335 (pool->pool_free + pool->pool_free_resv ==
3336 pool->pool_nentries)) {
3337 emlxs_ub_destroy(port, pool);
3338 }
3339 }
3340 }
3341
3342 mutex_exit(&EMLXS_UB_LOCK);
3343
3344 return (FC_SUCCESS);
3345
3346 } /* emlxs_fca_ub_free() */
3347
3348
3349 /* EMLXS_UB_LOCK must be held when calling this routine */
3350 extern void
3351 emlxs_ub_destroy(emlxs_port_t *port, emlxs_unsol_buf_t *pool)
3352 {
3353 emlxs_hba_t *hba = HBA;
3354 emlxs_unsol_buf_t *next;
3355 emlxs_unsol_buf_t *prev;
3356 fc_unsol_buf_t *ubp;
3357 uint32_t i;
3358
3359 /* Remove the pool object from the pool list */
3360 next = pool->pool_next;
3361 prev = pool->pool_prev;
3362
3363 if (port->ub_pool == pool) {
3364 port->ub_pool = next;
3365 }
3366
3367 if (prev) {
3368 prev->pool_next = next;
3369 }
3370
3371 if (next) {
3372 next->pool_prev = prev;
3373 }
3374
3375 pool->pool_prev = NULL;
3376 pool->pool_next = NULL;
3377
3378 /* Clear the post counts */
3379 switch (pool->pool_type) {
3380 case FC_TYPE_IS8802_SNAP:
3381 port->ub_post[hba->channel_ip] -= pool->pool_nentries;
3382 break;
3383
3384 case FC_TYPE_EXTENDED_LS:
3385 port->ub_post[hba->channel_els] -= pool->pool_nentries;
3386 break;
3387
3388 case FC_TYPE_FC_SERVICES:
3389 port->ub_post[hba->channel_ct] -= pool->pool_nentries;
3390 break;
3391 }
3392
3393 /* Now free the pool memory */
3394 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3395 "ub_destroy: pool=%p type=%d size=%d count=%d", pool,
3396 pool->pool_type, pool->pool_buf_size, pool->pool_nentries);
3397
3398 /* Process the array of buffer objects in the pool */
3399 for (i = 0; i < pool->pool_nentries; i++) {
3400 /* Get the buffer object */
3401 ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[i];
3402
3403 /* Free the memory the buffer object represents */
3404 kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
3405
3406 /* Free the private area of the buffer object */
3407 kmem_free(ubp->ub_fca_private, sizeof (emlxs_ub_priv_t));
3408 }
3409
3410 /* Free the array of buffer objects in the pool */
3411 kmem_free((caddr_t)pool->fc_ubufs,
3412 (sizeof (fc_unsol_buf_t)*pool->pool_nentries));
3413
3414 /* Free the pool object */
3415 kmem_free((caddr_t)pool, sizeof (emlxs_unsol_buf_t));
3416
3417 return;
3418
3419 } /* emlxs_ub_destroy() */
3420
3421
3422 /*ARGSUSED*/
3423 extern int
3424 emlxs_fca_pkt_abort(opaque_t fca_port_handle, fc_packet_t *pkt, int32_t sleep)
3425 {
3426 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
3427 emlxs_hba_t *hba = HBA;
3428 emlxs_config_t *cfg = &CFG;
3429
3430 emlxs_buf_t *sbp;
3431 NODELIST *nlp;
3432 NODELIST *prev_nlp;
3433 uint8_t channelno;
3434 CHANNEL *cp;
3435 clock_t pkt_timeout;
3436 clock_t timer;
3437 clock_t time;
3438 int32_t pkt_ret;
3439 IOCBQ *iocbq;
3440 IOCBQ *next;
3441 IOCBQ *prev;
3442 uint32_t found;
3443 uint32_t pass = 0;
3444
3445 sbp = (emlxs_buf_t *)pkt->pkt_fca_private;
3446 iocbq = &sbp->iocbq;
3447 nlp = (NODELIST *)sbp->node;
3448 cp = (CHANNEL *)sbp->channel;
3449 channelno = (cp) ? cp->channelno : 0;
3450
3451 if (!(port->flag & EMLXS_INI_BOUND)) {
3452 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3453 "Port not bound.");
3454 return (FC_UNBOUND);
3455 }
3456
3457 if (!(hba->flag & FC_ONLINE_MODE)) {
3458 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3459 "Adapter offline.");
3460 return (FC_OFFLINE);
3461 }
3462
3463 /* ULP requires the aborted pkt to be completed */
3464 /* back to ULP before returning from this call. */
3465 /* SUN knows of problems with this call so they suggested that we */
3466 /* always return a FC_FAILURE for this call, until it is worked out. */
3467
3468 /* Check if pkt is no good */
3469 if (!(sbp->pkt_flags & PACKET_VALID) ||
3470 (sbp->pkt_flags & PACKET_ULP_OWNED)) {
3471 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3472 "Bad sbp. flags=%x", sbp->pkt_flags);
3473 return (FC_FAILURE);
3474 }
3475
3476 /* Tag this now */
3477 /* This will prevent any thread except ours from completing it */
3478 mutex_enter(&sbp->mtx);
3479
3480 /* Check again if we still own this */
3481 if (!(sbp->pkt_flags & PACKET_VALID) ||
3482 (sbp->pkt_flags & PACKET_ULP_OWNED)) {
3483 mutex_exit(&sbp->mtx);
3484 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3485 "Bad sbp. flags=%x", sbp->pkt_flags);
3486 return (FC_FAILURE);
3487 }
3488
3489 /* Check if pkt is a real polled command */
3490 if (!(sbp->pkt_flags & PACKET_IN_ABORT) &&
3491 (sbp->pkt_flags & PACKET_POLLED)) {
3492 mutex_exit(&sbp->mtx);
3493
3494 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3495 "Attempting to abort a polled I/O. sbp=%p flags=%x", sbp,
3496 sbp->pkt_flags);
3497 return (FC_FAILURE);
3498 }
3499
3500 sbp->pkt_flags |= PACKET_POLLED;
3501 sbp->pkt_flags |= PACKET_IN_ABORT;
3502
3503 if (sbp->pkt_flags & (PACKET_IN_COMPLETION | PACKET_IN_FLUSH |
3504 PACKET_IN_TIMEOUT)) {
3505 mutex_exit(&sbp->mtx);
3506
3507 /* Do nothing, pkt already on its way out */
3508 goto done;
3509 }
3510
3511 mutex_exit(&sbp->mtx);
3512
3513 begin:
3514 pass++;
3515
3516 mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
3517
3518 if (sbp->pkt_flags & PACKET_IN_TXQ) {
3519 /* Find it on the queue */
3520 found = 0;
3521 if (iocbq->flag & IOCB_PRIORITY) {
3522 /* Search the priority queue */
3523 prev = NULL;
3524 next = (IOCBQ *) nlp->nlp_ptx[channelno].q_first;
3525
3526 while (next) {
3527 if (next == iocbq) {
3528 /* Remove it */
3529 if (prev) {
3530 prev->next = iocbq->next;
3531 }
3532
3533 if (nlp->nlp_ptx[channelno].q_last ==
3534 (void *)iocbq) {
3535 nlp->nlp_ptx[channelno].q_last =
3536 (void *)prev;
3537 }
3538
3539 if (nlp->nlp_ptx[channelno].q_first ==
3540 (void *)iocbq) {
3541 nlp->nlp_ptx[channelno].
3542 q_first =
3543 (void *)iocbq->next;
3544 }
3545
3546 nlp->nlp_ptx[channelno].q_cnt--;
3547 iocbq->next = NULL;
3548 found = 1;
3549 break;
3550 }
3551
3552 prev = next;
3553 next = next->next;
3554 }
3555 } else {
3556 /* Search the normal queue */
3557 prev = NULL;
3558 next = (IOCBQ *) nlp->nlp_tx[channelno].q_first;
3559
3560 while (next) {
3561 if (next == iocbq) {
3562 /* Remove it */
3563 if (prev) {
3564 prev->next = iocbq->next;
3565 }
3566
3567 if (nlp->nlp_tx[channelno].q_last ==
3568 (void *)iocbq) {
3569 nlp->nlp_tx[channelno].q_last =
3570 (void *)prev;
3571 }
3572
3573 if (nlp->nlp_tx[channelno].q_first ==
3574 (void *)iocbq) {
3575 nlp->nlp_tx[channelno].q_first =
3576 (void *)iocbq->next;
3577 }
3578
3579 nlp->nlp_tx[channelno].q_cnt--;
3580 iocbq->next = NULL;
3581 found = 1;
3582 break;
3583 }
3584
3585 prev = next;
3586 next = (IOCBQ *) next->next;
3587 }
3588 }
3589
3590 if (!found) {
3591 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3592 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
3593 "I/O not found in driver. sbp=%p flags=%x", sbp,
3594 sbp->pkt_flags);
3595 goto done;
3596 }
3597
3598 /* Check if node still needs servicing */
3599 if ((nlp->nlp_ptx[channelno].q_first) ||
3600 (nlp->nlp_tx[channelno].q_first &&
3601 !(nlp->nlp_flag[channelno] & NLP_CLOSED))) {
3602
3603 /*
3604 * If this is the base node,
3605 * then don't shift the pointers
3606 */
3607 /* We want to drain the base node before moving on */
3608 if (!nlp->nlp_base) {
3609 /* Just shift channel queue */
3610 /* pointers to next node */
3611 cp->nodeq.q_last = (void *) nlp;
3612 cp->nodeq.q_first = nlp->nlp_next[channelno];
3613 }
3614 } else {
3615 /* Remove node from channel queue */
3616
3617 /* If this is the only node on list */
3618 if (cp->nodeq.q_first == (void *)nlp &&
3619 cp->nodeq.q_last == (void *)nlp) {
3620 cp->nodeq.q_last = NULL;
3621 cp->nodeq.q_first = NULL;
3622 cp->nodeq.q_cnt = 0;
3623 } else if (cp->nodeq.q_first == (void *)nlp) {
3624 cp->nodeq.q_first = nlp->nlp_next[channelno];
3625 ((NODELIST *) cp->nodeq.q_last)->
3626 nlp_next[channelno] = cp->nodeq.q_first;
3627 cp->nodeq.q_cnt--;
3628 } else {
3629 /*
3630 				 * This is a little more difficult: find the
3631 				 * previous node in the circular channel queue.
3632 */
3633 prev_nlp = nlp;
3634 while (prev_nlp->nlp_next[channelno] != nlp) {
3635 prev_nlp = prev_nlp->
3636 nlp_next[channelno];
3637 }
3638
3639 prev_nlp->nlp_next[channelno] =
3640 nlp->nlp_next[channelno];
3641
3642 if (cp->nodeq.q_last == (void *)nlp) {
3643 cp->nodeq.q_last = (void *)prev_nlp;
3644 }
3645 cp->nodeq.q_cnt--;
3646
3647 }
3648
3649 /* Clear node */
3650 nlp->nlp_next[channelno] = NULL;
3651 }
3652
3653 /* Free the ULPIOTAG and the bmp */
3654 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3655 emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
3656 } else {
3657 (void) emlxs_unregister_pkt(cp, sbp->iotag, 1);
3658 }
3659
3660
3661 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3662
3663 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
3664 IOERR_ABORT_REQUESTED, 1);
3665
3666 goto done;
3667 }
3668
3669 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3670
3671
3672 /* Check the chip queue */
3673 mutex_enter(&EMLXS_FCTAB_LOCK);
3674
3675 if ((sbp->pkt_flags & PACKET_IN_CHIPQ) &&
3676 !(sbp->pkt_flags & PACKET_XRI_CLOSED) &&
3677 (sbp == hba->fc_table[sbp->iotag])) {
3678
3679 /* Create the abort IOCB */
3680 if (hba->state >= FC_LINK_UP) {
3681 iocbq =
3682 emlxs_create_abort_xri_cn(port, sbp->node,
3683 sbp->iotag, cp, sbp->class, ABORT_TYPE_ABTS);
3684
3685 mutex_enter(&sbp->mtx);
3686 sbp->pkt_flags |= PACKET_XRI_CLOSED;
3687 sbp->ticks =
3688 hba->timer_tics + (4 * hba->fc_ratov) + 10;
3689 sbp->abort_attempts++;
3690 mutex_exit(&sbp->mtx);
3691 } else {
3692 iocbq =
3693 emlxs_create_close_xri_cn(port, sbp->node,
3694 sbp->iotag, cp);
3695
3696 mutex_enter(&sbp->mtx);
3697 sbp->pkt_flags |= PACKET_XRI_CLOSED;
3698 sbp->ticks = hba->timer_tics + 30;
3699 sbp->abort_attempts++;
3700 mutex_exit(&sbp->mtx);
3701 }
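		/*
		 * With the link up an ABTS abort is issued; otherwise the
		 * XRI is simply closed. sbp->ticks gives the abort time to
		 * complete before the driver's timer forces completion.
		 */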
3702
3703 mutex_exit(&EMLXS_FCTAB_LOCK);
3704
3705 /* Send this iocbq */
3706 if (iocbq) {
3707 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
3708 iocbq = NULL;
3709 }
3710
3711 goto done;
3712 }
3713
3714 mutex_exit(&EMLXS_FCTAB_LOCK);
3715
3716 /* Pkt was not on any queues */
3717
3718 /* Check again if we still own this */
3719 if (!(sbp->pkt_flags & PACKET_VALID) ||
3720 (sbp->pkt_flags &
3721 (PACKET_ULP_OWNED | PACKET_IN_COMPLETION |
3722 PACKET_IN_FLUSH | PACKET_IN_TIMEOUT))) {
3723 goto done;
3724 }
3725
3726 if (!sleep) {
3727 return (FC_FAILURE);
3728 }
3729
3730 /* Apparently the pkt was not found. Let's delay and try again */
3731 if (pass < 5) {
3732 delay(drv_usectohz(5000000)); /* 5 seconds */
3733
3734 /* Check again if we still own this */
3735 if (!(sbp->pkt_flags & PACKET_VALID) ||
3736 (sbp->pkt_flags &
3737 (PACKET_ULP_OWNED | PACKET_IN_COMPLETION |
3738 PACKET_IN_FLUSH | PACKET_IN_TIMEOUT))) {
3739 goto done;
3740 }
3741
3742 goto begin;
3743 }
3744
3745 force_it:
3746
3747 /* Force the completion now */
3748 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3749 	    "Abort: Completing an IO that's not outstanding: %x", sbp->iotag);
3750
3751 /* Now complete it */
3752 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, IOERR_ABORT_REQUESTED,
3753 1);
3754
3755 done:
3756
3757 /* Now wait for the pkt to complete */
3758 if (!(sbp->pkt_flags & PACKET_COMPLETED)) {
3759 /* Set thread timeout */
3760 pkt_timeout = emlxs_timeout(hba, 30);
3761
3762 /* Check for panic situation */
3763 if (ddi_in_panic()) {
3764
3765 /*
3766 * In panic situations there will be one thread with no
3767 			 * interrupts (hard or soft) and no timers
3768 */
3769
3770 /*
3771 * We must manually poll everything in this thread
3772 * to keep the driver going.
3773 */
3774
3775 /* Keep polling the chip until our IO is completed */
3776 (void) drv_getparm(LBOLT, &time);
3777 timer = time + drv_usectohz(1000000);
3778 while ((time < pkt_timeout) &&
3779 !(sbp->pkt_flags & PACKET_COMPLETED)) {
3780 EMLXS_SLI_POLL_INTR(hba);
3781 (void) drv_getparm(LBOLT, &time);
3782
3783 /* Trigger timer checks periodically */
3784 if (time >= timer) {
3785 emlxs_timer_checks(hba);
3786 timer = time + drv_usectohz(1000000);
3787 }
3788 }
3789 } else {
3790 /* Wait for IO completion or pkt_timeout */
3791 mutex_enter(&EMLXS_PKT_LOCK);
3792 pkt_ret = 0;
3793 while ((pkt_ret != -1) &&
3794 !(sbp->pkt_flags & PACKET_COMPLETED)) {
3795 pkt_ret =
3796 cv_timedwait(&EMLXS_PKT_CV,
3797 &EMLXS_PKT_LOCK, pkt_timeout);
3798 }
3799 mutex_exit(&EMLXS_PKT_LOCK);
3800 }
3801
3802 /* Check if pkt_timeout occurred. This is not good. */
3803 /* Something happened to our IO. */
3804 if (!(sbp->pkt_flags & PACKET_COMPLETED)) {
3805 /* Force the completion now */
3806 goto force_it;
3807 }
3808 }
3809 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
3810 emlxs_unswap_pkt(sbp);
3811 #endif /* EMLXS_MODREV2X */
3812
3813 /* Check again if we still own this */
3814 if ((sbp->pkt_flags & PACKET_VALID) &&
3815 !(sbp->pkt_flags & PACKET_ULP_OWNED)) {
3816 mutex_enter(&sbp->mtx);
3817 if ((sbp->pkt_flags & PACKET_VALID) &&
3818 !(sbp->pkt_flags & PACKET_ULP_OWNED)) {
3819 sbp->pkt_flags |= PACKET_ULP_OWNED;
3820 }
3821 mutex_exit(&sbp->mtx);
3822 }
3823
3824 #ifdef ULP_PATCH5
3825 if (cfg[CFG_ENABLE_PATCH].current & ULP_PATCH5) {
3826 return (FC_FAILURE);
3827 }
3828 #endif /* ULP_PATCH5 */
3829
3830 return (FC_SUCCESS);
3831
3832 } /* emlxs_fca_pkt_abort() */
3833
3834
3835 static void
3836 emlxs_abort_all(emlxs_hba_t *hba, uint32_t *tx, uint32_t *chip)
3837 {
3838 emlxs_port_t *port = &PPORT;
3839 fc_packet_t *pkt;
3840 emlxs_buf_t *sbp;
3841 uint32_t i;
3842 uint32_t flg;
3843 uint32_t rc;
3844 uint32_t txcnt;
3845 uint32_t chipcnt;
3846
3847 txcnt = 0;
3848 chipcnt = 0;
3849
3850 mutex_enter(&EMLXS_FCTAB_LOCK);
3851 for (i = 0; i < hba->max_iotag; i++) {
3852 sbp = hba->fc_table[i];
3853 if (sbp == NULL || sbp == STALE_PACKET) {
3854 continue;
3855 }
3856 flg = (sbp->pkt_flags & PACKET_IN_CHIPQ);
3857 pkt = PRIV2PKT(sbp);
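		/*
		 * Drop the table lock across the abort call; it is
		 * reacquired before the next entry is examined.
		 */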
3858 mutex_exit(&EMLXS_FCTAB_LOCK);
3859 rc = emlxs_fca_pkt_abort(port, pkt, 0);
3860 if (rc == FC_SUCCESS) {
3861 if (flg) {
3862 chipcnt++;
3863 } else {
3864 txcnt++;
3865 }
3866 }
3867 mutex_enter(&EMLXS_FCTAB_LOCK);
3868 }
3869 mutex_exit(&EMLXS_FCTAB_LOCK);
3870 *tx = txcnt;
3871 *chip = chipcnt;
3872 } /* emlxs_abort_all() */
3873
3874
3875 extern int32_t
3876 emlxs_reset(emlxs_port_t *port, uint32_t cmd)
3877 {
3878 emlxs_hba_t *hba = HBA;
3879 int rval;
3880 int i = 0;
3881 int ret;
3882 clock_t timeout;
3883
3884 switch (cmd) {
3885 case FC_FCA_LINK_RESET:
3886
3887 mutex_enter(&EMLXS_PORT_LOCK);
3888 if (!(hba->flag & FC_ONLINE_MODE) ||
3889 (hba->state <= FC_LINK_DOWN)) {
3890 mutex_exit(&EMLXS_PORT_LOCK);
3891 return (FC_SUCCESS);
3892 }
3893
3894 if (hba->reset_state &
3895 (FC_LINK_RESET_INP | FC_PORT_RESET_INP)) {
3896 mutex_exit(&EMLXS_PORT_LOCK);
3897 return (FC_FAILURE);
3898 }
3899
3900 hba->reset_state |= FC_LINK_RESET_INP;
3901 hba->reset_request |= FC_LINK_RESET;
3902 mutex_exit(&EMLXS_PORT_LOCK);
3903
3904 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3905 "Resetting Link.");
3906
3907 mutex_enter(&EMLXS_LINKUP_LOCK);
3908 hba->linkup_wait_flag = TRUE;
3909 mutex_exit(&EMLXS_LINKUP_LOCK);
3910
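	/*
	 * Request the link reset, then wait below for linkup_wait_flag
	 * to clear; this is expected to be done by the link-up
	 * processing path via EMLXS_LINKUP_CV.
	 */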
3911 if (emlxs_reset_link(hba, 1, 1)) {
3912 mutex_enter(&EMLXS_LINKUP_LOCK);
3913 hba->linkup_wait_flag = FALSE;
3914 mutex_exit(&EMLXS_LINKUP_LOCK);
3915
3916 mutex_enter(&EMLXS_PORT_LOCK);
3917 hba->reset_state &= ~FC_LINK_RESET_INP;
3918 hba->reset_request &= ~FC_LINK_RESET;
3919 mutex_exit(&EMLXS_PORT_LOCK);
3920
3921 return (FC_FAILURE);
3922 }
3923
3924 mutex_enter(&EMLXS_LINKUP_LOCK);
3925 timeout = emlxs_timeout(hba, 60);
3926 ret = 0;
3927 while ((ret != -1) && (hba->linkup_wait_flag == TRUE)) {
3928 ret =
3929 cv_timedwait(&EMLXS_LINKUP_CV, &EMLXS_LINKUP_LOCK,
3930 timeout);
3931 }
3932
3933 hba->linkup_wait_flag = FALSE;
3934 mutex_exit(&EMLXS_LINKUP_LOCK);
3935
3936 mutex_enter(&EMLXS_PORT_LOCK);
3937 hba->reset_state &= ~FC_LINK_RESET_INP;
3938 hba->reset_request &= ~FC_LINK_RESET;
3939 mutex_exit(&EMLXS_PORT_LOCK);
3940
3941 if (ret == -1) {
3942 return (FC_FAILURE);
3943 }
3944
3945 return (FC_SUCCESS);
3946
3947 case FC_FCA_CORE:
3948 #ifdef DUMP_SUPPORT
3949 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3950 "Dumping Core.");
3951
3952 /* Schedule a USER dump */
3953 emlxs_dump(hba, EMLXS_USER_DUMP, 0, 0);
3954
3955 /* Wait for dump to complete */
3956 emlxs_dump_wait(hba);
3957
3958 return (FC_SUCCESS);
3959 #endif /* DUMP_SUPPORT */
3960
3961 case FC_FCA_RESET:
3962 case FC_FCA_RESET_CORE:
3963
3964 mutex_enter(&EMLXS_PORT_LOCK);
3965 if (hba->reset_state & FC_PORT_RESET_INP) {
3966 mutex_exit(&EMLXS_PORT_LOCK);
3967 return (FC_FAILURE);
3968 }
3969
3970 hba->reset_state |= FC_PORT_RESET_INP;
3971 hba->reset_request |= (FC_PORT_RESET | FC_LINK_RESET);
3972
3973 /* wait for any pending link resets to complete */
3974 while ((hba->reset_state & FC_LINK_RESET_INP) &&
3975 (i++ < 1000)) {
3976 mutex_exit(&EMLXS_PORT_LOCK);
3977 delay(drv_usectohz(1000));
3978 mutex_enter(&EMLXS_PORT_LOCK);
3979 }
3980
3981 if (hba->reset_state & FC_LINK_RESET_INP) {
3982 hba->reset_state &= ~FC_PORT_RESET_INP;
3983 hba->reset_request &= ~(FC_PORT_RESET | FC_LINK_RESET);
3984 mutex_exit(&EMLXS_PORT_LOCK);
3985 return (FC_FAILURE);
3986 }
3987 mutex_exit(&EMLXS_PORT_LOCK);
3988
3989 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3990 "Resetting Adapter.");
3991
3992 rval = FC_SUCCESS;
3993
3994 if (emlxs_offline(hba, 0) == 0) {
3995 (void) emlxs_online(hba);
3996 } else {
3997 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3998 "Adapter reset failed. Device busy.");
3999
4000 rval = FC_DEVICE_BUSY;
4001 }
4002
4003 mutex_enter(&EMLXS_PORT_LOCK);
4004 hba->reset_state &= ~FC_PORT_RESET_INP;
4005 hba->reset_request &= ~(FC_PORT_RESET | FC_LINK_RESET);
4006 mutex_exit(&EMLXS_PORT_LOCK);
4007
4008 return (rval);
4009
4010 case EMLXS_DFC_RESET_ALL:
4011 case EMLXS_DFC_RESET_ALL_FORCE_DUMP:
4012
4013 mutex_enter(&EMLXS_PORT_LOCK);
4014 if (hba->reset_state & FC_PORT_RESET_INP) {
4015 mutex_exit(&EMLXS_PORT_LOCK);
4016 return (FC_FAILURE);
4017 }
4018
4019 hba->reset_state |= FC_PORT_RESET_INP;
4020 hba->reset_request |= (FC_PORT_RESET | FC_LINK_RESET);
4021
4022 /* wait for any pending link resets to complete */
4023 while ((hba->reset_state & FC_LINK_RESET_INP) &&
4024 (i++ < 1000)) {
4025 mutex_exit(&EMLXS_PORT_LOCK);
4026 delay(drv_usectohz(1000));
4027 mutex_enter(&EMLXS_PORT_LOCK);
4028 }
4029
4030 if (hba->reset_state & FC_LINK_RESET_INP) {
4031 hba->reset_state &= ~FC_PORT_RESET_INP;
4032 hba->reset_request &= ~(FC_PORT_RESET | FC_LINK_RESET);
4033 mutex_exit(&EMLXS_PORT_LOCK);
4034 return (FC_FAILURE);
4035 }
4036 mutex_exit(&EMLXS_PORT_LOCK);
4037
4038 rval = FC_SUCCESS;
4039
4040 if (cmd == EMLXS_DFC_RESET_ALL) {
4041 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4042 "Resetting Adapter (All Firmware Reset).");
4043
4044 emlxs_sli4_hba_reset_all(hba, 0);
4045 } else {
4046 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4047 "Resetting Adapter "
4048 "(All Firmware Reset, Force Dump).");
4049
4050 emlxs_sli4_hba_reset_all(hba, 1);
4051 }
4052
4053 mutex_enter(&EMLXS_PORT_LOCK);
4054 hba->reset_state &= ~FC_PORT_RESET_INP;
4055 hba->reset_request &= ~(FC_PORT_RESET | FC_LINK_RESET);
4056 mutex_exit(&EMLXS_PORT_LOCK);
4057
4058 /* Wait for the timer thread to detect the error condition */
4059 delay(drv_usectohz(1000000));
4060
4061 /* Wait up to 30 seconds for the HBA to re-initialize */
4062 i = 0;
4063 mutex_enter(&EMLXS_PORT_LOCK);
4064 while (!(hba->flag & FC_ONLINE_MODE) && (i++ < 30)) {
4065 mutex_exit(&EMLXS_PORT_LOCK);
4066 delay(drv_usectohz(1000000));
4067 mutex_enter(&EMLXS_PORT_LOCK);
4068 }
4069
4070 if (!(hba->flag & FC_ONLINE_MODE)) {
4071 rval = FC_FAILURE;
4072 }
4073
4074 mutex_exit(&EMLXS_PORT_LOCK);
4075
4076 return (rval);
4077
4078 default:
4079 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4080 "reset: Unknown command. cmd=%x", cmd);
4081
4082 break;
4083 }
4084
4085 return (FC_FAILURE);
4086
4087 } /* emlxs_reset() */
4088
4089
4090 extern int32_t
4091 emlxs_fca_reset(opaque_t fca_port_handle, uint32_t cmd)
4092 {
4093 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
4094 emlxs_hba_t *hba = HBA;
4095 int32_t rval;
4096
4097 if (port->mode != MODE_INITIATOR) {
4098 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4099 "fca_reset failed. Port is not in initiator mode.");
4100
4101 return (FC_FAILURE);
4102 }
4103
4104 if (!(port->flag & EMLXS_INI_BOUND)) {
4105 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4106 "fca_reset: Port not bound.");
4107
4108 return (FC_UNBOUND);
4109 }
4110
4111 switch (cmd) {
4112 case FC_FCA_LINK_RESET:
4113 if (hba->fw_flag & FW_UPDATE_NEEDED) {
4114 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4115 "fca_reset: FC_FCA_LINK_RESET -> FC_FCA_RESET");
4116 cmd = FC_FCA_RESET;
4117 } else {
4118 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4119 "fca_reset: FC_FCA_LINK_RESET");
4120 }
4121 break;
4122
4123 case FC_FCA_CORE:
4124 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4125 "fca_reset: FC_FCA_CORE");
4126 break;
4127
4128 case FC_FCA_RESET:
4129 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4130 "fca_reset: FC_FCA_RESET");
4131 break;
4132
4133 case FC_FCA_RESET_CORE:
4134 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4135 "fca_reset: FC_FCA_RESET_CORE");
4136 break;
4137
4138 default:
4139 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4140 "fca_reset: Unknown command. cmd=%x", cmd);
4141 return (FC_FAILURE);
4142 }
4143
4144 if (hba->fw_flag & FW_UPDATE_NEEDED) {
4145 hba->fw_flag |= FW_UPDATE_KERNEL;
4146 }
4147
4148 rval = emlxs_reset(port, cmd);
4149
4150 return (rval);
4151
4152 } /* emlxs_fca_reset() */
4153
4154
4155 extern int
4156 emlxs_fca_port_manage(opaque_t fca_port_handle, fc_fca_pm_t *pm)
4157 {
4158 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
4159 emlxs_hba_t *hba = HBA;
4160 int32_t ret;
4161 emlxs_vpd_t *vpd = &VPD;
4162
4163 ret = FC_SUCCESS;
4164
4165 #ifdef IDLE_TIMER
4166 emlxs_pm_busy_component(hba);
4167 #endif /* IDLE_TIMER */
4168
4169 switch (pm->pm_cmd_code) {
4170
4171 case FC_PORT_GET_FW_REV:
4172 {
4173 char buffer[128];
4174
4175 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4176 "fca_port_manage: FC_PORT_GET_FW_REV");
4177
4178 (void) snprintf(buffer, (sizeof (buffer)-1),
4179 "%s %s", hba->model_info.model,
4180 vpd->fw_version);
4181 bzero(pm->pm_data_buf, pm->pm_data_len);
4182
4183 if (pm->pm_data_len < strlen(buffer) + 1) {
4184 ret = FC_NOMEM;
4185
4186 break;
4187 }
4188
4189 (void) strncpy(pm->pm_data_buf, buffer,
4190 (pm->pm_data_len-1));
4191 break;
4192 }
4193
4194 case FC_PORT_GET_FCODE_REV:
4195 {
4196 char buffer[128];
4197
4198 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4199 "fca_port_manage: FC_PORT_GET_FCODE_REV");
4200
4201 /* Force update here just to be sure */
4202 emlxs_get_fcode_version(hba);
4203
4204 (void) snprintf(buffer, (sizeof (buffer)-1),
4205 "%s %s", hba->model_info.model,
4206 vpd->fcode_version);
4207 bzero(pm->pm_data_buf, pm->pm_data_len);
4208
4209 if (pm->pm_data_len < strlen(buffer) + 1) {
4210 ret = FC_NOMEM;
4211 break;
4212 }
4213
4214 (void) strncpy(pm->pm_data_buf, buffer,
4215 (pm->pm_data_len-1));
4216 break;
4217 }
4218
4219 case FC_PORT_GET_DUMP_SIZE:
4220 {
4221 #ifdef DUMP_SUPPORT
4222 uint32_t dump_size = 0;
4223
4224 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4225 "fca_port_manage: FC_PORT_GET_DUMP_SIZE");
4226
4227 if (pm->pm_data_len < sizeof (uint32_t)) {
4228 ret = FC_NOMEM;
4229 break;
4230 }
4231
4232 (void) emlxs_get_dump(hba, NULL, &dump_size);
4233
4234 *((uint32_t *)pm->pm_data_buf) = dump_size;
4235
4236 #else
4237 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4238 "fca_port_manage: FC_PORT_GET_DUMP_SIZE unsupported.");
4239
4240 #endif /* DUMP_SUPPORT */
4241
4242 break;
4243 }
4244
4245 case FC_PORT_GET_DUMP:
4246 {
4247 #ifdef DUMP_SUPPORT
4248 uint32_t dump_size = 0;
4249
4250 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4251 "fca_port_manage: FC_PORT_GET_DUMP");
4252
4253 (void) emlxs_get_dump(hba, NULL, &dump_size);
4254
4255 if (pm->pm_data_len < dump_size) {
4256 ret = FC_NOMEM;
4257 break;
4258 }
4259
4260 (void) emlxs_get_dump(hba, (uint8_t *)pm->pm_data_buf,
4261 (uint32_t *)&dump_size);
4262 #else
4263 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4264 "fca_port_manage: FC_PORT_GET_DUMP unsupported.");
4265
4266 #endif /* DUMP_SUPPORT */
4267
4268 break;
4269 }
4270
4271 case FC_PORT_FORCE_DUMP:
4272 {
4273 #ifdef DUMP_SUPPORT
4274 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4275 "fca_port_manage: FC_PORT_FORCE_DUMP");
4276
4277 /* Schedule a USER dump */
4278 emlxs_dump(hba, EMLXS_USER_DUMP, 0, 0);
4279
4280 /* Wait for dump to complete */
4281 emlxs_dump_wait(hba);
4282 #else
4283 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4284 "fca_port_manage: FC_PORT_FORCE_DUMP unsupported.");
4285
4286 #endif /* DUMP_SUPPORT */
4287 break;
4288 }
4289
4290 case FC_PORT_LINK_STATE:
4291 {
4292 uint32_t *link_state;
4293
4294 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4295 "fca_port_manage: FC_PORT_LINK_STATE");
4296
4297 if (pm->pm_stat_len != sizeof (*link_state)) {
4298 ret = FC_NOMEM;
4299 break;
4300 }
4301
4302 if (pm->pm_cmd_buf != NULL) {
4303 /*
4304 * Can't look beyond the FCA port.
4305 */
4306 ret = FC_INVALID_REQUEST;
4307 break;
4308 }
4309
4310 link_state = (uint32_t *)pm->pm_stat_buf;
4311
4312 /* Set the state */
4313 if (hba->state >= FC_LINK_UP) {
4314 /* Check for loop topology */
4315 if (hba->topology == TOPOLOGY_LOOP) {
4316 *link_state = FC_STATE_LOOP;
4317 } else {
4318 *link_state = FC_STATE_ONLINE;
4319 }
4320
4321 /* Set the link speed */
4322 switch (hba->linkspeed) {
4323 case LA_2GHZ_LINK:
4324 *link_state |= FC_STATE_2GBIT_SPEED;
4325 break;
4326 case LA_4GHZ_LINK:
4327 *link_state |= FC_STATE_4GBIT_SPEED;
4328 break;
4329 case LA_8GHZ_LINK:
4330 *link_state |= FC_STATE_8GBIT_SPEED;
4331 break;
4332 case LA_10GHZ_LINK:
4333 *link_state |= FC_STATE_10GBIT_SPEED;
4334 break;
4335 case LA_16GHZ_LINK:
4336 *link_state |= FC_STATE_16GBIT_SPEED;
4337 break;
4338 case LA_32GHZ_LINK:
4339 *link_state |= FC_STATE_32GBIT_SPEED;
4340 break;
4341 case LA_1GHZ_LINK:
4342 default:
4343 *link_state |= FC_STATE_1GBIT_SPEED;
4344 break;
4345 }
4346 } else {
4347 *link_state = FC_STATE_OFFLINE;
4348 }
4349
4350 break;
4351 }
4352
4353
4354 case FC_PORT_ERR_STATS:
4355 case FC_PORT_RLS:
4356 {
4357 MAILBOXQ *mbq;
4358 MAILBOX *mb;
4359 fc_rls_acc_t *bp;
4360
4361 if (!(hba->flag & FC_ONLINE_MODE)) {
4362 return (FC_OFFLINE);
4363 }
4364 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4365 "fca_port_manage: FC_PORT_RLS / FC_PORT_ERR_STATS");
4366
4367 if (pm->pm_data_len < sizeof (fc_rls_acc_t)) {
4368 ret = FC_NOMEM;
4369 break;
4370 }
4371
4372 if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba,
4373 MEM_MBOX)) == 0) {
4374 ret = FC_NOMEM;
4375 break;
4376 }
4377 mb = (MAILBOX *)mbq;
4378
4379 emlxs_mb_read_lnk_stat(hba, mbq);
4380 if (EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_WAIT, 0)
4381 != MBX_SUCCESS) {
4382 ret = FC_PBUSY;
4383 } else {
4384 bp = (fc_rls_acc_t *)pm->pm_data_buf;
4385
4386 bp->rls_link_fail = mb->un.varRdLnk.linkFailureCnt;
4387 bp->rls_sync_loss = mb->un.varRdLnk.lossSyncCnt;
4388 bp->rls_sig_loss = mb->un.varRdLnk.lossSignalCnt;
4389 bp->rls_prim_seq_err = mb->un.varRdLnk.primSeqErrCnt;
4390 bp->rls_invalid_word =
4391 mb->un.varRdLnk.invalidXmitWord;
4392 bp->rls_invalid_crc = mb->un.varRdLnk.crcCnt;
4393 }
4394
4395 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
4396 break;
4397 }
4398
4399 case FC_PORT_DOWNLOAD_FW:
4400 if (!(hba->flag & FC_ONLINE_MODE)) {
4401 return (FC_OFFLINE);
4402 }
4403 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4404 "fca_port_manage: FC_PORT_DOWNLOAD_FW");
4405 ret = emlxs_fw_download(hba, pm->pm_data_buf,
4406 pm->pm_data_len, 1);
4407 break;
4408
4409 case FC_PORT_DOWNLOAD_FCODE:
4410 if (!(hba->flag & FC_ONLINE_MODE)) {
4411 return (FC_OFFLINE);
4412 }
4413 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4414 "fca_port_manage: FC_PORT_DOWNLOAD_FCODE");
4415 ret = emlxs_fw_download(hba, pm->pm_data_buf,
4416 pm->pm_data_len, 1);
4417 break;
4418
4419 case FC_PORT_DIAG:
4420 {
4421 uint32_t errno = 0;
4422 uint32_t did = 0;
4423 uint32_t pattern = 0;
4424
4425 switch (pm->pm_cmd_flags) {
4426 case EMLXS_DIAG_BIU:
4427
4428 if (!(hba->flag & FC_ONLINE_MODE)) {
4429 return (FC_OFFLINE);
4430 }
4431 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4432 "fca_port_manage: DIAG_BIU");
4433
4434 if (pm->pm_data_len) {
4435 pattern = *((uint32_t *)pm->pm_data_buf);
4436 }
4437
4438 errno = emlxs_diag_biu_run(hba, pattern);
4439
4440 if (pm->pm_stat_len == sizeof (errno)) {
4441 *(int *)pm->pm_stat_buf = errno;
4442 }
4443
4444 break;
4445
4446
4447 case EMLXS_DIAG_POST:
4448
4449 if (!(hba->flag & FC_ONLINE_MODE)) {
4450 return (FC_OFFLINE);
4451 }
4452 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4453 "fca_port_manage: DIAG_POST");
4454
4455 errno = emlxs_diag_post_run(hba);
4456
4457 if (pm->pm_stat_len == sizeof (errno)) {
4458 *(int *)pm->pm_stat_buf = errno;
4459 }
4460
4461 break;
4462
4463
4464 case EMLXS_DIAG_ECHO:
4465
4466 if (!(hba->flag & FC_ONLINE_MODE)) {
4467 return (FC_OFFLINE);
4468 }
4469 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4470 "fca_port_manage: DIAG_ECHO");
4471
4472 if (pm->pm_cmd_len != sizeof (uint32_t)) {
4473 ret = FC_INVALID_REQUEST;
4474 break;
4475 }
4476
4477 did = *((uint32_t *)pm->pm_cmd_buf);
4478
4479 if (pm->pm_data_len) {
4480 pattern = *((uint32_t *)pm->pm_data_buf);
4481 }
4482
4483 errno = emlxs_diag_echo_run(port, did, pattern);
4484
4485 if (pm->pm_stat_len == sizeof (errno)) {
4486 *(int *)pm->pm_stat_buf = errno;
4487 }
4488
4489 break;
4490
4491
4492 case EMLXS_PARM_GET_NUM:
4493 {
4494 uint32_t *num;
4495 emlxs_config_t *cfg;
4496 uint32_t i;
4497 uint32_t count;
4498 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4499 "fca_port_manage: PARM_GET_NUM");
4500
4501 if (pm->pm_stat_len < sizeof (uint32_t)) {
4502 ret = FC_NOMEM;
4503 break;
4504 }
4505
4506 num = (uint32_t *)pm->pm_stat_buf;
4507 count = 0;
4508 cfg = &CFG;
4509 for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
4510 if (!(cfg->flags & PARM_HIDDEN)) {
4511 count++;
4512 }
4513
4514 }
4515
4516 *num = count;
4517
4518 break;
4519 }
4520
4521 case EMLXS_PARM_GET_LIST:
4522 {
4523 emlxs_parm_t *parm;
4524 emlxs_config_t *cfg;
4525 uint32_t i;
4526 uint32_t max_count;
4527
4528 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4529 "fca_port_manage: PARM_GET_LIST");
4530
4531 if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
4532 ret = FC_NOMEM;
4533 break;
4534 }
4535
4536 max_count = pm->pm_stat_len / sizeof (emlxs_parm_t);
4537
4538 parm = (emlxs_parm_t *)pm->pm_stat_buf;
4539 cfg = &CFG;
4540 for (i = 0; i < NUM_CFG_PARAM && max_count; i++,
4541 cfg++) {
4542 if (!(cfg->flags & PARM_HIDDEN)) {
4543 (void) strncpy(parm->label, cfg->string,
4544 (sizeof (parm->label)-1));
4545 parm->min = cfg->low;
4546 parm->max = cfg->hi;
4547 parm->def = cfg->def;
4548 parm->current = cfg->current;
4549 parm->flags = cfg->flags;
4550 (void) strncpy(parm->help, cfg->help,
4551 (sizeof (parm->help)-1));
4552 parm++;
4553 max_count--;
4554 }
4555 }
4556
4557 break;
4558 }
4559
4560 case EMLXS_PARM_GET:
4561 {
4562 emlxs_parm_t *parm_in;
4563 emlxs_parm_t *parm_out;
4564 emlxs_config_t *cfg;
4565 uint32_t i;
4566 uint32_t len;
4567
4568 if (pm->pm_cmd_len < sizeof (emlxs_parm_t)) {
4569 EMLXS_MSGF(EMLXS_CONTEXT,
4570 &emlxs_sfs_debug_msg,
4571 "fca_port_manage: PARM_GET. "
4572 "inbuf too small.");
4573
4574 ret = FC_BADCMD;
4575 break;
4576 }
4577
4578 if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
4579 EMLXS_MSGF(EMLXS_CONTEXT,
4580 &emlxs_sfs_debug_msg,
4581 "fca_port_manage: PARM_GET. "
4582 "outbuf too small");
4583
4584 ret = FC_BADCMD;
4585 break;
4586 }
4587
4588 parm_in = (emlxs_parm_t *)pm->pm_cmd_buf;
4589 parm_out = (emlxs_parm_t *)pm->pm_stat_buf;
4590 len = strlen(parm_in->label);
4591 cfg = &CFG;
4592 ret = FC_BADOBJECT;
4593
4594 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4595 "fca_port_manage: PARM_GET: %s=0x%x,%d",
4596 parm_in->label, parm_in->current,
4597 parm_in->current);
4598
4599 for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
4600 if (len == strlen(cfg->string) &&
4601 (strcmp(parm_in->label,
4602 cfg->string) == 0)) {
4603 (void) strncpy(parm_out->label,
4604 cfg->string,
4605 (sizeof (parm_out->label)-1));
4606 parm_out->min = cfg->low;
4607 parm_out->max = cfg->hi;
4608 parm_out->def = cfg->def;
4609 parm_out->current = cfg->current;
4610 parm_out->flags = cfg->flags;
4611 (void) strncpy(parm_out->help,
4612 cfg->help,
4613 (sizeof (parm_out->help)-1));
4614
4615 ret = FC_SUCCESS;
4616 break;
4617 }
4618 }
4619
4620 break;
4621 }
4622
4623 case EMLXS_PARM_SET:
4624 {
4625 emlxs_parm_t *parm_in;
4626 emlxs_parm_t *parm_out;
4627 emlxs_config_t *cfg;
4628 uint32_t i;
4629 uint32_t len;
4630
4631 if (pm->pm_cmd_len < sizeof (emlxs_parm_t)) {
4632 EMLXS_MSGF(EMLXS_CONTEXT,
4633 &emlxs_sfs_debug_msg,
4634 "fca_port_manage: PARM_GET. "
4635 "inbuf too small.");
4636
4637 ret = FC_BADCMD;
4638 break;
4639 }
4640
4641 if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
4642 EMLXS_MSGF(EMLXS_CONTEXT,
4643 &emlxs_sfs_debug_msg,
4644 "fca_port_manage: PARM_GET. "
4645 "outbuf too small");
4646 ret = FC_BADCMD;
4647 break;
4648 }
4649
4650 parm_in = (emlxs_parm_t *)pm->pm_cmd_buf;
4651 parm_out = (emlxs_parm_t *)pm->pm_stat_buf;
4652 len = strlen(parm_in->label);
4653 cfg = &CFG;
4654 ret = FC_BADOBJECT;
4655
4656 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4657 "fca_port_manage: PARM_SET: %s=0x%x,%d",
4658 parm_in->label, parm_in->current,
4659 parm_in->current);
4660
4661 for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
4662 /* Find matching parameter string */
4663 if (len == strlen(cfg->string) &&
4664 (strcmp(parm_in->label,
4665 cfg->string) == 0)) {
4666 /* Attempt to update parameter */
4667 if (emlxs_set_parm(hba, i,
4668 parm_in->current) == FC_SUCCESS) {
4669 (void) strncpy(parm_out->label,
4670 cfg->string,
4671 (sizeof (parm_out->label)-
4672 1));
4673 parm_out->min = cfg->low;
4674 parm_out->max = cfg->hi;
4675 parm_out->def = cfg->def;
4676 parm_out->current =
4677 cfg->current;
4678 parm_out->flags = cfg->flags;
4679 (void) strncpy(parm_out->help,
4680 cfg->help,
4681 (sizeof (parm_out->help)-
4682 1));
4683
4684 ret = FC_SUCCESS;
4685 }
4686
4687 break;
4688 }
4689 }
4690
4691 break;
4692 }
4693
4694 case EMLXS_LOG_GET:
4695 {
4696 emlxs_log_req_t *req;
4697 emlxs_log_resp_t *resp;
4698 uint32_t len;
4699
4700 /* Check command size */
4701 if (pm->pm_cmd_len < sizeof (emlxs_log_req_t)) {
4702 ret = FC_BADCMD;
4703 break;
4704 }
4705
4706 /* Get the request */
4707 req = (emlxs_log_req_t *)pm->pm_cmd_buf;
4708
4709 /* Calculate the response length from the request */
4710 len = sizeof (emlxs_log_resp_t) +
4711 (req->count * MAX_LOG_MSG_LENGTH);
4712
4713 /* Check the response buffer length */
4714 if (pm->pm_stat_len < len) {
4715 ret = FC_BADCMD;
4716 break;
4717 }
4718
4719 /* Get the response pointer */
4720 resp = (emlxs_log_resp_t *)pm->pm_stat_buf;
4721
4722 /* Get the requested log entries */
4723 (void) emlxs_msg_log_get(hba, req, resp);
4724
4725 ret = FC_SUCCESS;
4726 break;
4727 }
4728
4729 case EMLXS_GET_BOOT_REV:
4730 {
4731 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4732 "fca_port_manage: GET_BOOT_REV");
4733
4734 if (pm->pm_stat_len < strlen(vpd->boot_version)) {
4735 ret = FC_NOMEM;
4736 break;
4737 }
4738
4739 bzero(pm->pm_stat_buf, pm->pm_stat_len);
4740 (void) snprintf(pm->pm_stat_buf, pm->pm_stat_len,
4741 "%s %s", hba->model_info.model, vpd->boot_version);
4742
4743 break;
4744 }
4745
4746 case EMLXS_DOWNLOAD_BOOT:
4747 if (!(hba->flag & FC_ONLINE_MODE)) {
4748 return (FC_OFFLINE);
4749 }
4750 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4751 "fca_port_manage: DOWNLOAD_BOOT");
4752
4753 ret = emlxs_fw_download(hba, pm->pm_data_buf,
4754 pm->pm_data_len, 1);
4755 break;
4756
4757 case EMLXS_DOWNLOAD_CFL:
4758 {
4759 uint32_t *buffer;
4760 uint32_t region;
4761 uint32_t length;
4762
4763 if (!(hba->flag & FC_ONLINE_MODE)) {
4764 return (FC_OFFLINE);
4765 }
4766
4767 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4768 "fca_port_manage: DOWNLOAD_CFL");
4769
4770 /* Extract the region number from the first word. */
4771 buffer = (uint32_t *)pm->pm_data_buf;
4772 region = *buffer++;
4773
4774 /* Adjust the image length for the header word */
4775 length = pm->pm_data_len - 4;
4776
4777 ret =
4778 emlxs_cfl_download(hba, region, (caddr_t)buffer,
4779 length);
4780 break;
4781 }
4782
4783 case EMLXS_VPD_GET:
4784 {
4785 emlxs_vpd_desc_t *vpd_out;
4786
4787 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4788 "fca_port_manage: VPD_GET");
4789
4790 if (pm->pm_stat_len < sizeof (emlxs_vpd_desc_t)) {
4791 ret = FC_BADCMD;
4792 break;
4793 }
4794
4795 vpd_out = (emlxs_vpd_desc_t *)pm->pm_stat_buf;
4796 bzero(vpd_out, pm->pm_stat_len);
4797
4798 (void) strncpy(vpd_out->id, vpd->id,
4799 (sizeof (vpd_out->id)-1));
4800 (void) strncpy(vpd_out->part_num, vpd->part_num,
4801 (sizeof (vpd_out->part_num)-1));
4802 (void) strncpy(vpd_out->eng_change, vpd->eng_change,
4803 (sizeof (vpd_out->eng_change)-1));
4804 (void) strncpy(vpd_out->manufacturer, vpd->manufacturer,
4805 (sizeof (vpd_out->manufacturer)-1));
4806 (void) strncpy(vpd_out->serial_num, vpd->serial_num,
4807 (sizeof (vpd_out->serial_num)-1));
4808 (void) strncpy(vpd_out->model, vpd->model,
4809 (sizeof (vpd_out->model)-1));
4810 (void) strncpy(vpd_out->model_desc, vpd->model_desc,
4811 (sizeof (vpd_out->model_desc)-1));
4812 (void) strncpy(vpd_out->port_num, vpd->port_num,
4813 (sizeof (vpd_out->port_num)-1));
4814 (void) strncpy(vpd_out->prog_types, vpd->prog_types,
4815 (sizeof (vpd_out->prog_types)-1));
4816
4817 ret = FC_SUCCESS;
4818
4819 break;
4820 }
4821
4822 case EMLXS_VPD_GET_V2:
4823 {
4824 emlxs_vpd_desc_v2_t *vpd_out;
4825
4826 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4827 "fca_port_manage: VPD_GET_V2");
4828
4829 if (pm->pm_stat_len < sizeof (emlxs_vpd_desc_v2_t)) {
4830 ret = FC_BADCMD;
4831 break;
4832 }
4833
4834 vpd_out = (emlxs_vpd_desc_v2_t *)pm->pm_stat_buf;
4835 bzero(vpd_out, pm->pm_stat_len);
4836
4837 (void) strncpy(vpd_out->id, vpd->id,
4838 (sizeof (vpd_out->id)-1));
4839 (void) strncpy(vpd_out->part_num, vpd->part_num,
4840 (sizeof (vpd_out->part_num)-1));
4841 (void) strncpy(vpd_out->eng_change, vpd->eng_change,
4842 (sizeof (vpd_out->eng_change)-1));
4843 (void) strncpy(vpd_out->manufacturer, vpd->manufacturer,
4844 (sizeof (vpd_out->manufacturer)-1));
4845 (void) strncpy(vpd_out->serial_num, vpd->serial_num,
4846 (sizeof (vpd_out->serial_num)-1));
4847 (void) strncpy(vpd_out->model, vpd->model,
4848 (sizeof (vpd_out->model)-1));
4849 (void) strncpy(vpd_out->model_desc, vpd->model_desc,
4850 (sizeof (vpd_out->model_desc)-1));
4851 (void) strncpy(vpd_out->port_num, vpd->port_num,
4852 (sizeof (vpd_out->port_num)-1));
4853 (void) strncpy(vpd_out->prog_types, vpd->prog_types,
4854 (sizeof (vpd_out->prog_types)-1));
4855
4856 ret = FC_SUCCESS;
4857
4858 break;
4859 }
4860
4861 case EMLXS_PHY_GET:
4862 {
4863 emlxs_phy_desc_t *phy_out;
4864 MAILBOXQ *mbq;
4865 MAILBOX4 *mb;
4866 IOCTL_COMMON_GET_PHY_DETAILS *phy;
4867 mbox_req_hdr_t *hdr_req;
4868
4869 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4870 "fca_port_manage: EMLXS_PHY_GET");
4871
4872 if (pm->pm_stat_len < sizeof (emlxs_phy_desc_t)) {
4873 ret = FC_BADCMD;
4874 break;
4875 }
4876
4877 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
4878 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4879 "Invalid sli_mode. mode=%d", hba->sli_mode);
4880 ret = FC_BADCMD;
4881 break;
4882 }
4883
4884 phy_out = (emlxs_phy_desc_t *)pm->pm_stat_buf;
4885 bzero(phy_out, sizeof (emlxs_phy_desc_t));
4886
4887 if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba,
4888 MEM_MBOX)) == 0) {
4889 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4890 "Unable to allocate mailbox buffer.");
4891 ret = FC_NOMEM;
4892 break;
4893 }
4894
4895 mb = (MAILBOX4*)mbq;
4896
4897 bzero((void *) mb, MAILBOX_CMD_SLI4_BSIZE);
4898
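		/*
		 * Build an embedded SLI_CONFIG mailbox carrying a
		 * COMMON_OPCODE_GET_PHY_DETAILS request.
		 */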
4899 mb->un.varSLIConfig.be.embedded = 1;
4900 mbq->mbox_cmpl = NULL;
4901
4902 mb->mbxCommand = MBX_SLI_CONFIG;
4903 mb->mbxOwner = OWN_HOST;
4904
4905 hdr_req = (mbox_req_hdr_t *)
4906 &mb->un.varSLIConfig.be.un_hdr.hdr_req;
4907 hdr_req->subsystem = IOCTL_SUBSYSTEM_COMMON;
4908 hdr_req->opcode = COMMON_OPCODE_GET_PHY_DETAILS;
4909 hdr_req->timeout = 0;
4910 hdr_req->req_length =
4911 sizeof (IOCTL_COMMON_GET_PHY_DETAILS);
4912
4913 phy = (IOCTL_COMMON_GET_PHY_DETAILS *)(hdr_req + 1);
4914
4915 /* Send read request */
4916 if (EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_WAIT, 0) !=
4917 MBX_SUCCESS) {
4918 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4919 "Unable to get PHY details. status=%x",
4920 mb->mbxStatus);
4921
4922 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
4923
4924 ret = FC_FAILURE;
4925 break;
4926 }
4927
4928 phy_out->phy_type = phy->params.response.phy_type;
4929 phy_out->interface_type =
4930 phy->params.response.interface_type;
4931 phy_out->misc_params = phy->params.response.misc_params;
4932 phy_out->rsvd[0] = phy->params.response.rsvd[0];
4933 phy_out->rsvd[1] = phy->params.response.rsvd[1];
4934 phy_out->rsvd[2] = phy->params.response.rsvd[2];
4935 phy_out->rsvd[3] = phy->params.response.rsvd[3];
4936
4937 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
4938
4939 ret = FC_SUCCESS;
4940 break;
4941 }
4942
4943 #ifdef NODE_THROTTLE_SUPPORT
4944 case EMLXS_SET_THROTTLE:
4945 {
4946 emlxs_node_t *node;
4947 uint32_t scope = 0;
4948 uint32_t i;
4949 char buf1[32];
4950 emlxs_throttle_desc_t *desc;
4951
4952 if ((pm->pm_data_buf == NULL) ||
4953 (pm->pm_data_len !=
4954 sizeof (emlxs_throttle_desc_t))) {
4955 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4956 "fca_port_manage: EMLXS_SET_THROTTLE: "
4957 "Descriptor buffer not valid. %d",
4958 pm->pm_data_len);
4959 ret = FC_BADCMD;
4960 break;
4961 }
4962
4963 if ((pm->pm_cmd_buf != NULL) &&
4964 (pm->pm_cmd_len == sizeof (uint32_t))) {
4965 scope = *(uint32_t *)pm->pm_cmd_buf;
4966 }
4967
4968 desc = (emlxs_throttle_desc_t *)pm->pm_data_buf;
4969 desc->throttle = MIN(desc->throttle, MAX_NODE_THROTTLE);
4970
4971 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4972 "fca_port_manage: EMLXS_SET_THROTTLE: scope=%d "
4973 "depth=%d",
4974 scope, desc->throttle);
4975
4976 rw_enter(&port->node_rwlock, RW_WRITER);
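		/* scope: 0 = single WWPN (default), 1 = all nodes, 2 = FCP targets only */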
4977 switch (scope) {
4978 case 1: /* all */
4979 for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
4980 node = port->node_table[i];
4981 while (node != NULL) {
4982 node->io_throttle = desc->throttle;
4983
4984 EMLXS_MSGF(EMLXS_CONTEXT,
4985 &emlxs_sfs_debug_msg,
4986 "EMLXS_SET_THROTTLE: wwpn=%s "
4987 "depth=%d",
4988 emlxs_wwn_xlate(buf1, sizeof (buf1),
4989 (uint8_t *)&node->nlp_portname),
4990 node->io_throttle);
4991
4992 node = (NODELIST *)node->nlp_list_next;
4993 }
4994 }
4995 break;
4996
4997 case 2: /* FCP */
4998 for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
4999 node = port->node_table[i];
5000 while (node != NULL) {
5001 if (!(node->nlp_fcp_info &
5002 NLP_FCP_TGT_DEVICE)) {
5003 node = (NODELIST *)
5004 node->nlp_list_next;
5005 continue;
5006 }
5007
5008 node->io_throttle = desc->throttle;
5009
5010 EMLXS_MSGF(EMLXS_CONTEXT,
5011 &emlxs_sfs_debug_msg,
5012 "EMLXS_SET_THROTTLE: wwpn=%s "
5013 "depth=%d",
5014 emlxs_wwn_xlate(buf1, sizeof (buf1),
5015 (uint8_t *)&node->nlp_portname),
5016 node->io_throttle);
5017
5018 node = (NODELIST *)node->nlp_list_next;
5019 }
5020 }
5021 break;
5022
5023 case 0: /* WWPN */
5024 default:
5025 for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
5026 node = port->node_table[i];
5027 while (node != NULL) {
5028 if (bcmp((caddr_t)&node->nlp_portname,
5029 desc->wwpn, 8)) {
5030 node = (NODELIST *)
5031 node->nlp_list_next;
5032 continue;
5033 }
5034
5035 node->io_throttle = desc->throttle;
5036
5037 EMLXS_MSGF(EMLXS_CONTEXT,
5038 &emlxs_sfs_debug_msg,
5039 "EMLXS_SET_THROTTLE: wwpn=%s "
5040 "depth=%d",
5041 emlxs_wwn_xlate(buf1, sizeof (buf1),
5042 (uint8_t *)&node->nlp_portname),
5043 node->io_throttle);
5044
5045 goto set_throttle_done;
5046 }
5047 }
5048 set_throttle_done:
5049 break;
5050 }
5051
5052 rw_exit(&port->node_rwlock);
5053 ret = FC_SUCCESS;
5054
5055 break;
5056 }
5057
5058 case EMLXS_GET_THROTTLE:
5059 {
5060 emlxs_node_t *node;
5061 uint32_t i;
5062 uint32_t j;
5063 char buf1[32];
5064 uint32_t count;
5065 emlxs_throttle_desc_t *desc;
5066
5067 if (pm->pm_stat_len == sizeof (uint32_t)) {
5068 count = emlxs_nport_count(port);
5069 *(uint32_t *)pm->pm_stat_buf = count;
5070
5071 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5072 "fca_port_manage: EMLXS_GET_THROTTLE: "
5073 "count=%d",
5074 count);
5075
5076 ret = FC_SUCCESS;
5077 break;
5078 }
5079
5080 if ((pm->pm_stat_buf == NULL) ||
5081 (pm->pm_stat_len <
5082 sizeof (emlxs_throttle_desc_t))) {
5083 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5084 "fca_port_manage: EMLXS_GET_THROTTLE: "
5085 "Descriptor buffer too small. %d",
5086 pm->pm_stat_len);
5087 ret = FC_BADCMD;
5088 break;
5089 }
5090
5091 count = pm->pm_stat_len /
5092 sizeof (emlxs_throttle_desc_t);
5093 desc = (emlxs_throttle_desc_t *)pm->pm_stat_buf;
5094
5095 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5096 "fca_port_manage: EMLXS_GET_THROTTLE: max=%d",
5097 count);
5098
5099 rw_enter(&port->node_rwlock, RW_READER);
5100 j = 0;
5101 for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
5102 node = port->node_table[i];
5103 while (node != NULL) {
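				/* Skip reserved/well-known fabric DIDs (0xFFFxxx) */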
5104 if ((node->nlp_DID & 0xFFF000) ==
5105 0xFFF000) {
5106 node = (NODELIST *)
5107 node->nlp_list_next;
5108 continue;
5109 }
5110
5111 bcopy((uint8_t *)&node->nlp_portname,
5112 desc[j].wwpn, 8);
5113 desc[j].throttle = node->io_throttle;
5114
5115 EMLXS_MSGF(EMLXS_CONTEXT,
5116 &emlxs_sfs_debug_msg,
5117 "EMLXS_GET_THROTTLE: wwpn=%s "
5118 "depth=%d",
5119 emlxs_wwn_xlate(buf1, sizeof (buf1),
5120 desc[j].wwpn),
5121 desc[j].throttle);
5122
5123 j++;
5124 if (j >= count) {
5125 goto get_throttle_done;
5126 }
5127
5128 node = (NODELIST *)node->nlp_list_next;
5129 }
5130 }
5131 get_throttle_done:
5132 rw_exit(&port->node_rwlock);
5133 ret = FC_SUCCESS;
5134
5135 break;
5136 }
5137 #endif /* NODE_THROTTLE_SUPPORT */
5138
5139 case EMLXS_GET_FCIO_REV:
5140 {
5141 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5142 "fca_port_manage: GET_FCIO_REV");
5143
5144 if (pm->pm_stat_len < sizeof (uint32_t)) {
5145 ret = FC_NOMEM;
5146 break;
5147 }
5148
5149 bzero(pm->pm_stat_buf, pm->pm_stat_len);
5150 *(uint32_t *)pm->pm_stat_buf = FCIO_REV;
5151
5152 break;
5153 }
5154
5155 case EMLXS_GET_DFC_REV:
5156 {
5157 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5158 "fca_port_manage: GET_DFC_REV");
5159
5160 if (pm->pm_stat_len < sizeof (uint32_t)) {
5161 ret = FC_NOMEM;
5162 break;
5163 }
5164
5165 bzero(pm->pm_stat_buf, pm->pm_stat_len);
5166 *(uint32_t *)pm->pm_stat_buf = DFC_REV;
5167
5168 break;
5169 }
5170
5171 case EMLXS_SET_BOOT_STATE:
5172 case EMLXS_SET_BOOT_STATE_old:
5173 {
5174 uint32_t state;
5175
5176 if (!(hba->flag & FC_ONLINE_MODE)) {
5177 return (FC_OFFLINE);
5178 }
5179 if (pm->pm_cmd_len < sizeof (uint32_t)) {
5180 EMLXS_MSGF(EMLXS_CONTEXT,
5181 &emlxs_sfs_debug_msg,
5182 "fca_port_manage: SET_BOOT_STATE");
5183 ret = FC_BADCMD;
5184 break;
5185 }
5186
5187 state = *(uint32_t *)pm->pm_cmd_buf;
5188
5189 if (state == 0) {
5190 EMLXS_MSGF(EMLXS_CONTEXT,
5191 &emlxs_sfs_debug_msg,
5192 "fca_port_manage: SET_BOOT_STATE: "
5193 "Disable");
5194 ret = emlxs_boot_code_disable(hba);
5195 } else {
5196 EMLXS_MSGF(EMLXS_CONTEXT,
5197 &emlxs_sfs_debug_msg,
5198 "fca_port_manage: SET_BOOT_STATE: "
5199 "Enable");
5200 ret = emlxs_boot_code_enable(hba);
5201 }
5202
5203 break;
5204 }
5205
5206 case EMLXS_GET_BOOT_STATE:
5207 case EMLXS_GET_BOOT_STATE_old:
5208 {
5209 if (!(hba->flag & FC_ONLINE_MODE)) {
5210 return (FC_OFFLINE);
5211 }
5212 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5213 "fca_port_manage: GET_BOOT_STATE");
5214
5215 if (pm->pm_stat_len < sizeof (uint32_t)) {
5216 ret = FC_NOMEM;
5217 break;
5218 }
5219 bzero(pm->pm_stat_buf, pm->pm_stat_len);
5220
5221 ret = emlxs_boot_code_state(hba);
5222
5223 if (ret == FC_SUCCESS) {
5224 *(uint32_t *)pm->pm_stat_buf = 1;
5225 ret = FC_SUCCESS;
5226 } else if (ret == FC_FAILURE) {
5227 ret = FC_SUCCESS;
5228 }
5229
5230 break;
5231 }
5232
5233 case EMLXS_HW_ERROR_TEST:
5234 {
5235 /*
5236 * This command is used for simulating HW ERROR
5237 * on SLI4 only.
5238 */
5239 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5240 ret = FC_INVALID_REQUEST;
5241 break;
5242 }
5243 hba->sli.sli4.flag |= EMLXS_SLI4_HW_ERROR;
5244 break;
5245 }
5246
5247 case EMLXS_MB_TIMEOUT_TEST:
5248 {
5249 if (!(hba->flag & FC_ONLINE_MODE)) {
5250 return (FC_OFFLINE);
5251 }
5252
5253 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5254 "fca_port_manage: HW_ERROR_TEST");
5255
5256 /* Trigger a mailbox timeout */
5257 hba->mbox_timer = hba->timer_tics;
5258
5259 break;
5260 }
5261
5262 case EMLXS_TEST_CODE:
5263 {
5264 uint32_t *cmd;
5265
5266 if (!(hba->flag & FC_ONLINE_MODE)) {
5267 return (FC_OFFLINE);
5268 }
5269
5270 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5271 "fca_port_manage: TEST_CODE");
5272
5273 if (pm->pm_cmd_len < sizeof (uint32_t)) {
5274 EMLXS_MSGF(EMLXS_CONTEXT,
5275 &emlxs_sfs_debug_msg,
5276 "fca_port_manage: TEST_CODE. "
5277 "inbuf to small.");
5278
5279 ret = FC_BADCMD;
5280 break;
5281 }
5282
5283 cmd = (uint32_t *)pm->pm_cmd_buf;
5284
5285 ret = emlxs_test(hba, cmd[0],
5286 (pm->pm_cmd_len/sizeof (uint32_t)) - 1, &cmd[1]);
5287
5288 break;
5289 }
5290
5291 case EMLXS_BAR_IO:
5292 {
5293 uint32_t *cmd;
5294 uint32_t *datap;
5295 FCIO_Q_STAT_t *qp;
5296 clock_t time;
5297 uint32_t offset;
5298 caddr_t addr;
5299 uint32_t i;
5300 uint32_t tx_cnt;
5301 uint32_t chip_cnt;
5302
5303 cmd = (uint32_t *)pm->pm_cmd_buf;
5304 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5305 "fca_port_manage: BAR_IO %x %x %x",
5306 cmd[0], cmd[1], cmd[2]);
5307
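		/* cmd[0] = subcommand, cmd[1] = register offset, cmd[2] = write data */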
5308 offset = cmd[1];
5309
5310 ret = FC_SUCCESS;
5311
5312 switch (cmd[0]) {
5313 case 2: /* bar1read */
5314 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5315 return (FC_BADCMD);
5316 }
5317
5318 /* Registers in this range are invalid */
5319 if ((offset >= 0x4C00) && (offset < 0x5000)) {
5320 return (FC_BADCMD);
5321 }
5322 if ((offset >= 0x5800) || (offset & 0x3)) {
5323 return (FC_BADCMD);
5324 }
5325 datap = (uint32_t *)pm->pm_stat_buf;
5326
5327 for (i = 0; i < pm->pm_stat_len;
5328 i += sizeof (uint32_t)) {
5329 if ((offset >= 0x4C00) &&
5330 (offset < 0x5000)) {
5331 pm->pm_stat_len = i;
5332 break;
5333 }
5334 if (offset >= 0x5800) {
5335 pm->pm_stat_len = i;
5336 break;
5337 }
5338 addr = hba->sli.sli4.bar1_addr + offset;
5339 *datap = READ_BAR1_REG(hba, addr);
5340 datap++;
5341 offset += sizeof (uint32_t);
5342 }
5343 #ifdef FMA_SUPPORT
5344 /* Access handle validation */
5345 EMLXS_CHK_ACC_HANDLE(hba,
5346 hba->sli.sli4.bar1_acc_handle);
5347 #endif /* FMA_SUPPORT */
5348 break;
5349 case 3: /* bar2read */
5350 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5351 return (FC_BADCMD);
5352 }
5353 if ((offset >= 0x1000) || (offset & 0x3)) {
5354 return (FC_BADCMD);
5355 }
5356 datap = (uint32_t *)pm->pm_stat_buf;
5357
5358 for (i = 0; i < pm->pm_stat_len;
5359 i += sizeof (uint32_t)) {
5360 *datap = READ_BAR2_REG(hba,
5361 hba->sli.sli4.bar2_addr + offset);
5362 datap++;
5363 offset += sizeof (uint32_t);
5364 }
5365 #ifdef FMA_SUPPORT
5366 /* Access handle validation */
5367 EMLXS_CHK_ACC_HANDLE(hba,
5368 hba->sli.sli4.bar2_acc_handle);
5369 #endif /* FMA_SUPPORT */
5370 break;
5371 case 4: /* bar1write */
5372 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5373 return (FC_BADCMD);
5374 }
5375 WRITE_BAR1_REG(hba, hba->sli.sli4.bar1_addr +
5376 offset, cmd[2]);
5377 #ifdef FMA_SUPPORT
5378 /* Access handle validation */
5379 EMLXS_CHK_ACC_HANDLE(hba,
5380 hba->sli.sli4.bar1_acc_handle);
5381 #endif /* FMA_SUPPORT */
5382 break;
5383 case 5: /* bar2write */
5384 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5385 return (FC_BADCMD);
5386 }
5387 WRITE_BAR2_REG(hba, hba->sli.sli4.bar2_addr +
5388 offset, cmd[2]);
5389 #ifdef FMA_SUPPORT
5390 /* Access handle validation */
5391 EMLXS_CHK_ACC_HANDLE(hba,
5392 hba->sli.sli4.bar2_acc_handle);
5393 #endif /* FMA_SUPPORT */
5394 break;
5395 case 6: /* dumpbsmbox */
5396 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5397 return (FC_BADCMD);
5398 }
5399 if (offset != 0) {
5400 return (FC_BADCMD);
5401 }
5402
5403 bcopy((caddr_t)hba->sli.sli4.bootstrapmb.virt,
5404 (caddr_t)pm->pm_stat_buf, 256);
5405 break;
5406 case 7: /* pciread */
5407 if ((offset >= 0x200) || (offset & 0x3)) {
5408 return (FC_BADCMD);
5409 }
5410 datap = (uint32_t *)pm->pm_stat_buf;
5411 for (i = 0; i < pm->pm_stat_len;
5412 i += sizeof (uint32_t)) {
5413 *datap = ddi_get32(hba->pci_acc_handle,
5414 (uint32_t *)(hba->pci_addr +
5415 offset));
5416 datap++;
5417 offset += sizeof (uint32_t);
5418 }
5419 #ifdef FMA_SUPPORT
5420 /* Access handle validation */
5421 EMLXS_CHK_ACC_HANDLE(hba, hba->pci_acc_handle);
5422 #endif /* FMA_SUPPORT */
5423 break;
5424 case 8: /* abortall */
5425 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5426 return (FC_BADCMD);
5427 }
5428 emlxs_abort_all(hba, &tx_cnt, &chip_cnt);
5429 datap = (uint32_t *)pm->pm_stat_buf;
5430 *datap++ = tx_cnt;
5431 *datap = chip_cnt;
5432 break;
5433 case 9: /* get_q_info */
5434 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5435 return (FC_BADCMD);
5436 }
5437 qp = (FCIO_Q_STAT_t *)pm->pm_stat_buf;
5438 for (i = 0; i < FCIO_MAX_EQS; i++) {
5439 addr = hba->sli.sli4.eq[i].addr.virt;
5440 qp->eq[i].host_index =
5441 hba->sli.sli4.eq[i].host_index;
5442 qp->eq[i].max_index =
5443 hba->sli.sli4.eq[i].max_index;
5444 qp->eq[i].qid =
5445 hba->sli.sli4.eq[i].qid;
5446 qp->eq[i].msix_vector =
5447 hba->sli.sli4.eq[i].msix_vector;
5448 qp->eq[i].phys =
5449 hba->sli.sli4.eq[i].addr.phys;
5450 qp->eq[i].virt = PADDR_LO(
5451 (uintptr_t)addr);
5452 qp->eq[i].virt_hi = PADDR_HI(
5453 (uintptr_t)addr);
5454 qp->eq[i].max_proc =
5455 hba->sli.sli4.eq[i].max_proc;
5456 qp->eq[i].isr_count =
5457 hba->sli.sli4.eq[i].isr_count;
5458 qp->eq[i].num_proc =
5459 hba->sli.sli4.eq[i].num_proc;
5460 }
5461 for (i = 0; i < FCIO_MAX_CQS; i++) {
5462 addr = hba->sli.sli4.cq[i].addr.virt;
5463 qp->cq[i].host_index =
5464 hba->sli.sli4.cq[i].host_index;
5465 qp->cq[i].max_index =
5466 hba->sli.sli4.cq[i].max_index;
5467 qp->cq[i].qid =
5468 hba->sli.sli4.cq[i].qid;
5469 qp->cq[i].eqid =
5470 hba->sli.sli4.cq[i].eqid;
5471 qp->cq[i].type =
5472 hba->sli.sli4.cq[i].type;
5473 qp->cq[i].phys =
5474 hba->sli.sli4.cq[i].addr.phys;
5475 qp->cq[i].virt = PADDR_LO(
5476 (uintptr_t)addr);
5477 qp->cq[i].virt_hi = PADDR_HI(
5478 (uintptr_t)addr);
5479 qp->cq[i].max_proc =
5480 hba->sli.sli4.cq[i].max_proc;
5481 qp->cq[i].isr_count =
5482 hba->sli.sli4.cq[i].isr_count;
5483 qp->cq[i].num_proc =
5484 hba->sli.sli4.cq[i].num_proc;
5485 }
5486 for (i = 0; i < FCIO_MAX_WQS; i++) {
5487 addr = hba->sli.sli4.wq[i].addr.virt;
5488 qp->wq[i].host_index =
5489 hba->sli.sli4.wq[i].host_index;
5490 qp->wq[i].max_index =
5491 hba->sli.sli4.wq[i].max_index;
5492 qp->wq[i].port_index =
5493 hba->sli.sli4.wq[i].port_index;
5494 qp->wq[i].release_depth =
5495 hba->sli.sli4.wq[i].release_depth;
5496 qp->wq[i].qid =
5497 hba->sli.sli4.wq[i].qid;
5498 qp->wq[i].cqid =
5499 hba->sli.sli4.wq[i].cqid;
5500 qp->wq[i].phys =
5501 hba->sli.sli4.wq[i].addr.phys;
5502 qp->wq[i].virt = PADDR_LO(
5503 (uintptr_t)addr);
5504 qp->wq[i].virt_hi = PADDR_HI(
5505 (uintptr_t)addr);
5506 qp->wq[i].num_proc =
5507 hba->sli.sli4.wq[i].num_proc;
5508 qp->wq[i].num_busy =
5509 hba->sli.sli4.wq[i].num_busy;
5510 }
5511 for (i = 0; i < FCIO_MAX_RQS; i++) {
5512 addr = hba->sli.sli4.rq[i].addr.virt;
5513 qp->rq[i].qid =
5514 hba->sli.sli4.rq[i].qid;
5515 qp->rq[i].cqid =
5516 hba->sli.sli4.rq[i].cqid;
5517 qp->rq[i].host_index =
5518 hba->sli.sli4.rq[i].host_index;
5519 qp->rq[i].max_index =
5520 hba->sli.sli4.rq[i].max_index;
5521 qp->rq[i].phys =
5522 hba->sli.sli4.rq[i].addr.phys;
5523 qp->rq[i].virt = PADDR_LO(
5524 (uintptr_t)addr);
5525 qp->rq[i].virt_hi = PADDR_HI(
5526 (uintptr_t)addr);
5527 qp->rq[i].num_proc =
5528 hba->sli.sli4.rq[i].num_proc;
5529 }
5530 qp->que_start_timer =
5531 hba->sli.sli4.que_stat_timer;
5532 (void) drv_getparm(LBOLT, &time);
5533 qp->que_current_timer = (uint32_t)time;
5534 qp->intr_count = hba->intr_count;
5535 break;
5536 case 10: /* zero_q_stat */
5537 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5538 return (FC_BADCMD);
5539 }
5540 emlxs_sli4_zero_queue_stat(hba);
5541 break;
5542 default:
5543 ret = FC_BADCMD;
5544 break;
5545 }
5546 break;
5547 }
5548
5549 default:
5550
5551 ret = FC_INVALID_REQUEST;
5552 break;
5553 }
5554
5555 break;
5556
5557 }
5558
5559 case FC_PORT_INITIALIZE:
5560 if (!(hba->flag & FC_ONLINE_MODE)) {
5561 return (FC_OFFLINE);
5562 }
5563 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5564 "fca_port_manage: FC_PORT_INITIALIZE");
5565 break;
5566
5567 case FC_PORT_LOOPBACK:
5568 if (!(hba->flag & FC_ONLINE_MODE)) {
5569 return (FC_OFFLINE);
5570 }
5571 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5572 "fca_port_manage: FC_PORT_LOOPBACK");
5573 break;
5574
5575 case FC_PORT_BYPASS:
5576 if (!(hba->flag & FC_ONLINE_MODE)) {
5577 return (FC_OFFLINE);
5578 }
5579 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5580 "fca_port_manage: FC_PORT_BYPASS");
5581 ret = FC_INVALID_REQUEST;
5582 break;
5583
5584 case FC_PORT_UNBYPASS:
5585 if (!(hba->flag & FC_ONLINE_MODE)) {
5586 return (FC_OFFLINE);
5587 }
5588 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5589 "fca_port_manage: FC_PORT_UNBYPASS");
5590 ret = FC_INVALID_REQUEST;
5591 break;
5592
5593 case FC_PORT_GET_NODE_ID:
5594 {
5595 fc_rnid_t *rnid;
5596
5597 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5598 "fca_port_manage: FC_PORT_GET_NODE_ID");
5599
5600 bzero(pm->pm_data_buf, pm->pm_data_len);
5601
5602 if (pm->pm_data_len < sizeof (fc_rnid_t)) {
5603 ret = FC_NOMEM;
5604 break;
5605 }
5606
5607 rnid = (fc_rnid_t *)pm->pm_data_buf;
5608
5609 (void) snprintf((char *)rnid->global_id,
5610 (sizeof (rnid->global_id)-1),
5611 "%01x%01x%02x%02x%02x%02x%02x%02x%02x",
5612 hba->wwpn.nameType, hba->wwpn.IEEEextMsn,
5613 hba->wwpn.IEEEextLsb, hba->wwpn.IEEE[0],
5614 hba->wwpn.IEEE[1], hba->wwpn.IEEE[2], hba->wwpn.IEEE[3],
5615 hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
5616
5617 rnid->unit_type = RNID_HBA;
5618 rnid->port_id = port->did;
5619 rnid->ip_version = RNID_IPV4;
5620
5621 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5622 "GET_NODE_ID: wwpn: %s", rnid->global_id);
5623 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5624 "GET_NODE_ID: unit_type: 0x%x", rnid->unit_type);
5625 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5626 "GET_NODE_ID: port_id: 0x%x", rnid->port_id);
5627 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5628 "GET_NODE_ID: num_attach: %d", rnid->num_attached);
5629 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5630 "GET_NODE_ID: ip_version: 0x%x", rnid->ip_version);
5631 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5632 "GET_NODE_ID: udp_port: 0x%x", rnid->udp_port);
5633 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5634 "GET_NODE_ID: ip_addr: %s", rnid->ip_addr);
5635 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5636 "GET_NODE_ID: resv: 0x%x", rnid->specific_id_resv);
5637 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5638 "GET_NODE_ID: topo_flags: 0x%x", rnid->topo_flags);
5639
5640 ret = FC_SUCCESS;
5641 break;
5642 }
5643
5644 case FC_PORT_SET_NODE_ID:
5645 {
5646 fc_rnid_t *rnid;
5647
5648 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5649 "fca_port_manage: FC_PORT_SET_NODE_ID");
5650
5651 if (pm->pm_data_len < sizeof (fc_rnid_t)) {
5652 ret = FC_NOMEM;
5653 break;
5654 }
5655
5656 rnid = (fc_rnid_t *)pm->pm_data_buf;
5657
5658 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5659 "SET_NODE_ID: wwpn: %s", rnid->global_id);
5660 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5661 "SET_NODE_ID: unit_type: 0x%x", rnid->unit_type);
5662 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5663 "SET_NODE_ID: port_id: 0x%x", rnid->port_id);
5664 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5665 "SET_NODE_ID: num_attach: %d", rnid->num_attached);
5666 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5667 "SET_NODE_ID: ip_version: 0x%x", rnid->ip_version);
5668 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5669 "SET_NODE_ID: udp_port: 0x%x", rnid->udp_port);
5670 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5671 "SET_NODE_ID: ip_addr: %s", rnid->ip_addr);
5672 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5673 "SET_NODE_ID: resv: 0x%x", rnid->specific_id_resv);
5674 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5675 "SET_NODE_ID: topo_flags: 0x%x", rnid->topo_flags);
5676
5677 ret = FC_SUCCESS;
5678 break;
5679 }
5680
5681 #ifdef S11
5682 case FC_PORT_GET_P2P_INFO:
5683 {
5684 fc_fca_p2p_info_t *p2p_info;
5685 NODELIST *ndlp;
5686
5687 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5688 "fca_port_manage: FC_PORT_GET_P2P_INFO");
5689
5690 bzero(pm->pm_data_buf, pm->pm_data_len);
5691
5692 if (pm->pm_data_len < sizeof (fc_fca_p2p_info_t)) {
5693 ret = FC_NOMEM;
5694 break;
5695 }
5696
5697 p2p_info = (fc_fca_p2p_info_t *)pm->pm_data_buf;
5698
5699 if (hba->state >= FC_LINK_UP) {
5700 if ((hba->topology == TOPOLOGY_PT_PT) &&
5701 (hba->flag & FC_PT_TO_PT)) {
5702 p2p_info->fca_d_id = port->did;
5703 p2p_info->d_id = port->rdid;
5704
5705 ndlp = emlxs_node_find_did(port,
5706 port->rdid, 1);
5707
5708 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5709 "FC_PORT_GET_P2P_INFO: fca_d_id: 0x%x, "
5710 "d_id: 0x%x, ndlp: 0x%p", port->did,
5711 port->rdid, ndlp);
5712 if (ndlp) {
5713 bcopy(&ndlp->nlp_portname,
5714 (caddr_t)&p2p_info->pwwn,
5715 sizeof (la_wwn_t));
5716 bcopy(&ndlp->nlp_nodename,
5717 (caddr_t)&p2p_info->nwwn,
5718 sizeof (la_wwn_t));
5719
5720 ret = FC_SUCCESS;
5721 break;
5722
5723 }
5724 }
5725 }
5726
5727 ret = FC_FAILURE;
5728 break;
5729 }
5730 #endif /* S11 */
5731
5732 default:
5733 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5734 "fca_port_manage: code=%x", pm->pm_cmd_code);
5735 ret = FC_INVALID_REQUEST;
5736 break;
5737
5738 }
5739
5740 return (ret);
5741
5742 } /* emlxs_fca_port_manage() */
5743
5744
5745 /*ARGSUSED*/
5746 static uint32_t
5747 emlxs_test(emlxs_hba_t *hba, uint32_t test_code, uint32_t args,
5748 uint32_t *arg)
5749 {
5750 uint32_t rval = 0;
5751 emlxs_port_t *port = &PPORT;
5752
5753 switch (test_code) {
5754 #ifdef TEST_SUPPORT
5755 case 1: /* SCSI underrun */
5756 {
5757 hba->underrun_counter = (args)? arg[0]:1;
5758 break;
5759 }
5760 #endif /* TEST_SUPPORT */
5761
5762 default:
5763 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5764 "test: Unsupported test code. (0x%x)", test_code);
5765 rval = FC_INVALID_REQUEST;
5766 }
5767
5768 return (rval);
5769
5770 } /* emlxs_test() */
5771
5772
5773 /*
5774 * Given the device number, return the devinfo pointer or the ddiinst number.
5775 * Note: this routine must be successful on DDI_INFO_DEVT2INSTANCE even
5776 * before attach.
5777 *
5778 * Translate "dev_t" to a pointer to the associated "dev_info_t".
5779 */
5780 /*ARGSUSED*/
5781 static int
5782 emlxs_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
5783 {
5784 emlxs_hba_t *hba;
5785 int32_t ddiinst;
5786
5787 ddiinst = getminor((dev_t)arg);
5788
5789 switch (infocmd) {
5790 case DDI_INFO_DEVT2DEVINFO:
5791 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5792 if (hba)
5793 *result = hba->dip;
5794 else
5795 *result = NULL;
5796 break;
5797
5798 case DDI_INFO_DEVT2INSTANCE:
5799 *result = (void *)((unsigned long)ddiinst);
5800 break;
5801
5802 default:
5803 return (DDI_FAILURE);
5804 }
5805
5806 return (DDI_SUCCESS);
5807
5808 } /* emlxs_info() */
5809
5810
5811 static int32_t
5812 emlxs_power(dev_info_t *dip, int32_t comp, int32_t level)
5813 {
5814 emlxs_hba_t *hba;
5815 emlxs_port_t *port;
5816 int32_t ddiinst;
5817 int rval = DDI_SUCCESS;
5818
5819 ddiinst = ddi_get_instance(dip);
5820 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5821 port = &PPORT;
5822
5823 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5824 "fca_power: comp=%x level=%x", comp, level);
5825
5826 if (hba == NULL || comp != EMLXS_PM_ADAPTER) {
5827 return (DDI_FAILURE);
5828 }
5829
5830 mutex_enter(&EMLXS_PM_LOCK);
5831
5832 /* If we are already at the proper level then return success */
5833 if (hba->pm_level == level) {
5834 mutex_exit(&EMLXS_PM_LOCK);
5835 return (DDI_SUCCESS);
5836 }
5837
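	/*
	 * EMLXS_PM_ADAPTER_UP resumes a suspended adapter;
	 * EMLXS_PM_ADAPTER_DOWN suspends an active one.
	 */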
5838 switch (level) {
5839 case EMLXS_PM_ADAPTER_UP:
5840
5841 /*
5842 * If we are already in emlxs_attach,
5843 * let emlxs_hba_attach take care of things
5844 */
5845 if (hba->pm_state & EMLXS_PM_IN_ATTACH) {
5846 hba->pm_level = EMLXS_PM_ADAPTER_UP;
5847 break;
5848 }
5849
5850 /* Check if adapter is suspended */
5851 if (hba->pm_state & EMLXS_PM_SUSPENDED) {
5852 hba->pm_level = EMLXS_PM_ADAPTER_UP;
5853
5854 /* Try to resume the port */
5855 rval = emlxs_hba_resume(dip);
5856
5857 if (rval != DDI_SUCCESS) {
5858 hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
5859 }
5860 break;
5861 }
5862
5863 /* Set adapter up */
5864 hba->pm_level = EMLXS_PM_ADAPTER_UP;
5865 break;
5866
5867 case EMLXS_PM_ADAPTER_DOWN:
5868
5869
5870 /*
5871 * If we are already in emlxs_detach,
5872 * let emlxs_hba_detach take care of things
5873 */
5874 if (hba->pm_state & EMLXS_PM_IN_DETACH) {
5875 hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
5876 break;
5877 }
5878
5879 /* Check if adapter is not suspended */
5880 if (!(hba->pm_state & EMLXS_PM_SUSPENDED)) {
5881 hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
5882
5883 /* Try to suspend the port */
5884 rval = emlxs_hba_suspend(dip);
5885
5886 if (rval != DDI_SUCCESS) {
5887 hba->pm_level = EMLXS_PM_ADAPTER_UP;
5888 }
5889
5890 break;
5891 }
5892
5893 /* Set adapter down */
5894 hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
5895 break;
5896
5897 default:
5898 rval = DDI_FAILURE;
5899 break;
5900
5901 }
5902
5903 mutex_exit(&EMLXS_PM_LOCK);
5904
5905 return (rval);
5906
5907 } /* emlxs_power() */
5908
5909
5910 #ifdef EMLXS_I386
5911 #ifdef S11
5912 /*
5913 * quiesce(9E) entry point.
5914 *
5915 * This function is called when the system is single-threaded at high PIL
5916 * with preemption disabled. Therefore, this function must not block.
5917 *
5918 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
5919 * DDI_FAILURE indicates an error condition and should almost never happen.
5920 */
5921 static int
5922 emlxs_quiesce(dev_info_t *dip)
5923 {
5924 emlxs_hba_t *hba;
5925 emlxs_port_t *port;
5926 int32_t ddiinst;
5927 int rval = DDI_SUCCESS;
5928
5929 ddiinst = ddi_get_instance(dip);
5930 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5931 port = &PPORT;
5932
5933 if (hba == NULL || port == NULL) {
5934 return (DDI_FAILURE);
5935 }
5936
5937 /* The fourth arg 1 indicates the call is from quiesce */
5938 if (EMLXS_SLI_HBA_RESET(hba, 1, 1, 1) == 0) {
5939 return (rval);
5940 } else {
5941 return (DDI_FAILURE);
5942 }
5943
5944 } /* emlxs_quiesce */
5945 #endif /* S11 */
5946 #endif /* EMLXS_I386 */
5947
5948
5949 static int
5950 emlxs_open(dev_t *dev_p, int32_t flag, int32_t otype, cred_t *cred_p)
5951 {
5952 emlxs_hba_t *hba;
5953 emlxs_port_t *port;
5954 int ddiinst;
5955
5956 ddiinst = getminor(*dev_p);
5957 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5958
5959 if (hba == NULL) {
5960 return (ENXIO);
5961 }
5962
5963 port = &PPORT;
5964
5965 if (hba->pm_state & EMLXS_PM_SUSPENDED) {
5966 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
5967 "open failed: Driver suspended.");
5968 return (ENXIO);
5969 }
5970
5971 if (otype != OTYP_CHR) {
5972 return (EINVAL);
5973 }
5974
5975 if (drv_priv(cred_p)) {
5976 return (EPERM);
5977 }
5978
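	/*
	 * An exclusive open (FEXCL) fails if the device is already open;
	 * any open fails while an exclusive open is active.
	 */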
5979 mutex_enter(&EMLXS_IOCTL_LOCK);
5980
5981 if (hba->ioctl_flags & EMLXS_OPEN_EXCLUSIVE) {
5982 mutex_exit(&EMLXS_IOCTL_LOCK);
5983 return (EBUSY);
5984 }
5985
5986 if (flag & FEXCL) {
5987 if (hba->ioctl_flags & EMLXS_OPEN) {
5988 mutex_exit(&EMLXS_IOCTL_LOCK);
5989 return (EBUSY);
5990 }
5991
5992 hba->ioctl_flags |= EMLXS_OPEN_EXCLUSIVE;
5993 }
5994
5995 hba->ioctl_flags |= EMLXS_OPEN;
5996
5997 mutex_exit(&EMLXS_IOCTL_LOCK);
5998
5999 return (0);
6000
6001 } /* emlxs_open() */
6002
6003
6004 /*ARGSUSED*/
6005 static int
6006 emlxs_close(dev_t dev, int32_t flag, int32_t otype, cred_t *cred_p)
6007 {
6008 emlxs_hba_t *hba;
6009 int ddiinst;
6010
6011 ddiinst = getminor(dev);
6012 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
6013
6014 if (hba == NULL) {
6015 return (ENXIO);
6016 }
6017
6018 if (otype != OTYP_CHR) {
6019 return (EINVAL);
6020 }
6021
6022 mutex_enter(&EMLXS_IOCTL_LOCK);
6023
6024 if (!(hba->ioctl_flags & EMLXS_OPEN)) {
6025 mutex_exit(&EMLXS_IOCTL_LOCK);
6026 return (ENODEV);
6027 }
6028
6029 hba->ioctl_flags &= ~EMLXS_OPEN;
6030 hba->ioctl_flags &= ~EMLXS_OPEN_EXCLUSIVE;
6031
6032 mutex_exit(&EMLXS_IOCTL_LOCK);
6033
6034 return (0);
6035
6036 } /* emlxs_close() */
6037
6038
6039 /*ARGSUSED*/
6040 static int
6041 emlxs_ioctl(dev_t dev, int32_t cmd, intptr_t arg, int32_t mode,
6042 cred_t *cred_p, int32_t *rval_p)
6043 {
6044 emlxs_hba_t *hba;
6045 emlxs_port_t *port;
6046 int rval = 0; /* return code */
6047 int ddiinst;
6048
6049 ddiinst = getminor(dev);
6050 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
6051
6052 if (hba == NULL) {
6053 return (ENXIO);
6054 }
6055
6056 port = &PPORT;
6057
6058 if (hba->pm_state & EMLXS_PM_SUSPENDED) {
6059 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
6060 "ioctl failed: Driver suspended.");
6061
6062 return (ENXIO);
6063 }
6064
6065 mutex_enter(&EMLXS_IOCTL_LOCK);
6066 if (!(hba->ioctl_flags & EMLXS_OPEN)) {
6067 mutex_exit(&EMLXS_IOCTL_LOCK);
6068 return (ENXIO);
6069 }
6070 mutex_exit(&EMLXS_IOCTL_LOCK);
6071
6072 #ifdef IDLE_TIMER
6073 emlxs_pm_busy_component(hba);
6074 #endif /* IDLE_TIMER */
6075
6076 switch (cmd) {
6077 case EMLXS_DFC_COMMAND:
6078 rval = emlxs_dfc_manage(hba, (void *)arg, mode);
6079 break;
6080
6081 default:
6082 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
6083 "ioctl: Invalid command received. cmd=%x", cmd);
6084 rval = EINVAL;
6085 }
6086
6087 done:
6088 return (rval);
6089
6090 } /* emlxs_ioctl() */
6091
6092
6093
6094 /*
6095 *
6096 * Device Driver Common Routines
6097 *
6098 */
6099
6100 /* EMLXS_PM_LOCK must be held for this call */
6101 static int
6102 emlxs_hba_resume(dev_info_t *dip)
6103 {
6104 emlxs_hba_t *hba;
6105 emlxs_port_t *port;
6106 int ddiinst;
6107
6108 ddiinst = ddi_get_instance(dip);
6109 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
6110 port = &PPORT;
6111
6112 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_resume_msg, NULL);
6113
6114 if (!(hba->pm_state & EMLXS_PM_SUSPENDED)) {
6115 return (DDI_SUCCESS);
6116 }
6117
6118 hba->pm_state &= ~EMLXS_PM_SUSPENDED;
6119
6120 /* Re-enable the physical port on this HBA */
6121 port->flag |= EMLXS_PORT_ENABLED;
6122
6123 /* Take the adapter online */
6124 if (emlxs_power_up(hba)) {
6125 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_resume_failed_msg,
6126 "Unable to take adapter online.");
6127
6128 hba->pm_state |= EMLXS_PM_SUSPENDED;
6129
6130 return (DDI_FAILURE);
6131 }
6132
6133 return (DDI_SUCCESS);
6134
6135 } /* emlxs_hba_resume() */
6136
6137
6138 /* EMLXS_PM_LOCK must be held for this call */
6139 static int
6140 emlxs_hba_suspend(dev_info_t *dip)
6141 {
6142 emlxs_hba_t *hba;
6143 emlxs_port_t *port;
6144 int ddiinst;
6145
6146 ddiinst = ddi_get_instance(dip);
6147 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
6148 port = &PPORT;
6149
6150 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_suspend_msg, NULL);
6151
6152 if (hba->pm_state & EMLXS_PM_SUSPENDED) {
6153 return (DDI_SUCCESS);
6154 }
6155
6156 hba->pm_state |= EMLXS_PM_SUSPENDED;
6157
6158 /* Take the adapter offline */
6159 if (emlxs_power_down(hba)) {
6160 hba->pm_state &= ~EMLXS_PM_SUSPENDED;
6161
6162 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_suspend_failed_msg,
6163 "Unable to take adapter offline.");
6164
6165 return (DDI_FAILURE);
6166 }
6167
6168 return (DDI_SUCCESS);
6169
6170 } /* emlxs_hba_suspend() */
6171
6172
6173
6174 static void
6175 emlxs_lock_init(emlxs_hba_t *hba)
6176 {
6177 emlxs_port_t *port = &PPORT;
6178 uint32_t i;
6179
6180 /* Initialize the power management */
6181 mutex_init(&EMLXS_PM_LOCK, NULL, MUTEX_DRIVER,
6182 DDI_INTR_PRI(hba->intr_arg));
6183
6184 mutex_init(&EMLXS_TIMER_LOCK, NULL, MUTEX_DRIVER,
6185 DDI_INTR_PRI(hba->intr_arg));
6186
6187 cv_init(&hba->timer_lock_cv, NULL, CV_DRIVER, NULL);
6188
6189 mutex_init(&EMLXS_PORT_LOCK, NULL, MUTEX_DRIVER,
6190 DDI_INTR_PRI(hba->intr_arg));
6191
6192 mutex_init(&EMLXS_MBOX_LOCK, NULL, MUTEX_DRIVER,
6193 DDI_INTR_PRI(hba->intr_arg));
6194
6195 cv_init(&EMLXS_MBOX_CV, NULL, CV_DRIVER, NULL);
6196
6197 mutex_init(&EMLXS_LINKUP_LOCK, NULL, MUTEX_DRIVER,
6198 DDI_INTR_PRI(hba->intr_arg));
6199
6200 cv_init(&EMLXS_LINKUP_CV, NULL, CV_DRIVER, NULL);
6201
6202 mutex_init(&EMLXS_TX_CHANNEL_LOCK, NULL, MUTEX_DRIVER,
6203 DDI_INTR_PRI(hba->intr_arg));
6204
6205 for (i = 0; i < MAX_RINGS; i++) {
6206 mutex_init(&EMLXS_CMD_RING_LOCK(i), NULL, MUTEX_DRIVER,
6207 DDI_INTR_PRI(hba->intr_arg));
6208 }
6209
6210
6211 for (i = 0; i < EMLXS_MAX_WQS; i++) {
6212 mutex_init(&EMLXS_QUE_LOCK(i), NULL, MUTEX_DRIVER,
6213 DDI_INTR_PRI(hba->intr_arg));
6214 }
6215
6216 mutex_init(&EMLXS_MSIID_LOCK, NULL, MUTEX_DRIVER,
6217 DDI_INTR_PRI(hba->intr_arg));
6218
6219 mutex_init(&EMLXS_FCTAB_LOCK, NULL, MUTEX_DRIVER,
6220 DDI_INTR_PRI(hba->intr_arg));
6221
6222 mutex_init(&EMLXS_MEMGET_LOCK, NULL, MUTEX_DRIVER,
6223 DDI_INTR_PRI(hba->intr_arg));
6224
6225 mutex_init(&EMLXS_MEMPUT_LOCK, NULL, MUTEX_DRIVER,
6226 DDI_INTR_PRI(hba->intr_arg));
6227
6228 mutex_init(&EMLXS_IOCTL_LOCK, NULL, MUTEX_DRIVER,
6229 DDI_INTR_PRI(hba->intr_arg));
6230
6231 #ifdef DUMP_SUPPORT
6232 mutex_init(&EMLXS_DUMP_LOCK, NULL, MUTEX_DRIVER,
6233 DDI_INTR_PRI(hba->intr_arg));
6234 #endif /* DUMP_SUPPORT */
6235
6236 mutex_init(&EMLXS_SPAWN_LOCK, NULL, MUTEX_DRIVER,
6237 DDI_INTR_PRI(hba->intr_arg));
6238
6239 /* Create per port locks */
6240 for (i = 0; i < MAX_VPORTS; i++) {
6241 port = &VPORT(i);
6242
6243 rw_init(&port->node_rwlock, NULL, RW_DRIVER, NULL);
6244
6245 if (i == 0) {
6246 mutex_init(&EMLXS_PKT_LOCK, NULL, MUTEX_DRIVER,
6247 DDI_INTR_PRI(hba->intr_arg));
6248
6249 cv_init(&EMLXS_PKT_CV, NULL, CV_DRIVER, NULL);
6250
6251 mutex_init(&EMLXS_UB_LOCK, NULL, MUTEX_DRIVER,
6252 DDI_INTR_PRI(hba->intr_arg));
6253 } else {
6254 mutex_init(&EMLXS_PKT_LOCK, NULL, MUTEX_DRIVER,
6255 DDI_INTR_PRI(hba->intr_arg));
6256
6257 cv_init(&EMLXS_PKT_CV, NULL, CV_DRIVER, NULL);
6258
6259 mutex_init(&EMLXS_UB_LOCK, NULL, MUTEX_DRIVER,
6260 DDI_INTR_PRI(hba->intr_arg));
6261 }
6262 }
6263
6264 return;
6265
6266 } /* emlxs_lock_init() */
6267
6268
6269
6270 static void
6271 emlxs_lock_destroy(emlxs_hba_t *hba)
6272 {
6273 emlxs_port_t *port = &PPORT;
6274 uint32_t i;
6275
6276 mutex_destroy(&EMLXS_TIMER_LOCK);
6277 cv_destroy(&hba->timer_lock_cv);
6278
6279 mutex_destroy(&EMLXS_PORT_LOCK);
6280
6281 cv_destroy(&EMLXS_MBOX_CV);
6282 cv_destroy(&EMLXS_LINKUP_CV);
6283
6284 mutex_destroy(&EMLXS_LINKUP_LOCK);
6285 mutex_destroy(&EMLXS_MBOX_LOCK);
6286
6287 mutex_destroy(&EMLXS_TX_CHANNEL_LOCK);
6288
6289 for (i = 0; i < MAX_RINGS; i++) {
6290 mutex_destroy(&EMLXS_CMD_RING_LOCK(i));
6291 }
6292
6293 for (i = 0; i < EMLXS_MAX_WQS; i++) {
6294 mutex_destroy(&EMLXS_QUE_LOCK(i));
6295 }
6296
6297 mutex_destroy(&EMLXS_MSIID_LOCK);
6298
6299 mutex_destroy(&EMLXS_FCTAB_LOCK);
6300 mutex_destroy(&EMLXS_MEMGET_LOCK);
6301 mutex_destroy(&EMLXS_MEMPUT_LOCK);
6302 mutex_destroy(&EMLXS_IOCTL_LOCK);
6303 mutex_destroy(&EMLXS_SPAWN_LOCK);
6304 mutex_destroy(&EMLXS_PM_LOCK);
6305
6306 #ifdef DUMP_SUPPORT
6307 mutex_destroy(&EMLXS_DUMP_LOCK);
6308 #endif /* DUMP_SUPPORT */
6309
6310 /* Destroy per port locks */
6311 for (i = 0; i < MAX_VPORTS; i++) {
6312 port = &VPORT(i);
6313 rw_destroy(&port->node_rwlock);
6314 mutex_destroy(&EMLXS_PKT_LOCK);
6315 cv_destroy(&EMLXS_PKT_CV);
6316 mutex_destroy(&EMLXS_UB_LOCK);
6317 }
6318
6319 return;
6320
6321 } /* emlxs_lock_destroy() */
6322
6323
6324 /* init_flag values */
6325 #define ATTACH_SOFT_STATE 0x00000001
6326 #define ATTACH_FCA_TRAN 0x00000002
6327 #define ATTACH_HBA 0x00000004
6328 #define ATTACH_LOG 0x00000008
6329 #define ATTACH_MAP_BUS 0x00000010
6330 #define ATTACH_INTR_INIT 0x00000020
6331 #define ATTACH_PROP 0x00000040
6332 #define ATTACH_LOCK 0x00000080
6333 #define ATTACH_THREAD 0x00000100
6334 #define ATTACH_INTR_ADD 0x00000200
6335 #define ATTACH_ONLINE 0x00000400
6336 #define ATTACH_NODE 0x00000800
6337 #define ATTACH_FCT 0x00001000
6338 #define ATTACH_FCA 0x00002000
6339 #define ATTACH_KSTAT 0x00004000
6340 #define ATTACH_DHCHAP 0x00008000
6341 #define ATTACH_FM 0x00010000
6342 #define ATTACH_MAP_SLI 0x00020000
6343 #define ATTACH_SPAWN 0x00040000
6344 #define ATTACH_EVENTS 0x00080000
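/*
 * Illustrative sketch only (not taken from the original source): attach code
 * is expected to accumulate these bits as each setup step succeeds, then pass
 * the accumulated mask to emlxs_driver_remove() on failure so that only the
 * completed steps are torn down. For example:
 *
 *	uint32_t init_flag = 0;
 *
 *	if (ddi_soft_state_zalloc(emlxs_soft_state, ddiinst) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *	init_flag |= ATTACH_SOFT_STATE;
 *
 *	if (emlxs_map_bus(hba))
 *		goto failed;
 *	init_flag |= ATTACH_MAP_BUS;
 *	...
 * failed:
 *	emlxs_driver_remove(dip, init_flag, 1);
 */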
6345
6346 static void
6347 emlxs_driver_remove(dev_info_t *dip, uint32_t init_flag, uint32_t failed)
6348 {
6349 emlxs_hba_t *hba = NULL;
6350 int ddiinst;
6351
6352 ddiinst = ddi_get_instance(dip);
6353
6354 if (init_flag & ATTACH_HBA) {
6355 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
6356
6357 if (init_flag & ATTACH_SPAWN) {
6358 emlxs_thread_spawn_destroy(hba);
6359 }
6360
6361 if (init_flag & ATTACH_EVENTS) {
6362 (void) emlxs_event_queue_destroy(hba);
6363 }
6364
6365 if (init_flag & ATTACH_ONLINE) {
6366 (void) emlxs_offline(hba, 1);
6367 }
6368
6369 if (init_flag & ATTACH_INTR_ADD) {
6370 (void) EMLXS_INTR_REMOVE(hba);
6371 }
6372 #ifdef SFCT_SUPPORT
6373 if (init_flag & ATTACH_FCT) {
6374 emlxs_fct_detach(hba);
6375 emlxs_fct_modclose();
6376 }
6377 #endif /* SFCT_SUPPORT */
6378
6379 #ifdef DHCHAP_SUPPORT
6380 if (init_flag & ATTACH_DHCHAP) {
6381 emlxs_dhc_detach(hba);
6382 }
6383 #endif /* DHCHAP_SUPPORT */
6384
6385 if (init_flag & ATTACH_KSTAT) {
6386 kstat_delete(hba->kstat);
6387 }
6388
6389 if (init_flag & ATTACH_FCA) {
6390 emlxs_fca_detach(hba);
6391 }
6392
6393 if (init_flag & ATTACH_NODE) {
6394 (void) ddi_remove_minor_node(hba->dip, "devctl");
6395 }
6396
6397 if (init_flag & ATTACH_THREAD) {
6398 emlxs_thread_destroy(&hba->iodone_thread);
6399 }
6400
6401 if (init_flag & ATTACH_PROP) {
6402 (void) ddi_prop_remove_all(hba->dip);
6403 }
6404
6405 if (init_flag & ATTACH_LOCK) {
6406 emlxs_lock_destroy(hba);
6407 }
6408
6409 if (init_flag & ATTACH_INTR_INIT) {
6410 (void) EMLXS_INTR_UNINIT(hba);
6411 }
6412
6413 if (init_flag & ATTACH_MAP_BUS) {
6414 emlxs_unmap_bus(hba);
6415 }
6416
6417 if (init_flag & ATTACH_MAP_SLI) {
6418 EMLXS_SLI_UNMAP_HDW(hba);
6419 }
6420
6421 #ifdef FMA_SUPPORT
6422 if (init_flag & ATTACH_FM) {
6423 emlxs_fm_fini(hba);
6424 }
6425 #endif /* FMA_SUPPORT */
6426
6427 if (init_flag & ATTACH_LOG) {
6428 emlxs_msg_log_destroy(hba);
6429 }
6430
6431 if (init_flag & ATTACH_FCA_TRAN) {
6432 (void) ddi_set_driver_private(hba->dip, NULL);
6433 kmem_free(hba->fca_tran, sizeof (fc_fca_tran_t));
6434 hba->fca_tran = NULL;
6435 }
6436
6437 if (init_flag & ATTACH_HBA) {
6438 emlxs_device.log[hba->emlxinst] = 0;
6439 emlxs_device.hba[hba->emlxinst] =
6440 (emlxs_hba_t *)((unsigned long)((failed) ? -1 : 0));
6441 #ifdef DUMP_SUPPORT
6442 emlxs_device.dump_txtfile[hba->emlxinst] = 0;
6443 emlxs_device.dump_dmpfile[hba->emlxinst] = 0;
6444 emlxs_device.dump_ceefile[hba->emlxinst] = 0;
6445 #endif /* DUMP_SUPPORT */
6446
6447 }
6448 }
6449
6450 if (init_flag & ATTACH_SOFT_STATE) {
6451 (void) ddi_soft_state_free(emlxs_soft_state, ddiinst);
6452 }
6453
6454 return;
6455
6456 } /* emlxs_driver_remove() */
6457
6458
6459 /* This initializes the initiator (FCA) interface with the SFS stack */
6460 static uint32_t
6461 emlxs_fca_init(emlxs_hba_t *hba)
6462 {
6463 emlxs_port_t *port = &PPORT;
6464
6465 /* Check if SFS present */
6466 if (((void *)MODSYM(fc_fca_init) == NULL) ||
6467 ((void *)MODSYM(fc_fca_attach) == NULL)) {
6468 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6469 "SFS not present.");
6470 return (1);
6471 }
6472
6473 /* Check if our SFS driver interface matches the current SFS stack */
6474 if (MODSYM(fc_fca_attach) (hba->dip, hba->fca_tran) != DDI_SUCCESS) {
6475 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6476 "SFS/FCA version mismatch. FCA=0x%x",
6477 hba->fca_tran->fca_version);
6478 return (1);
6479 }
6480
6481 return (0);
6482
6483 } /* emlxs_fca_init() */
6484
6485
6486 /* This determines which ports will be initiator or target mode */
6487 static void
6488 emlxs_mode_init(emlxs_hba_t *hba)
6489 {
6490 emlxs_port_t *port = &PPORT;
6491 emlxs_config_t *cfg = &CFG;
6492 emlxs_port_t *vport;
6493 uint32_t i;
6494 uint32_t mode_mask;
6495
6496 /* Initialize mode masks */
6497 (void) emlxs_mode_init_masks(hba);
6498
6499 if (!(port->mode_mask & MODE_INITIATOR)) {
6500 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6501 "Initiator mode not enabled.");
6502
6503 #ifdef SFCT_SUPPORT
6504 /* Disable dynamic target mode */
6505 cfg[CFG_DTM_ENABLE].current = 0;
6506 #endif /* SFCT_SUPPORT */
6507
6508 goto done1;
6509 }
6510
6511 /* Try to initialize fca interface */
6512 if (emlxs_fca_init(hba) != 0) {
6513 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6514 "Initiator mode disabled.");
6515
6516 /* Disable initiator mode */
6517 port->mode_mask &= ~MODE_INITIATOR;
6518
6519 #ifdef SFCT_SUPPORT
6520 /* Disable dynamic target mode */
6521 cfg[CFG_DTM_ENABLE].current = 0;
6522 #endif /* SFCT_SUPPORT */
6523
6524 goto done1;
6525 }
6526
6527 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6528 "Initiator mode enabled.");
6529
6530 done1:
6531
6532 #ifdef SFCT_SUPPORT
6533 if (!(port->mode_mask & MODE_TARGET)) {
6534 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6535 "Target mode not enabled.");
6536
6537 /* Disable target modes */
6538 cfg[CFG_DTM_ENABLE].current = 0;
6539 cfg[CFG_TARGET_MODE].current = 0;
6540
6541 goto done2;
6542 }
6543
6544 /* Try to open the COMSTAR module */
6545 if (emlxs_fct_modopen() != 0) {
6546 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6547 "Target mode disabled.");
6548
6549 /* Disable target modes */
6550 port->mode_mask &= ~MODE_TARGET;
6551 cfg[CFG_DTM_ENABLE].current = 0;
6552 cfg[CFG_TARGET_MODE].current = 0;
6553
6554 goto done2;
6555 }
6556
6557 /* Try to initialize fct interface */
6558 if (emlxs_fct_init(hba) != 0) {
6559 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6560 "Target mode disabled.");
6561
6562 /* Disable target modes */
6563 port->mode_mask &= ~MODE_TARGET;
6564 cfg[CFG_DTM_ENABLE].current = 0;
6565 cfg[CFG_TARGET_MODE].current = 0;
6566
6567 goto done2;
6568 }
6569
6570 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6571 "Target mode enabled.");
6572
6573 done2:
6574 /* Adjust target mode parameter flags */
6575 if (cfg[CFG_DTM_ENABLE].current) {
6576 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6577 "Dynamic target mode enabled.");
6578
6579 cfg[CFG_TARGET_MODE].flags |= PARM_DYNAMIC;
6580 } else {
6581 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6582 "Dynamic target mode disabled.");
6583
6584 cfg[CFG_TARGET_MODE].flags &= ~PARM_DYNAMIC;
6585 }
6586 #endif /* SFCT_SUPPORT */
6587
6588 /* Now set port flags */
6589 mutex_enter(&EMLXS_PORT_LOCK);
6590
6591 /* Set flags for physical port */
6592 if (port->mode_mask & MODE_INITIATOR) {
6593 port->flag |= EMLXS_INI_ENABLED;
6594 } else {
6595 port->flag &= ~EMLXS_INI_ENABLED;
6596 }
6597
6598 if (port->mode_mask & MODE_TARGET) {
6599 port->flag |= EMLXS_TGT_ENABLED;
6600 } else {
6601 port->flag &= ~EMLXS_TGT_ENABLED;
6602 }
6603
6604 for (i = 1; i < MAX_VPORTS; i++) {
6605 vport = &VPORT(i);
6606
6607 /* Restrict the virtual port mask to the physical port's allowable bits */
6608 mode_mask = vport->mode_mask & port->mode_mask;
6609
6610 /* Set flags for virtual port */
6611 if (mode_mask & MODE_INITIATOR) {
6612 vport->flag |= EMLXS_INI_ENABLED;
6613 } else {
6614 vport->flag &= ~EMLXS_INI_ENABLED;
6615 }
6616
6617 if (mode_mask & MODE_TARGET) {
6618 vport->flag |= EMLXS_TGT_ENABLED;
6619 } else {
6620 vport->flag &= ~EMLXS_TGT_ENABLED;
6621 }
6622 }
6623
6624 /* Set initial driver mode */
6625 emlxs_mode_set(hba);
6626
6627 mutex_exit(&EMLXS_PORT_LOCK);
6628
6629 /* Recheck possible mode dependent parameters */
6630 /* in case conditions have changed. */
6631 if (port->mode != MODE_NONE) {
6632 for (i = 0; i < NUM_CFG_PARAM; i++) {
6633 cfg = &hba->config[i];
6634 cfg->current = emlxs_check_parm(hba, i, cfg->current);
6635 }
6636 }
6637
6638 return;
6639
6640 } /* emlxs_mode_init() */
6641
6642
6643 /* This must be called while holding the EMLXS_PORT_LOCK */
6644 extern void
6645 emlxs_mode_set(emlxs_hba_t *hba)
6646 {
6647 emlxs_port_t *port = &PPORT;
6648 #ifdef SFCT_SUPPORT
6649 emlxs_config_t *cfg = &CFG;
6650 #endif /* SFCT_SUPPORT */
6651 emlxs_port_t *vport;
6652 uint32_t i;
6653 uint32_t cfg_tgt_mode = 0;
6654
6655 /* mutex_enter(&EMLXS_PORT_LOCK); */
6656
6657 #ifdef SFCT_SUPPORT
6658 cfg_tgt_mode = cfg[CFG_TARGET_MODE].current;
6659 #endif /* SFCT_SUPPORT */
6660
6661 /* Initiator mode requested */
6662 if (!cfg_tgt_mode) {
6663 for (i = 0; i < MAX_VPORTS; i++) {
6664 vport = &VPORT(i);
6665 vport->mode = (vport->flag & EMLXS_INI_ENABLED)?
6666 MODE_INITIATOR:MODE_NONE;
6667 }
6668 #ifdef SFCT_SUPPORT
6669 /* Target mode requested */
6670 } else {
6671 for (i = 0; i < MAX_VPORTS; i++) {
6672 vport = &VPORT(i);
6673 vport->mode = (vport->flag & EMLXS_TGT_ENABLED)?
6674 MODE_TARGET:MODE_NONE;
6675 }
6676 #endif /* SFCT_SUPPORT */
6677 }
6678
6679 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
6680 "MODE: %s", emlxs_mode_xlate(port->mode));
6681
6682 /* mutex_exit(&EMLXS_PORT_LOCK); */
6683
6684 return;
6685
6686 } /* emlxs_mode_set() */
6687
6688
6689 static void
6690 emlxs_mode_init_masks(emlxs_hba_t *hba)
6691 {
6692 emlxs_port_t *port = &PPORT;
6693 emlxs_port_t *vport;
6694 uint32_t i;
6695
6696 #ifdef SFCT_SUPPORT
6697 emlxs_config_t *cfg = &CFG;
6698 uint32_t vport_mode_mask;
6699 uint32_t cfg_vport_mode_mask;
6700 uint32_t mode_mask;
6701 char string[256];
6702
6703 port->mode_mask = 0;
6704
6705 if (!cfg[CFG_TARGET_MODE].current ||
6706 cfg[CFG_DTM_ENABLE].current) {
6707 port->mode_mask |= MODE_INITIATOR;
6708 }
6709
6710 if (cfg[CFG_TARGET_MODE].current ||
6711 cfg[CFG_DTM_ENABLE].current) {
6712 port->mode_mask |= MODE_TARGET;
6713 }
6714
6715 /* Virtual port masks are limited to the physical port's allowable bits */
6716 vport_mode_mask = port->mode_mask;
6717 cfg_vport_mode_mask = cfg[CFG_VPORT_MODE_MASK].current;
6718
6719 /* Check dynamic target mode value for virtual ports */
6720 if (cfg[CFG_DTM_ENABLE].current == 0) {
6721 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6722 "%s = 0: Virtual target ports are not supported.",
6723 cfg[CFG_DTM_ENABLE].string);
6724
6725 vport_mode_mask &= ~MODE_TARGET;
6726 }
6727
6728 cfg_vport_mode_mask &= vport_mode_mask;
6729
6730 if (cfg[CFG_VPORT_MODE_MASK].current != cfg_vport_mode_mask) {
6731 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6732 "%s: Changing 0x%x --> 0x%x",
6733 cfg[CFG_VPORT_MODE_MASK].string,
6734 cfg[CFG_VPORT_MODE_MASK].current,
6735 cfg_vport_mode_mask);
6736
6737 cfg[CFG_VPORT_MODE_MASK].current = cfg_vport_mode_mask;
6738 }
6739
6740 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6741 "pport-mode-mask: %s", emlxs_mode_xlate(port->mode_mask));
6742
6743 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6744 "vport-mode-mask: %s", emlxs_mode_xlate(cfg_vport_mode_mask));
6745
6746 for (i = 1; i < MAX_VPORTS; i++) {
6747 vport = &VPORT(i);
6748
6749 (void) snprintf(string, sizeof (string),
6750 "%s%d-vport%d-mode-mask", DRIVER_NAME, hba->ddiinst, i);
6751
6752 mode_mask = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
6753 (void *)hba->dip, DDI_PROP_DONTPASS, string,
6754 cfg_vport_mode_mask);
6755
6756 vport->mode_mask = mode_mask & vport_mode_mask;
6757
6758 if (vport->mode_mask != cfg_vport_mode_mask) {
6759 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6760 "vport%d-mode-mask: %s",
6761 i, emlxs_mode_xlate(vport->mode_mask));
6762 }
6763 }
6764 #else
6765 port->mode_mask = MODE_INITIATOR;
6766 for (i = 1; i < MAX_VPORTS; i++) {
6767 vport = &VPORT(i);
6768 vport->mode_mask = MODE_INITIATOR;
6769 }
6770 #endif /* SFCT_SUPPORT */
6771
6772 return;
6773
6774 } /* emlxs_mode_init_masks() */
6775
6776
6777 static void
6778 emlxs_fca_attach(emlxs_hba_t *hba)
6779 {
6780 emlxs_port_t *port;
6781 uint32_t i;
6782
6783 /* Update our transport structure */
6784 hba->fca_tran->fca_iblock = (ddi_iblock_cookie_t *)&hba->intr_arg;
6785 hba->fca_tran->fca_cmd_max = hba->io_throttle;
6786
6787 for (i = 0; i < MAX_VPORTS; i++) {
6788 port = &VPORT(i);
6789 port->ub_count = EMLXS_UB_TOKEN_OFFSET;
6790 port->ub_pool = NULL;
6791 }
6792
6793 #if (EMLXS_MODREV >= EMLXS_MODREV5)
6794 bcopy((caddr_t)&hba->wwpn, (caddr_t)&hba->fca_tran->fca_perm_pwwn,
6795 sizeof (NAME_TYPE));
6796 #endif /* >= EMLXS_MODREV5 */
6797
6798 return;
6799
6800 } /* emlxs_fca_attach() */
6801
6802
6803 static void
6804 emlxs_fca_detach(emlxs_hba_t *hba)
6805 {
6806 emlxs_port_t *port = &PPORT;
6807 uint32_t i;
6808 emlxs_port_t *vport;
6809
6810 if (!(port->flag & EMLXS_INI_ENABLED)) {
6811 return;
6812 }
6813
6814 if ((void *)MODSYM(fc_fca_detach) != NULL) {
6815 MODSYM(fc_fca_detach)(hba->dip);
6816 }
6817
6818 /* Disable INI mode for all ports */
6819 for (i = 0; i < MAX_VPORTS; i++) {
6820 vport = &VPORT(i);
6821 vport->flag &= ~EMLXS_INI_ENABLED;
6822 }
6823
6824 return;
6825
6826 } /* emlxs_fca_detach() */
6827
6828
6829 static void
6830 emlxs_drv_banner(emlxs_hba_t *hba)
6831 {
6832 emlxs_port_t *port = &PPORT;
6833 uint32_t i;
6834 char sli_mode[16];
6835 char msi_mode[16];
6836 char npiv_mode[16];
6837 emlxs_vpd_t *vpd = &VPD;
6838 uint8_t *wwpn;
6839 uint8_t *wwnn;
6840 uint32_t fw_show = 0;
6841
6842 /* Display firmware library one time for all driver instances */
6843 mutex_enter(&emlxs_device.lock);
6844 if (!(emlxs_instance_flag & EMLXS_FW_SHOW)) {
6845 emlxs_instance_flag |= EMLXS_FW_SHOW;
6846 fw_show = 1;
6847 }
6848 mutex_exit(&emlxs_device.lock);
6849
6850 if (fw_show) {
6851 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s",
6852 emlxs_copyright);
6853 emlxs_fw_show(hba);
6854 }
6855
6856 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s (%s)", emlxs_label,
6857 emlxs_revision);
6858
6859 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
6860 "%s Ven_id:%x Dev_id:%x Sub_id:%x Id:%d", hba->model_info.model,
6861 hba->model_info.vendor_id, hba->model_info.device_id,
6862 hba->model_info.ssdid, hba->model_info.id);
6863
6864 #ifdef EMLXS_I386
6865
6866 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
6867 "Firmware:%s (%s) Boot:%s", vpd->fw_version, vpd->fw_label,
6868 vpd->boot_version);
6869
6870 #else /* EMLXS_SPARC */
6871
6872 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
6873 "Firmware:%s (%s) Boot:%s Fcode:%s", vpd->fw_version,
6874 vpd->fw_label, vpd->boot_version, vpd->fcode_version);
6875
6876 #endif /* EMLXS_I386 */
6877
6878 if (hba->sli_mode > 3) {
6879 (void) snprintf(sli_mode, sizeof (sli_mode), "SLI:%d(%s)",
6880 hba->sli_mode,
6881 ((hba->flag & FC_FIP_SUPPORTED) ? "FIP" : "nonFIP"));
6882 } else {
6883 (void) snprintf(sli_mode, sizeof (sli_mode), "SLI:%d",
6884 hba->sli_mode);
6885 }
6886
6887 (void) strlcpy(msi_mode, " INTX:1", sizeof (msi_mode));
6888
6889 #ifdef MSI_SUPPORT
6890 if (hba->intr_flags & EMLXS_MSI_ENABLED) {
6891 switch (hba->intr_type) {
6892 case DDI_INTR_TYPE_FIXED:
6893 (void) strlcpy(msi_mode, " MSI:0", sizeof (msi_mode));
6894 break;
6895
6896 case DDI_INTR_TYPE_MSI:
6897 (void) snprintf(msi_mode, sizeof (msi_mode), " MSI:%d",
6898 hba->intr_count);
6899 break;
6900
6901 case DDI_INTR_TYPE_MSIX:
6902 (void) snprintf(msi_mode, sizeof (msi_mode), " MSIX:%d",
6903 hba->intr_count);
6904 break;
6905 }
6906 }
6907 #endif /* MSI_SUPPORT */
6908
6909 (void) strlcpy(npiv_mode, "", sizeof (npiv_mode));
6910
6911 if (hba->flag & FC_NPIV_ENABLED) {
6912 (void) snprintf(npiv_mode, sizeof (npiv_mode), " NPIV:%d",
6913 hba->vpi_max+1);
6914 } else {
6915 (void) strlcpy(npiv_mode, " NPIV:0", sizeof (npiv_mode));
6916 }
6917
6918 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
6919 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s%s%s%s%s%s",
6920 sli_mode, msi_mode, npiv_mode,
6921 ((port->flag & EMLXS_INI_ENABLED)? " FCA":""),
6922 ((port->flag & EMLXS_TGT_ENABLED)? " FCT":""),
6923 ((SLI4_FCOE_MODE)? " FCoE":" FC"));
6924 } else {
6925 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s%s%s%s%s",
6926 sli_mode, msi_mode, npiv_mode,
6927 ((port->flag & EMLXS_INI_ENABLED)? " FCA":""),
6928 ((port->flag & EMLXS_TGT_ENABLED)? " FCT":""));
6929 }
6930
6931 wwpn = (uint8_t *)&hba->wwpn;
6932 wwnn = (uint8_t *)&hba->wwnn;
6933 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
6934 "WWPN:%02X%02X%02X%02X%02X%02X%02X%02X "
6935 "WWNN:%02X%02X%02X%02X%02X%02X%02X%02X",
6936 wwpn[0], wwpn[1], wwpn[2], wwpn[3], wwpn[4], wwpn[5], wwpn[6],
6937 wwpn[7], wwnn[0], wwnn[1], wwnn[2], wwnn[3], wwnn[4], wwnn[5],
6938 wwnn[6], wwnn[7]);
6939
6940 for (i = 0; i < MAX_VPORTS; i++) {
6941 port = &VPORT(i);
6942
6943 if (!(port->flag & EMLXS_PORT_CONFIG)) {
6944 continue;
6945 }
6946
6947 wwpn = (uint8_t *)&port->wwpn;
6948 wwnn = (uint8_t *)&port->wwnn;
6949
6950 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
6951 "WWPN:%02X%02X%02X%02X%02X%02X%02X%02X "
6952 "WWNN:%02X%02X%02X%02X%02X%02X%02X%02X",
6953 wwpn[0], wwpn[1], wwpn[2], wwpn[3], wwpn[4], wwpn[5],
6954 wwpn[6], wwpn[7], wwnn[0], wwnn[1], wwnn[2], wwnn[3],
6955 wwnn[4], wwnn[5], wwnn[6], wwnn[7]);
6956 }
6957
6958 /*
6959 * Announce the device: ddi_report_dev() prints a banner at boot time,
6960 * announcing the device pointed to by dip.
6961 */
6962 (void) ddi_report_dev(hba->dip);
6963
6964 return;
6965
6966 } /* emlxs_drv_banner() */
6967
6968
6969 extern void
6970 emlxs_get_fcode_version(emlxs_hba_t *hba)
6971 {
6972 emlxs_vpd_t *vpd = &VPD;
6973 char *prop_str;
6974 int status;
6975
6976 /* Setup fcode version property */
6977 prop_str = NULL;
6978 status =
6979 ddi_prop_lookup_string(DDI_DEV_T_ANY, (dev_info_t *)hba->dip, 0,
6980 "fcode-version", (char **)&prop_str);
6981
6982 if (status == DDI_PROP_SUCCESS) {
6983 bcopy(prop_str, vpd->fcode_version, strlen(prop_str));
6984 (void) ddi_prop_free((void *)prop_str);
6985 } else {
6986 (void) strncpy(vpd->fcode_version, "none",
6987 (sizeof (vpd->fcode_version)-1));
6988 }
6989
6990 return;
6991
6992 } /* emlxs_get_fcode_version() */
6993
6994
6995 static int
6996 emlxs_hba_attach(dev_info_t *dip)
6997 {
6998 emlxs_hba_t *hba;
6999 emlxs_port_t *port;
7000 emlxs_config_t *cfg;
7001 char *prop_str;
7002 int ddiinst;
7003 int32_t emlxinst;
7004 int status;
7005 uint32_t rval;
7006 uint32_t init_flag = 0;
7007 char local_pm_components[32];
7008 uint32_t i;
7009
7010 ddiinst = ddi_get_instance(dip);
7011 emlxinst = emlxs_add_instance(ddiinst);
7012
7013 if (emlxinst >= MAX_FC_BRDS) {
7014 cmn_err(CE_WARN,
7015 "?%s: fca_hba_attach failed. Too many driver ddiinsts. "
7016 "inst=%x", DRIVER_NAME, ddiinst);
7017 return (DDI_FAILURE);
7018 }
7019
7020 if (emlxs_device.hba[emlxinst] == (emlxs_hba_t *)-1) {
7021 return (DDI_FAILURE);
7022 }
7023
7024 if (emlxs_device.hba[emlxinst]) {
7025 return (DDI_SUCCESS);
7026 }
7027
7028 /* An adapter can accidentally be plugged into a slave-only PCI slot */
7029 if (ddi_slaveonly(dip) == DDI_SUCCESS) {
7030 cmn_err(CE_WARN,
7031 "?%s%d: fca_hba_attach failed. Device in slave-only slot.",
7032 DRIVER_NAME, ddiinst);
7033 return (DDI_FAILURE);
7034 }
7035
7036 /* Allocate emlxs_dev_ctl structure. */
7037 if (ddi_soft_state_zalloc(emlxs_soft_state, ddiinst) != DDI_SUCCESS) {
7038 cmn_err(CE_WARN,
7039 "?%s%d: fca_hba_attach failed. Unable to allocate soft "
7040 "state.", DRIVER_NAME, ddiinst);
7041 return (DDI_FAILURE);
7042 }
7043 init_flag |= ATTACH_SOFT_STATE;
7044
7045 if ((hba = (emlxs_hba_t *)ddi_get_soft_state(emlxs_soft_state,
7046 ddiinst)) == NULL) {
7047 cmn_err(CE_WARN,
7048 "?%s%d: fca_hba_attach failed. Unable to get soft state.",
7049 DRIVER_NAME, ddiinst);
7050 goto failed;
7051 }
7052 bzero((char *)hba, sizeof (emlxs_hba_t));
7053
7054 emlxs_device.hba[emlxinst] = hba;
7055 emlxs_device.log[emlxinst] = &hba->log;
7056
7057 #ifdef DUMP_SUPPORT
7058 emlxs_device.dump_txtfile[emlxinst] = &hba->dump_txtfile;
7059 emlxs_device.dump_dmpfile[emlxinst] = &hba->dump_dmpfile;
7060 emlxs_device.dump_ceefile[emlxinst] = &hba->dump_ceefile;
7061 #endif /* DUMP_SUPPORT */
7062
7063 hba->dip = dip;
7064 hba->emlxinst = emlxinst;
7065 hba->ddiinst = ddiinst;
7066
7067 init_flag |= ATTACH_HBA;
7068
7069 /* Enable the physical port on this HBA */
7070 port = &PPORT;
7071 port->hba = hba;
7072 port->vpi = 0;
7073 port->flag |= EMLXS_PORT_ENABLED;
7074
7075 /* Allocate a transport structure */
7076 hba->fca_tran =
7077 (fc_fca_tran_t *)kmem_zalloc(sizeof (fc_fca_tran_t), KM_NOSLEEP);
7078 if (hba->fca_tran == NULL) {
7079 cmn_err(CE_WARN,
7080 "?%s%d: fca_hba_attach failed. Unable to allocate fca_tran "
7081 "memory.", DRIVER_NAME, ddiinst);
7082 goto failed;
7083 }
7084 bcopy((caddr_t)&emlxs_fca_tran, (caddr_t)hba->fca_tran,
7085 sizeof (fc_fca_tran_t));
7086
7087 /*
7088 * Copy the global ddi_dma_attr to the local hba fields
7089 */
7090 bcopy((caddr_t)&emlxs_dma_attr, (caddr_t)&hba->dma_attr,
7091 sizeof (ddi_dma_attr_t));
7092 bcopy((caddr_t)&emlxs_dma_attr_ro, (caddr_t)&hba->dma_attr_ro,
7093 sizeof (ddi_dma_attr_t));
7094 bcopy((caddr_t)&emlxs_dma_attr_1sg, (caddr_t)&hba->dma_attr_1sg,
7095 sizeof (ddi_dma_attr_t));
7096 bcopy((caddr_t)&emlxs_dma_attr_fcip_rsp,
7097 (caddr_t)&hba->dma_attr_fcip_rsp, sizeof (ddi_dma_attr_t));
7098
7099 /* Reset the fca_tran dma_attr fields to the per-hba copies */
7100 hba->fca_tran->fca_dma_attr = &hba->dma_attr;
7101 hba->fca_tran->fca_dma_fcp_cmd_attr = &hba->dma_attr_1sg;
7102 hba->fca_tran->fca_dma_fcp_rsp_attr = &hba->dma_attr_1sg;
7103 hba->fca_tran->fca_dma_fcp_data_attr = &hba->dma_attr_ro;
7104 hba->fca_tran->fca_dma_fcip_cmd_attr = &hba->dma_attr_1sg;
7105 hba->fca_tran->fca_dma_fcip_rsp_attr = &hba->dma_attr_fcip_rsp;
7106 hba->fca_tran->fca_dma_fcsm_cmd_attr = &hba->dma_attr_1sg;
7107 hba->fca_tran->fca_dma_fcsm_rsp_attr = &hba->dma_attr;
7108
7109 /* Set the transport structure pointer in our dip */
7110 /* SFS may panic if we are in target only mode */
7111 /* We will update the transport structure later */
7112 (void) ddi_set_driver_private(dip, (caddr_t)&emlxs_fca_tran);
7113 init_flag |= ATTACH_FCA_TRAN;
7114
7115 /* Perform driver integrity check */
7116 rval = emlxs_integrity_check(hba);
7117 if (rval) {
7118 cmn_err(CE_WARN,
7119 "?%s%d: fca_hba_attach failed. Driver integrity check "
7120 "failed. %d error(s) found.", DRIVER_NAME, ddiinst, rval);
7121 goto failed;
7122 }
7123
7124 cfg = &CFG;
7125
7126 bcopy((uint8_t *)&emlxs_cfg, (uint8_t *)cfg, sizeof (emlxs_cfg));
7127 /*
7128 * Gen7 chips respond with "unknown command", so the heartbeat is
7129 * disabled by default. It can be re-enabled in emlxs.conf.
7130 */
7131 if ((hba->sli_intf & SLI_INTF_IF_TYPE_MASK) == SLI_INTF_IF_TYPE_6)
7132 cfg[CFG_HEARTBEAT_ENABLE].current = 0;
7133
7134 #ifdef MSI_SUPPORT
7135 if ((void *)&ddi_intr_get_supported_types != NULL) {
7136 hba->intr_flags |= EMLXS_MSI_ENABLED;
7137 }
7138 #endif /* MSI_SUPPORT */
7139
7140
7141 /* Create the msg log file */
7142 if (emlxs_msg_log_create(hba) == 0) {
7143 cmn_err(CE_WARN,
7144 "?%s%d: fca_hba_attach failed. Unable to create message "
7145 "log", DRIVER_NAME, ddiinst);
7146 goto failed;
7147
7148 }
7149 init_flag |= ATTACH_LOG;
7150
7151 /* We can begin to use EMLXS_MSGF from this point on */
7152
7153 /*
7154 * Find the I/O bus type. If it is not an SBUS card,
7155 * then it is a PCI card. Default is PCI_FC (0).
7156 */
7157 prop_str = NULL;
7158 status = ddi_prop_lookup_string(DDI_DEV_T_ANY,
7159 (dev_info_t *)dip, 0, "name", (char **)&prop_str);
7160
7161 if (status == DDI_PROP_SUCCESS) {
7162 if (strncmp(prop_str, "lpfs", 4) == 0) {
7163 hba->bus_type = SBUS_FC;
7164 }
7165
7166 (void) ddi_prop_free((void *)prop_str);
7167 }
7168
7169 /*
7170 * Copy DDS from the config method and update configuration parameters
7171 */
7172 (void) emlxs_get_props(hba);
7173
7174 #ifdef FMA_SUPPORT
7175 hba->fm_caps = cfg[CFG_FM_CAPS].current;
7176
7177 emlxs_fm_init(hba);
7178
7179 init_flag |= ATTACH_FM;
7180 #endif /* FMA_SUPPORT */
7181
7182 if (emlxs_map_bus(hba)) {
7183 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7184 "Unable to map memory");
7185 goto failed;
7186
7187 }
7188 init_flag |= ATTACH_MAP_BUS;
7189
7190 /* Attempt to identify the adapter */
7191 rval = emlxs_init_adapter_info(hba);
7192
7193 if (rval == 0) {
7194 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7195 "Unable to get adapter info. Id:%d Vendor id:0x%x "
7196 "Device id:0x%x Model:%s", hba->model_info.id,
7197 hba->model_info.vendor_id, hba->model_info.device_id,
7198 hba->model_info.model);
7199 goto failed;
7200 }
7201
7202 /* Check if adapter is not supported */
7203 if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) {
7204 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7205 "Unsupported adapter found. Id:%d Vendor id:0x%x "
7206 "Device id:0x%x SSDID:0x%x Model:%s", hba->model_info.id,
7207 hba->model_info.vendor_id, hba->model_info.device_id,
7208 hba->model_info.ssdid, hba->model_info.model);
7209 goto failed;
7210 }
7211
7212 if (hba->model_info.sli_mask & EMLXS_SLI4_MASK) {
7213 hba->sli.sli4.mem_sgl_size = MEM_SGL_SIZE;
7214
7215 #ifdef EMLXS_I386
7216 /*
7217 * TigerShark has 64K limit for SG element size
7218 * Do this for x86 alone. For SPARC, the driver
7219 * breaks up the single SGE later on.
7220 */
7221 hba->dma_attr_ro.dma_attr_count_max = 0xffff;
7222
7223 i = cfg[CFG_MAX_XFER_SIZE].current;
7224 /* Update SGL size based on max_xfer_size */
7225 if (i > 516096) {
7226 /* 516096 = (((2048 / 16) - 2) * 4096) */
7227 hba->sli.sli4.mem_sgl_size = 4096;
7228 } else if (i > 253952) {
7229 /* 253952 = (((1024 / 16) - 2) * 4096) */
7230 hba->sli.sli4.mem_sgl_size = 2048;
7231 } else {
7232 hba->sli.sli4.mem_sgl_size = 1024;
7233 }
7234 #endif /* EMLXS_I386 */
7235
7236 i = SGL_TO_SGLLEN(hba->sli.sli4.mem_sgl_size);
7237 } else {
7238 hba->sli.sli3.mem_bpl_size = MEM_BPL_SIZE;
7239
7240 #ifdef EMLXS_I386
7241 i = cfg[CFG_MAX_XFER_SIZE].current;
7242 /* Update BPL size based on max_xfer_size */
7243 if (i > 688128) {
7244 /* 688128 = (((2048 / 12) - 2) * 4096) */
7245 hba->sli.sli3.mem_bpl_size = 4096;
7246 } else if (i > 339968) {
7247 /* 339968 = (((1024 / 12) - 2) * 4096) */
7248 hba->sli.sli3.mem_bpl_size = 2048;
7249 } else {
7250 hba->sli.sli3.mem_bpl_size = 1024;
7251 }
7252 #endif /* EMLXS_I386 */
7253
7254 i = BPL_TO_SGLLEN(hba->sli.sli3.mem_bpl_size);
7255 }
7256
7257 /* Update dma_attr_sgllen based on true SGL length */
7258 hba->dma_attr.dma_attr_sgllen = i;
7259 hba->dma_attr_ro.dma_attr_sgllen = i;
7260 hba->dma_attr_fcip_rsp.dma_attr_sgllen = i;
7261
7262 if (EMLXS_SLI_MAP_HDW(hba)) {
7263 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7264 "Unable to map memory");
7265 goto failed;
7266
7267 }
7268 init_flag |= ATTACH_MAP_SLI;
7269
7270 /* Initialize the interrupts. But don't add them yet */
7271 status = EMLXS_INTR_INIT(hba, 0);
7272 if (status != DDI_SUCCESS) {
7273 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7274 "Unable to initialize interrupt(s).");
7275 goto failed;
7276
7277 }
7278 init_flag |= ATTACH_INTR_INIT;
7279
7280 /* Initialize LOCKs */
7281 emlxs_msg_lock_reinit(hba);
7282 emlxs_lock_init(hba);
7283 init_flag |= ATTACH_LOCK;
7284
7285 /* Create the event queue */
7286 if (emlxs_event_queue_create(hba) == 0) {
7287 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7288 "Unable to create event queue");
7289
7290 goto failed;
7291
7292 }
7293 init_flag |= ATTACH_EVENTS;
7294
7295 /* Initialize the power management */
7296 mutex_enter(&EMLXS_PM_LOCK);
7297 hba->pm_state = EMLXS_PM_IN_ATTACH;
7298 hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
7299 hba->pm_busy = 0;
7300 #ifdef IDLE_TIMER
7301 hba->pm_active = 1;
7302 hba->pm_idle_timer = 0;
7303 #endif /* IDLE_TIMER */
7304 mutex_exit(&EMLXS_PM_LOCK);
7305
7306 /* Set the pm component name */
7307 (void) snprintf(local_pm_components, sizeof (local_pm_components),
7308 "NAME=%s%d", DRIVER_NAME, ddiinst);
7309 emlxs_pm_components[0] = local_pm_components;
7310
7311 /* Check if power management support is enabled */
7312 if (cfg[CFG_PM_SUPPORT].current) {
7313 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
7314 "pm-components", emlxs_pm_components,
7315 sizeof (emlxs_pm_components) /
7316 sizeof (emlxs_pm_components[0])) !=
7317 DDI_PROP_SUCCESS) {
7318 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7319 "Unable to create pm components.");
7320 goto failed;
7321 }
7322 }
7323
7324 /* Needed for suspend and resume support */
7325 (void) ddi_prop_update_string(DDI_DEV_T_NONE, dip, "pm-hardware-state",
7326 "needs-suspend-resume");
7327 init_flag |= ATTACH_PROP;
7328
7329 emlxs_thread_spawn_create(hba);
7330 init_flag |= ATTACH_SPAWN;
7331
7332 emlxs_thread_create(hba, &hba->iodone_thread);
7333
7334 init_flag |= ATTACH_THREAD;
7335
7336 retry:
7337 /* Setup initiator / target ports */
7338 emlxs_mode_init(hba);
7339
7340 /* If driver did not attach to either stack, */
7341 /* then driver attach fails */
7342 if (port->mode == MODE_NONE) {
7343 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7344 "Driver interfaces not enabled.");
7345 goto failed;
7346 }
7347
7348 /*
7349 * Initialize HBA
7350 */
7351
7352 /* Set initial state */
7353 mutex_enter(&EMLXS_PORT_LOCK);
7354 hba->flag |= FC_OFFLINE_MODE;
7355 hba->flag &= ~(FC_ONLINE_MODE | FC_ONLINING_MODE | FC_OFFLINING_MODE);
7356 mutex_exit(&EMLXS_PORT_LOCK);
7357
7358 if (status = emlxs_online(hba)) {
7359 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7360 "Unable to initialize adapter.");
7361
7362 if (status == EAGAIN) {
7363 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7364 "Retrying adapter initialization ...");
7365 goto retry;
7366 }
7367 goto failed;
7368 }
7369 init_flag |= ATTACH_ONLINE;
7370
7371 /* This is to ensure that the model property is properly set */
7372 (void) ddi_prop_update_string(DDI_DEV_T_NONE, dip, "model",
7373 hba->model_info.model);
7374
7375 /* Create the device node. */
7376 if (ddi_create_minor_node(dip, "devctl", S_IFCHR, ddiinst, NULL, 0) ==
7377 DDI_FAILURE) {
7378 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
7379 "Unable to create device node.");
7380 goto failed;
7381 }
7382 init_flag |= ATTACH_NODE;
7383
7384 /* Attach initiator now */
7385 /* This must come after emlxs_online() */
7386 emlxs_fca_attach(hba);
7387 init_flag |= ATTACH_FCA;
7388
7389 /* Initialize kstat information */
7390 hba->kstat = kstat_create(DRIVER_NAME,
7391 ddiinst, "statistics", "controller",
7392 KSTAT_TYPE_RAW, sizeof (emlxs_stats_t),
7393 KSTAT_FLAG_VIRTUAL);
7394
7395 if (hba->kstat == NULL) {
7396 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
7397 "kstat_create failed.");
7398 } else {
7399 hba->kstat->ks_data = (void *)&hba->stats;
7400 kstat_install(hba->kstat);
7401 init_flag |= ATTACH_KSTAT;
7402 }
7403
7404 #if (EMLXS_MODREV >= EMLXS_MODREV3) && (EMLXS_MODREV <= EMLXS_MODREV4)
7405 /* Setup virtual port properties */
7406 emlxs_read_vport_prop(hba);
7407 #endif /* EMLXS_MODREV3 || EMLXS_MODREV4 */
7408
7409
7410 #ifdef DHCHAP_SUPPORT
7411 emlxs_dhc_attach(hba);
7412 init_flag |= ATTACH_DHCHAP;
7413 #endif /* DHCHAP_SUPPORT */
7414
7415 /* Display the driver banner now */
7416 emlxs_drv_banner(hba);
7417
7418 /* Raise the power level */
7419
7420 /*
7421 * This will not execute emlxs_hba_resume because
7422 * EMLXS_PM_IN_ATTACH is set
7423 */
7424 if (emlxs_pm_raise_power(dip) != DDI_SUCCESS) {
7425 /* Set power up anyway. This should not happen! */
7426 mutex_enter(&EMLXS_PM_LOCK);
7427 hba->pm_level = EMLXS_PM_ADAPTER_UP;
7428 hba->pm_state &= ~EMLXS_PM_IN_ATTACH;
7429 mutex_exit(&EMLXS_PM_LOCK);
7430 } else {
7431 mutex_enter(&EMLXS_PM_LOCK);
7432 hba->pm_state &= ~EMLXS_PM_IN_ATTACH;
7433 mutex_exit(&EMLXS_PM_LOCK);
7434 }
7435
7436 #ifdef SFCT_SUPPORT
7437 if (port->flag & EMLXS_TGT_ENABLED) {
7438 /* Do this last */
7439 emlxs_fct_attach(hba);
7440 init_flag |= ATTACH_FCT;
7441 }
7442 #endif /* SFCT_SUPPORT */
7443
7444 return (DDI_SUCCESS);
7445
7446 failed:
7447
7448 emlxs_driver_remove(dip, init_flag, 1);
7449
7450 return (DDI_FAILURE);
7451
7452 } /* emlxs_hba_attach() */
7453
7454
7455 static int
7456 emlxs_hba_detach(dev_info_t *dip)
7457 {
7458 emlxs_hba_t *hba;
7459 emlxs_port_t *port;
7460 int ddiinst;
7461 int count;
7462 uint32_t init_flag = (uint32_t)-1;
7463
7464 ddiinst = ddi_get_instance(dip);
7465 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
7466 port = &PPORT;
7467
7468 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg, NULL);
7469
7470 mutex_enter(&EMLXS_PM_LOCK);
7471 hba->pm_state |= EMLXS_PM_IN_DETACH;
7472 mutex_exit(&EMLXS_PM_LOCK);
7473
7474 /* Lower the power level */
7475 /*
7476 * This will not suspend the driver since the
7477 * EMLXS_PM_IN_DETACH has been set
7478 */
7479 if (emlxs_pm_lower_power(dip) != DDI_SUCCESS) {
7480 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
7481 "Unable to lower power.");
7482
7483 mutex_enter(&EMLXS_PM_LOCK);
7484 hba->pm_state &= ~EMLXS_PM_IN_DETACH;
7485 mutex_exit(&EMLXS_PM_LOCK);
7486
7487 return (DDI_FAILURE);
7488 }
7489
7490 /* Take the adapter offline first, if not already */
7491 if (emlxs_offline(hba, 1) != 0) {
7492 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
7493 "Unable to take adapter offline.");
7494
7495 mutex_enter(&EMLXS_PM_LOCK);
7496 hba->pm_state &= ~EMLXS_PM_IN_DETACH;
7497 mutex_exit(&EMLXS_PM_LOCK);
7498
7499 (void) emlxs_pm_raise_power(dip);
7500
7501 return (DDI_FAILURE);
7502 }
7503 /* Check ub buffer pools */
7504 if (port->ub_pool) {
7505 mutex_enter(&EMLXS_UB_LOCK);
7506
7507 /* Wait up to 10 seconds for all ub pools to be freed */
7508 count = 10 * 2;
7509 while (port->ub_pool && count) {
7510 mutex_exit(&EMLXS_UB_LOCK);
7511 delay(drv_usectohz(500000)); /* half second wait */
7512 count--;
7513 mutex_enter(&EMLXS_UB_LOCK);
7514 }
7515
7516 if (port->ub_pool) {
7517 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
7518 "fca_unbind_port: Unsolicited buffers still "
7519 "active. port=%p. Destroying...", port);
7520
7521 /* Destroy all pools */
7522 while (port->ub_pool) {
7523 emlxs_ub_destroy(port, port->ub_pool);
7524 }
7525 }
7526
7527 mutex_exit(&EMLXS_UB_LOCK);
7528 }
7529 init_flag &= ~ATTACH_ONLINE;
7530
7531 /* Remove the driver instance */
7532 emlxs_driver_remove(dip, init_flag, 0);
7533
7534 return (DDI_SUCCESS);
7535
7536 } /* emlxs_hba_detach() */
7537
7538
7539 extern int
7540 emlxs_map_bus(emlxs_hba_t *hba)
7541 {
7542 emlxs_port_t *port = &PPORT;
7543 dev_info_t *dip;
7544 ddi_device_acc_attr_t dev_attr;
7545 int status;
7546
7547 dip = (dev_info_t *)hba->dip;
7548 dev_attr = emlxs_dev_acc_attr;
7549
7550 if (hba->bus_type == SBUS_FC) {
7551 if (hba->pci_acc_handle == 0) {
7552 status = ddi_regs_map_setup(dip,
7553 SBUS_DFLY_PCI_CFG_RINDEX,
7554 (caddr_t *)&hba->pci_addr,
7555 0, 0, &emlxs_dev_acc_attr, &hba->pci_acc_handle);
7556 if (status != DDI_SUCCESS) {
7557 EMLXS_MSGF(EMLXS_CONTEXT,
7558 &emlxs_attach_failed_msg,
7559 "(SBUS) ddi_regs_map_setup PCI failed. "
7560 "status=%x", status);
7561 goto failed;
7562 }
7563 }
7564
7565 if (hba->sbus_pci_handle == 0) {
7566 status = ddi_regs_map_setup(dip,
7567 SBUS_TITAN_PCI_CFG_RINDEX,
7568 (caddr_t *)&hba->sbus_pci_addr,
7569 0, 0, &dev_attr, &hba->sbus_pci_handle);
7570 if (status != DDI_SUCCESS) {
7571 EMLXS_MSGF(EMLXS_CONTEXT,
7572 &emlxs_attach_failed_msg,
7573 "(SBUS) ddi_regs_map_setup TITAN PCI "
7574 "failed. status=%x", status);
7575 goto failed;
7576 }
7577 }
7578
7579 } else { /* ****** PCI ****** */
7580
7581 if (hba->pci_acc_handle == 0) {
7582 status = ddi_regs_map_setup(dip,
7583 PCI_CFG_RINDEX,
7584 (caddr_t *)&hba->pci_addr,
7585 0, 0, &emlxs_dev_acc_attr, &hba->pci_acc_handle);
7586 if (status != DDI_SUCCESS) {
7587 EMLXS_MSGF(EMLXS_CONTEXT,
7588 &emlxs_attach_failed_msg,
7589 "(PCI) ddi_regs_map_setup PCI failed. "
7590 "status=%x", status);
7591 goto failed;
7592 }
7593 }
7594 #ifdef EMLXS_I386
7595 /* Set up the PCI configuration space */
7596 (void) ddi_put16(hba->pci_acc_handle,
7597 (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER),
7598 CMD_CFG_VALUE | CMD_IO_ENBL);
7599
7600 #ifdef FMA_SUPPORT
7601 if (emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
7602 != DDI_FM_OK) {
7603 EMLXS_MSGF(EMLXS_CONTEXT,
7604 &emlxs_invalid_access_handle_msg, NULL);
7605 goto failed;
7606 }
7607 #endif /* FMA_SUPPORT */
7608
7609 #endif /* EMLXS_I386 */
7610
7611 }
7612 return (0);
7613
7614 failed:
7615
7616 emlxs_unmap_bus(hba);
7617 return (ENOMEM);
7618
7619 } /* emlxs_map_bus() */
7620
7621
7622 extern void
7623 emlxs_unmap_bus(emlxs_hba_t *hba)
7624 {
7625 if (hba->pci_acc_handle) {
7626 (void) ddi_regs_map_free(&hba->pci_acc_handle);
7627 hba->pci_acc_handle = 0;
7628 }
7629
7630 if (hba->sbus_pci_handle) {
7631 (void) ddi_regs_map_free(&hba->sbus_pci_handle);
7632 hba->sbus_pci_handle = 0;
7633 }
7634
7635 return;
7636
7637 } /* emlxs_unmap_bus() */
7638
7639
7640 static int
7641 emlxs_get_props(emlxs_hba_t *hba)
7642 {
7643 emlxs_config_t *cfg;
7644 uint32_t i;
7645 char string[256];
7646 uint32_t new_value;
7647
7648 /* Initialize each parameter */
7649 for (i = 0; i < NUM_CFG_PARAM; i++) {
7650 cfg = &hba->config[i];
7651
7652 /* Ensure strings are terminated */
7653 cfg->string[(EMLXS_CFG_STR_SIZE-1)] = 0;
7654 cfg->help[(EMLXS_CFG_HELP_SIZE-1)] = 0;
7655
7656 /* Set the current value to the default value */
7657 new_value = cfg->def;
7658
7659 /* First check for the global setting */
7660 new_value = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
7661 (void *)hba->dip, DDI_PROP_DONTPASS,
7662 cfg->string, new_value);
7663
7664 /* Now check for the per adapter ddiinst setting */
7665 (void) snprintf(string, sizeof (string), "%s%d-%s", DRIVER_NAME,
7666 hba->ddiinst, cfg->string);
7667
7668 new_value = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
7669 (void *)hba->dip, DDI_PROP_DONTPASS, string, new_value);
7670
7671 /* Now check the parameter */
7672 cfg->current = emlxs_check_parm(hba, i, new_value);
7673 }
7674
7675 return (0);
7676
7677 } /* emlxs_get_props() */
7678
7679
7680 extern uint32_t
7681 emlxs_check_parm(emlxs_hba_t *hba, uint32_t index, uint32_t new_value)
7682 {
7683 emlxs_port_t *port = &PPORT;
7684 uint32_t i;
7685 emlxs_config_t *cfg;
7686 emlxs_vpd_t *vpd = &VPD;
7687
7688 if (index >= NUM_CFG_PARAM) {
7689 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
7690 "check_parm failed. Invalid index = %d", index);
7691
7692 return (new_value);
7693 }
7694
7695 cfg = &hba->config[index];
7696
7697 if (new_value > cfg->hi) {
7698 new_value = cfg->def;
7699 } else if (new_value < cfg->low) {
7700 new_value = cfg->def;
7701 }
7702
7703 /* Perform additional checks */
7704 switch (index) {
7705 #ifdef SFCT_SUPPORT
7706 case CFG_NPIV_ENABLE:
7707 if (hba->config[CFG_TARGET_MODE].current &&
7708 hba->config[CFG_DTM_ENABLE].current == 0) {
7709 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
7710 "enable-npiv: Not supported in pure target mode. "
7711 "Disabling.");
7712
7713 new_value = 0;
7714 }
7715 break;
7716 #endif /* SFCT_SUPPORT */
7717
7718
7719 case CFG_NUM_NODES:
7720 switch (new_value) {
7721 case 1:
7722 case 2:
7723 /* Must have at least 3 if not 0 */
7724 return (3);
7725
7726 default:
7727 break;
7728 }
7729 break;
7730
7731 case CFG_FW_CHECK:
7732 /* The 0x2 bit implies the 0x1 bit will also be set */
7733 if (new_value & 0x2) {
7734 new_value |= 0x1;
7735 }
7736
7737 /* The 0x4 bit should not be set if 0x1 or 0x2 is not set */
7738 if (!(new_value & 0x3) && (new_value & 0x4)) {
7739 new_value &= ~0x4;
7740 }
7741 break;
7742
7743 case CFG_LINK_SPEED:
7744 if ((new_value > 8) &&
7745 (hba->config[CFG_TOPOLOGY].current == 4)) {
7746 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
7747 "link-speed: %dGb not supported in loop topology. "
7748 "Switching to auto detect.",
7749 new_value);
7750
7751 new_value = 0;
7752 break;
7753 }
7754
7755 if (vpd->link_speed) {
7756 switch (new_value) {
7757 case 0:
7758 break;
7759
7760 case 1:
7761 if (!(vpd->link_speed & LMT_1GB_CAPABLE)) {
7762 new_value = 0;
7763
7764 EMLXS_MSGF(EMLXS_CONTEXT,
7765 &emlxs_init_msg,
7766 "link-speed: 1Gb not supported "
7767 "by adapter. Switching to auto "
7768 "detect.");
7769 }
7770 break;
7771
7772 case 2:
7773 if (!(vpd->link_speed & LMT_2GB_CAPABLE)) {
7774 new_value = 0;
7775
7776 EMLXS_MSGF(EMLXS_CONTEXT,
7777 &emlxs_init_msg,
7778 "link-speed: 2Gb not supported "
7779 "by adapter. Switching to auto "
7780 "detect.");
7781 }
7782 break;
7783
7784 case 4:
7785 if (!(vpd->link_speed & LMT_4GB_CAPABLE)) {
7786 new_value = 0;
7787
7788 EMLXS_MSGF(EMLXS_CONTEXT,
7789 &emlxs_init_msg,
7790 "link-speed: 4Gb not supported "
7791 "by adapter. Switching to auto "
7792 "detect.");
7793 }
7794 break;
7795
7796 case 8:
7797 if (!(vpd->link_speed & LMT_8GB_CAPABLE)) {
7798 new_value = 0;
7799
7800 EMLXS_MSGF(EMLXS_CONTEXT,
7801 &emlxs_init_msg,
7802 "link-speed: 8Gb not supported "
7803 "by adapter. Switching to auto "
7804 "detect.");
7805 }
7806 break;
7807
7808 case 16:
7809 if (!(vpd->link_speed & LMT_16GB_CAPABLE)) {
7810 new_value = 0;
7811
7812 EMLXS_MSGF(EMLXS_CONTEXT,
7813 &emlxs_init_msg,
7814 "link-speed: 16Gb not supported "
7815 "by adapter. Switching to auto "
7816 "detect.");
7817 }
7818 break;
7819
7820 case 32:
7821 if (!(vpd->link_speed & LMT_32GB_CAPABLE)) {
7822 new_value = 0;
7823
7824 EMLXS_MSGF(EMLXS_CONTEXT,
7825 &emlxs_init_msg,
7826 "link-speed: 32Gb not supported "
7827 "by adapter. Switching to auto "
7828 "detect.");
7829 }
7830 break;
7831
7832 default:
7833 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
7834 "link-speed: Invalid value=%d provided. "
7835 "Switching to auto detect.",
7836 new_value);
7837
7838 new_value = 0;
7839 }
7840 } else { /* Perform basic validity check */
7841
7842 /* Perform additional check on link speed */
7843 switch (new_value) {
7844 case 0:
7845 case 1:
7846 case 2:
7847 case 4:
7848 case 8:
7849 case 16:
7850 /* link-speed is a valid choice */
7851 break;
7852
7853 default:
7854 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
7855 "link-speed: Invalid value=%d provided. "
7856 "Switching to auto detect.",
7857 new_value);
7858
7859 new_value = 0;
7860 }
7861 }
7862 break;
7863
7864 case CFG_TOPOLOGY:
7865 if ((new_value == 4) &&
7866 (hba->config[CFG_LINK_SPEED].current > 8)) {
7867 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
7868 "topology: Loop topology not supported "
7869 "with link speeds greater than 8Gb. "
7870 "Switching to auto detect.");
7871
7872 new_value = 0;
7873 break;
7874 }
7875
7876 /* Perform additional check on topology */
7877 switch (new_value) {
7878 case 0:
7879 case 2:
7880 case 4:
7881 case 6:
7882 /* topology is a valid choice */
7883 break;
7884
7885 default:
7886 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
7887 "topology: Invalid value=%d provided. "
7888 "Switching to auto detect.",
7889 new_value);
7890
7891 new_value = 0;
7892 break;
7893 }
7894 break;
7895
7896 #ifdef DHCHAP_SUPPORT
7897 case CFG_AUTH_TYPE:
7898 {
7899 uint32_t shift;
7900 uint32_t mask;
7901
7902 /* Perform additional check on auth type */
7903 shift = 12;
7904 mask = 0xF000;
7905 for (i = 0; i < 4; i++) {
7906 if (((new_value & mask) >> shift) > DFC_AUTH_TYPE_MAX) {
7907 return (cfg->def);
7908 }
7909
7910 shift -= 4;
7911 mask >>= 4;
7912 }
7913 break;
7914 }
7915
7916 case CFG_AUTH_HASH:
7917 {
7918 uint32_t shift;
7919 uint32_t mask;
7920
7921 /* Perform additional check on auth hash */
7922 shift = 12;
7923 mask = 0xF000;
7924 for (i = 0; i < 4; i++) {
7925 if (((new_value & mask) >> shift) > DFC_AUTH_HASH_MAX) {
7926 return (cfg->def);
7927 }
7928
7929 shift -= 4;
7930 mask >>= 4;
7931 }
7932 break;
7933 }
7934
7935 case CFG_AUTH_GROUP:
7936 {
7937 uint32_t shift;
7938 uint32_t mask;
7939
7940 /* Perform additional check on auth group */
7941 shift = 28;
7942 mask = 0xF0000000;
7943 for (i = 0; i < 8; i++) {
7944 if (((new_value & mask) >> shift) >
7945 DFC_AUTH_GROUP_MAX) {
7946 return (cfg->def);
7947 }
7948
7949 shift -= 4;
7950 mask >>= 4;
7951 }
7952 break;
7953 }
7954
7955 case CFG_AUTH_INTERVAL:
7956 if (new_value < 10) {
7957 return (10);
7958 }
7959 break;
7960
7961
7962 #endif /* DHCHAP_SUPPORT */
7963
7964 } /* switch */
7965
7966 return (new_value);
7967
7968 } /* emlxs_check_parm() */
7969
7970
7971 extern uint32_t
7972 emlxs_set_parm(emlxs_hba_t *hba, uint32_t index, uint32_t new_value)
7973 {
7974 emlxs_port_t *port = &PPORT;
7975 emlxs_port_t *vport;
7976 uint32_t vpi;
7977 emlxs_config_t *cfg;
7978 uint32_t old_value;
7979
7980 if (index >= NUM_CFG_PARAM) {
7981 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
7982 "set_parm failed. Invalid index = %d", index);
7983
7984 return ((uint32_t)FC_FAILURE);
7985 }
7986
7987 cfg = &hba->config[index];
7988
7989 if (!(cfg->flags & PARM_DYNAMIC)) {
7990 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
7991 "set_parm failed. %s is not dynamic.", cfg->string);
7992
7993 return ((uint32_t)FC_FAILURE);
7994 }
7995
7996 /* Check new value */
7997 old_value = new_value;
7998 new_value = emlxs_check_parm(hba, index, new_value);
7999
8000 if (old_value != new_value) {
8001 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
8002 "set_parm: %s invalid. 0x%x --> 0x%x",
8003 cfg->string, old_value, new_value);
8004 }
8005
8006 /* Return now if no actual change */
8007 if (new_value == cfg->current) {
8008 return (FC_SUCCESS);
8009 }
8010
8011 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
8012 "set_parm: %s changing. 0x%x --> 0x%x",
8013 cfg->string, cfg->current, new_value);
8014
8015 old_value = cfg->current;
8016 cfg->current = new_value;
8017
8018 /* React to change if needed */
8019 switch (index) {
8020
8021 case CFG_PCI_MAX_READ:
8022 /* Update MXR */
8023 emlxs_pcix_mxr_update(hba, 1);
8024 break;
8025
8026 #ifdef SFCT_SUPPORT
8027 case CFG_TARGET_MODE:
8028 (void) emlxs_reset(port, FC_FCA_LINK_RESET);
8029 break;
8030 #endif /* SFCT_SUPPORT */
8031
8032 case CFG_SLI_MODE:
8033 /* Check SLI mode */
8034 if ((hba->sli_mode == 3) && (new_value == 2)) {
8035 /* All vports must be disabled first */
8036 for (vpi = 1; vpi < MAX_VPORTS; vpi++) {
8037 vport = &VPORT(vpi);
8038
8039 if (vport->flag & EMLXS_PORT_ENABLED) {
8040 /* Reset current value */
8041 cfg->current = old_value;
8042
8043 EMLXS_MSGF(EMLXS_CONTEXT,
8044 &emlxs_sfs_debug_msg,
8045 "set_parm failed. %s: vpi=%d "
8046 "still enabled. Value restored to "
8047 "0x%x.", cfg->string, vpi,
8048 old_value);
8049
8050 return (2);
8051 }
8052 }
8053 }
8054
8055 if ((hba->sli_mode >= 4) && (new_value < 4)) {
8056 /*
8057 * Do not allow setting SLI 2 or 3 if the HBA supports SLI4
8058 */
8059 cfg->current = old_value;
8060 return ((uint32_t)FC_FAILURE);
8061 }
8062
8063 break;
8064
8065 case CFG_NPIV_ENABLE:
8066 /* Check if NPIV is being disabled */
8067 if ((old_value == 1) && (new_value == 0)) {
8068 /* All vports must be disabled first */
8069 for (vpi = 1; vpi < MAX_VPORTS; vpi++) {
8070 vport = &VPORT(vpi);
8071
8072 if (vport->flag & EMLXS_PORT_ENABLED) {
8073 /* Reset current value */
8074 cfg->current = old_value;
8075
8076 EMLXS_MSGF(EMLXS_CONTEXT,
8077 &emlxs_sfs_debug_msg,
8078 "set_parm failed. %s: vpi=%d "
8079 "still enabled. Value restored to "
8080 "0x%x.", cfg->string, vpi,
8081 old_value);
8082
8083 return (2);
8084 }
8085 }
8086 }
8087
8088 /* Trigger adapter reset */
8089 /* (void) emlxs_reset(port, FC_FCA_RESET); */
8090
8091 break;
8092
8093
8094 case CFG_VPORT_RESTRICTED:
8095 for (vpi = 0; vpi < MAX_VPORTS; vpi++) {
8096 vport = &VPORT(vpi);
8097
8098 if (!(vport->flag & EMLXS_PORT_CONFIG)) {
8099 continue;
8100 }
8101
8102 if (vport->options & EMLXS_OPT_RESTRICT_MASK) {
8103 continue;
8104 }
8105
8106 if (new_value) {
8107 vport->flag |= EMLXS_PORT_RESTRICTED;
8108 } else {
8109 vport->flag &= ~EMLXS_PORT_RESTRICTED;
8110 }
8111 }
8112
8113 break;
8114
8115 #ifdef DHCHAP_SUPPORT
8116 case CFG_AUTH_ENABLE:
8117 (void) emlxs_reset(port, FC_FCA_LINK_RESET);
8118 break;
8119
8120 case CFG_AUTH_TMO:
8121 hba->auth_cfg.authentication_timeout = cfg->current;
8122 break;
8123
8124 case CFG_AUTH_MODE:
8125 hba->auth_cfg.authentication_mode = cfg->current;
8126 break;
8127
8128 case CFG_AUTH_BIDIR:
8129 hba->auth_cfg.bidirectional = cfg->current;
8130 break;
8131
8132 case CFG_AUTH_TYPE:
8133 hba->auth_cfg.authentication_type_priority[0] =
8134 (cfg->current & 0xF000) >> 12;
8135 hba->auth_cfg.authentication_type_priority[1] =
8136 (cfg->current & 0x0F00) >> 8;
8137 hba->auth_cfg.authentication_type_priority[2] =
8138 (cfg->current & 0x00F0) >> 4;
8139 hba->auth_cfg.authentication_type_priority[3] =
8140 (cfg->current & 0x000F);
8141 break;
8142
8143 case CFG_AUTH_HASH:
8144 hba->auth_cfg.hash_priority[0] =
8145 (cfg->current & 0xF000) >> 12;
8146 hba->auth_cfg.hash_priority[1] = (cfg->current & 0x0F00)>>8;
8147 hba->auth_cfg.hash_priority[2] = (cfg->current & 0x00F0)>>4;
8148 hba->auth_cfg.hash_priority[3] = (cfg->current & 0x000F);
8149 break;
8150
8151 case CFG_AUTH_GROUP:
8152 hba->auth_cfg.dh_group_priority[0] =
8153 (cfg->current & 0xF0000000) >> 28;
8154 hba->auth_cfg.dh_group_priority[1] =
8155 (cfg->current & 0x0F000000) >> 24;
8156 hba->auth_cfg.dh_group_priority[2] =
8157 (cfg->current & 0x00F00000) >> 20;
8158 hba->auth_cfg.dh_group_priority[3] =
8159 (cfg->current & 0x000F0000) >> 16;
8160 hba->auth_cfg.dh_group_priority[4] =
8161 (cfg->current & 0x0000F000) >> 12;
8162 hba->auth_cfg.dh_group_priority[5] =
8163 (cfg->current & 0x00000F00) >> 8;
8164 hba->auth_cfg.dh_group_priority[6] =
8165 (cfg->current & 0x000000F0) >> 4;
8166 hba->auth_cfg.dh_group_priority[7] =
8167 (cfg->current & 0x0000000F);
8168 break;
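		/*
		 * Worked example (illustrative): the three priority lists
		 * above are packed one nibble per entry, most significant
		 * nibble first.  An authentication-type value of 0x1200
		 * decodes to priorities {1, 2, 0, 0}, and a DH-group value
		 * of 0x12345000 decodes to priorities {1, 2, 3, 4, 5, 0, 0, 0}.
		 */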
8169
8170 case CFG_AUTH_INTERVAL:
8171 hba->auth_cfg.reauthenticate_time_interval = cfg->current;
8172 break;
8173 #endif /* DHCHAP_SUPPORT */
8174
8175 }
8176
8177 return (FC_SUCCESS);
8178
8179 } /* emlxs_set_parm() */
8180
8181
8182 /*
8183 * emlxs_mem_alloc OS specific routine for memory allocation / mapping
8184 *
8185 * The buf_info->flags field describes the memory operation requested.
8186 *
8187 * FC_MBUF_PHYSONLY set requests a supplied virtual address be mapped for DMA
8188 * Virtual address is supplied in buf_info->virt
8189 * DMA mapping flag is in buf_info->align
8190 * (DMA_READ_ONLY, DMA_WRITE_ONLY, DMA_READ_WRITE)
8191  * The mapped physical address is returned in buf_info->phys
8192 *
8193 * FC_MBUF_PHYSONLY cleared requests memory be allocated for driver use and
8194 * if FC_MBUF_DMA is set the memory is also mapped for DMA
8195 * The byte alignment of the memory request is supplied in buf_info->align
8196 * The byte size of the memory request is supplied in buf_info->size
8197  * The virtual address is returned in buf_info->virt
8198  * The mapped physical address is returned in buf_info->phys (for FC_MBUF_DMA)
8199 */
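/*
 * Usage sketch (illustrative only, not part of the driver): a caller that
 * wants a DMA-capable scratch buffer would typically fill an MBUF_INFO as
 * shown below and release it later with emlxs_mem_free() using the same
 * flags.  The hba pointer is assumed to be in scope; the size and alignment
 * values are arbitrary.
 *
 *	MBUF_INFO buf;
 *
 *	bzero(&buf, sizeof (MBUF_INFO));
 *	buf.size = 4096;
 *	buf.align = 4096;
 *	buf.flags = FC_MBUF_DMA | FC_MBUF_SNGLSG;
 *
 *	if (emlxs_mem_alloc(hba, &buf) == NULL) {
 *		return (FC_FAILURE);
 *	}
 *
 *	... use buf.virt (kernel virtual) and buf.phys (DMA address) ...
 *
 *	emlxs_mem_free(hba, &buf);
 */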
8200 extern uint8_t *
8201 emlxs_mem_alloc(emlxs_hba_t *hba, MBUF_INFO *buf_info)
8202 {
8203 emlxs_port_t *port = &PPORT;
8204 ddi_dma_attr_t dma_attr;
8205 ddi_device_acc_attr_t dev_attr;
8206 uint_t cookie_count;
8207 size_t dma_reallen;
8208 ddi_dma_cookie_t dma_cookie;
8209 uint_t dma_flag;
8210 int status;
8211
8212 dma_attr = hba->dma_attr_1sg;
8213 dev_attr = emlxs_data_acc_attr;
8214
8215 if (buf_info->flags & FC_MBUF_SNGLSG) {
8216 dma_attr.dma_attr_sgllen = 1;
8217 }
8218
8219 if (buf_info->flags & FC_MBUF_PHYSONLY) {
8220
8221 if (buf_info->virt == NULL) {
8222 goto done;
8223 }
8224
8225 /*
8226 * Allocate the DMA handle for this DMA object
8227 */
8228 status = ddi_dma_alloc_handle((void *)hba->dip,
8229 &dma_attr, DDI_DMA_DONTWAIT,
8230 NULL, (ddi_dma_handle_t *)&buf_info->dma_handle);
8231 if (status != DDI_SUCCESS) {
8232 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
8233 "ddi_dma_alloc_handle failed: size=%x align=%x "
8234 "flags=%x", buf_info->size, buf_info->align,
8235 buf_info->flags);
8236
8237 buf_info->phys = 0;
8238 buf_info->dma_handle = 0;
8239 goto done;
8240 }
8241
8242 switch (buf_info->align) {
8243 case DMA_READ_WRITE:
8244 dma_flag = (DDI_DMA_RDWR | DDI_DMA_CONSISTENT);
8245 break;
8246 case DMA_READ_ONLY:
8247 dma_flag = (DDI_DMA_READ | DDI_DMA_CONSISTENT);
8248 break;
8249 case DMA_WRITE_ONLY:
8250 dma_flag = (DDI_DMA_WRITE | DDI_DMA_CONSISTENT);
8251 break;
8252 default:
8253 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
8254 "Invalid DMA flag");
8255 (void) ddi_dma_free_handle(
8256 (ddi_dma_handle_t *)&buf_info->dma_handle);
8257 buf_info->phys = 0;
8258 buf_info->dma_handle = 0;
8259 return ((uint8_t *)buf_info->virt);
8260 }
8261
8262 /* Map this page of memory */
8263 status = ddi_dma_addr_bind_handle(
8264 (ddi_dma_handle_t)buf_info->dma_handle, NULL,
8265 (caddr_t)buf_info->virt, (size_t)buf_info->size,
8266 dma_flag, DDI_DMA_DONTWAIT, NULL, &dma_cookie,
8267 &cookie_count);
8268
8269 if (status != DDI_DMA_MAPPED || (cookie_count > 1)) {
8270 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
8271 "ddi_dma_addr_bind_handle failed: status=%x "
8272 "count=%x flags=%x", status, cookie_count,
8273 buf_info->flags);
8274
8275 (void) ddi_dma_free_handle(
8276 (ddi_dma_handle_t *)&buf_info->dma_handle);
8277 buf_info->phys = 0;
8278 buf_info->dma_handle = 0;
8279 goto done;
8280 }
8281
8282 if (hba->bus_type == SBUS_FC) {
8283
8284 int32_t burstsizes_limit = 0xff;
8285 int32_t ret_burst;
8286
8287 ret_burst = ddi_dma_burstsizes(
8288 buf_info->dma_handle) & burstsizes_limit;
8289 if (ddi_dma_set_sbus64(buf_info->dma_handle,
8290 ret_burst) == DDI_FAILURE) {
8291 EMLXS_MSGF(EMLXS_CONTEXT,
8292 &emlxs_mem_alloc_failed_msg,
8293 "ddi_dma_set_sbus64 failed.");
8294 }
8295 }
8296
8297 /* Save Physical address */
8298 buf_info->phys = dma_cookie.dmac_laddress;
8299
8300 /*
8301 * Just to be sure, let's add this
8302 */
8303 EMLXS_MPDATA_SYNC((ddi_dma_handle_t)buf_info->dma_handle,
8304 (off_t)0, (size_t)buf_info->size, DDI_DMA_SYNC_FORDEV);
8305
8306 } else if (buf_info->flags & FC_MBUF_DMA) {
8307
8308 dma_attr.dma_attr_align = buf_info->align;
8309
8310 /*
8311 * Allocate the DMA handle for this DMA object
8312 */
8313 status = ddi_dma_alloc_handle((void *)hba->dip, &dma_attr,
8314 DDI_DMA_DONTWAIT, NULL,
8315 (ddi_dma_handle_t *)&buf_info->dma_handle);
8316 if (status != DDI_SUCCESS) {
8317 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
8318 "ddi_dma_alloc_handle failed: size=%x align=%x "
8319 "flags=%x", buf_info->size, buf_info->align,
8320 buf_info->flags);
8321
8322 buf_info->virt = NULL;
8323 buf_info->phys = 0;
8324 buf_info->data_handle = 0;
8325 buf_info->dma_handle = 0;
8326 goto done;
8327 }
8328
8329 status = ddi_dma_mem_alloc(
8330 (ddi_dma_handle_t)buf_info->dma_handle,
8331 (size_t)buf_info->size, &dev_attr, DDI_DMA_CONSISTENT,
8332 DDI_DMA_DONTWAIT, NULL, (caddr_t *)&buf_info->virt,
8333 &dma_reallen, (ddi_acc_handle_t *)&buf_info->data_handle);
8334
8335 if ((status != DDI_SUCCESS) || (buf_info->size > dma_reallen)) {
8336 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
8337 "ddi_dma_mem_alloc failed: size=%x align=%x "
8338 "flags=%x", buf_info->size, buf_info->align,
8339 buf_info->flags);
8340
8341 (void) ddi_dma_free_handle(
8342 (ddi_dma_handle_t *)&buf_info->dma_handle);
8343
8344 buf_info->virt = NULL;
8345 buf_info->phys = 0;
8346 buf_info->data_handle = 0;
8347 buf_info->dma_handle = 0;
8348 goto done;
8349 }
8350
8351 /* Map this page of memory */
8352 status = ddi_dma_addr_bind_handle(
8353 (ddi_dma_handle_t)buf_info->dma_handle, NULL,
8354 (caddr_t)buf_info->virt, (size_t)buf_info->size,
8355 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
8356 &dma_cookie, &cookie_count);
8357
8358 if (status != DDI_DMA_MAPPED || (cookie_count > 1)) {
8359 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
8360 "ddi_dma_addr_bind_handle failed: status=%x "
8361 "count=%d size=%x align=%x flags=%x", status,
8362 cookie_count, buf_info->size, buf_info->align,
8363 buf_info->flags);
8364
8365 (void) ddi_dma_mem_free(
8366 (ddi_acc_handle_t *)&buf_info->data_handle);
8367 (void) ddi_dma_free_handle(
8368 (ddi_dma_handle_t *)&buf_info->dma_handle);
8369
8370 buf_info->virt = NULL;
8371 buf_info->phys = 0;
8372 buf_info->dma_handle = 0;
8373 buf_info->data_handle = 0;
8374 goto done;
8375 }
8376
8377 if (hba->bus_type == SBUS_FC) {
8378 int32_t burstsizes_limit = 0xff;
8379 int32_t ret_burst;
8380
8381 ret_burst =
8382 ddi_dma_burstsizes(buf_info->
8383 dma_handle) & burstsizes_limit;
8384 if (ddi_dma_set_sbus64(buf_info->dma_handle,
8385 ret_burst) == DDI_FAILURE) {
8386 EMLXS_MSGF(EMLXS_CONTEXT,
8387 &emlxs_mem_alloc_failed_msg,
8388 "ddi_dma_set_sbus64 failed.");
8389 }
8390 }
8391
8392 /* Save Physical address */
8393 buf_info->phys = dma_cookie.dmac_laddress;
8394
8395 /* Just to be sure, let's add this */
8396 EMLXS_MPDATA_SYNC((ddi_dma_handle_t)buf_info->dma_handle,
8397 (off_t)0, (size_t)buf_info->size, DDI_DMA_SYNC_FORDEV);
8398
8399 } else { /* allocate virtual memory */
8400
8401 buf_info->virt =
8402 kmem_zalloc((size_t)buf_info->size, KM_NOSLEEP);
8403 buf_info->phys = 0;
8404 buf_info->data_handle = 0;
8405 buf_info->dma_handle = 0;
8406
8407 if (buf_info->virt == (uint32_t *)0) {
8408 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
8409 "size=%x flags=%x", buf_info->size,
8410 buf_info->flags);
8411 }
8412
8413 }
8414
8415 done:
8416
8417 return ((uint8_t *)buf_info->virt);
8418
8419 } /* emlxs_mem_alloc() */
8420
8421
8422
8423 /*
8424 * emlxs_mem_free:
8425 *
8426 * OS specific routine for memory de-allocation / unmapping
8427 *
8428 * The buf_info->flags field describes the memory operation requested.
8429 *
8430 * FC_MBUF_PHYSONLY set requests a supplied virtual address be unmapped
8431 * for DMA, but not freed. The mapped physical address to be unmapped is in
8432 * buf_info->phys
8433 *
8434 * FC_MBUF_PHYSONLY cleared requests memory be freed and unmapped for DMA only
8435 * if FC_MBUF_DMA is set. The mapped physical address to be unmapped is in
8436 * buf_info->phys. The virtual address to be freed is in buf_info->virt
8437 */
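/*
 * Usage sketch (illustrative only): for the FC_MBUF_PHYSONLY case only the
 * DMA binding created by emlxs_mem_alloc() is torn down; the memory behind
 * buf_info->virt still belongs to the caller.
 *
 *	buf.flags = FC_MBUF_PHYSONLY;
 *	emlxs_mem_free(hba, &buf);	(DMA handle unbound and freed;
 *					 buf.virt is left untouched)
 */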
8438 /*ARGSUSED*/
8439 extern void
8440 emlxs_mem_free(emlxs_hba_t *hba, MBUF_INFO *buf_info)
8441 {
8442 if (buf_info->flags & FC_MBUF_PHYSONLY) {
8443
8444 if (buf_info->dma_handle) {
8445 (void) ddi_dma_unbind_handle(buf_info->dma_handle);
8446 (void) ddi_dma_free_handle(
8447 (ddi_dma_handle_t *)&buf_info->dma_handle);
8448 buf_info->dma_handle = NULL;
8449 }
8450
8451 } else if (buf_info->flags & FC_MBUF_DMA) {
8452
8453 if (buf_info->dma_handle) {
8454 (void) ddi_dma_unbind_handle(buf_info->dma_handle);
8455 if (buf_info->data_handle) {
8456 (void) ddi_dma_mem_free(
8457 (ddi_acc_handle_t *)&buf_info->data_handle);
8458 }
8459 (void) ddi_dma_free_handle(
8460 (ddi_dma_handle_t *)&buf_info->dma_handle);
8461 buf_info->dma_handle = NULL;
8462 buf_info->data_handle = NULL;
8463 }
8464
8465 } else { /* allocate virtual memory */
8466
8467 if (buf_info->virt) {
8468 kmem_free(buf_info->virt, (size_t)buf_info->size);
8469 buf_info->virt = NULL;
8470 }
8471 }
8472
8473 } /* emlxs_mem_free() */
8474
8475
8476 static int
8477 emlxs_select_fcp_channel(emlxs_hba_t *hba, NODELIST *ndlp, int reset)
8478 {
8479 int channel;
8480 int msi_id;
8481
8482
8483	/* IO to an FCP2 device or a device reset always uses the fcp channel */
8484 if ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) || reset) {
8485 return (hba->channel_fcp);
8486 }
8487
8488
8489 msi_id = emlxs_select_msiid(hba);
8490 channel = emlxs_msiid_to_chan(hba, msi_id);
8491
8492
8493
8494 /* If channel is closed, then try fcp channel */
8495 if (ndlp->nlp_flag[channel] & NLP_CLOSED) {
8496 channel = hba->channel_fcp;
8497 }
8498 return (channel);
8499
8500 } /* emlxs_select_fcp_channel() */
8501
8502
8503 static int32_t
8504 emlxs_fast_target_reset(emlxs_port_t *port, emlxs_buf_t *sbp, NODELIST *ndlp)
8505 {
8506 emlxs_hba_t *hba = HBA;
8507 fc_packet_t *pkt;
8508 emlxs_config_t *cfg;
8509 MAILBOXQ *mbq;
8510 MAILBOX *mb;
8511 uint32_t rc;
8512
8513	/*
8514	 * This routine provides an alternative target reset processing
8515	 * method. Instead of sending an actual target reset to the
8516	 * NPort, we first unreg the login to that NPort. This causes
8517	 * all outstanding IOs to quickly complete with a NO RPI local
8518	 * error. Next we force the ULP to relogin to the NPort by
8519	 * sending an RSCN (for that NPort) to the upper layer. This
8520	 * method should result in a fast target reset as far as IO
8521	 * completion is concerned; however, since an actual target
8522	 * reset is not sent to the NPort, it is not 100% compatible.
8523	 * Things like reservations will not be broken. By default this
8524	 * option is DISABLED, and it is only enabled through a hidden
8525	 * configuration parameter (fast-tgt-reset).
8526	 */
8527 rc = FC_TRAN_BUSY;
8528 pkt = PRIV2PKT(sbp);
8529 cfg = &CFG;
8530
8531 if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))) {
8532 /* issue the mbox cmd to the sli */
8533 mb = (MAILBOX *) mbq->mbox;
8534 bzero((void *) mb, MAILBOX_CMD_BSIZE);
8535 mb->un.varUnregLogin.rpi = (uint16_t)ndlp->nlp_Rpi;
8536 #ifdef SLI3_SUPPORT
8537 mb->un.varUnregLogin.vpi = port->vpi;
8538 #endif /* SLI3_SUPPORT */
8539 mb->mbxCommand = MBX_UNREG_LOGIN;
8540 mb->mbxOwner = OWN_HOST;
8541
8542 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8543 "Fast Target Reset: unreg rpi=%d tmr=%d", ndlp->nlp_Rpi,
8544 cfg[CFG_FAST_TGT_RESET_TMR].current);
8545
8546 if (EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_WAIT, 0)
8547 == MBX_SUCCESS) {
8548
8549 ndlp->nlp_Rpi = 0;
8550
8551 mutex_enter(&sbp->mtx);
8552 sbp->node = (void *)ndlp;
8553 sbp->did = ndlp->nlp_DID;
8554 mutex_exit(&sbp->mtx);
8555
8556 if (pkt->pkt_rsplen) {
8557 bzero((uint8_t *)pkt->pkt_resp,
8558 pkt->pkt_rsplen);
8559 }
8560 if (cfg[CFG_FAST_TGT_RESET_TMR].current) {
8561 ndlp->nlp_force_rscn = hba->timer_tics +
8562 cfg[CFG_FAST_TGT_RESET_TMR].current;
8563 }
8564
8565 emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 0);
8566 }
8567
8568 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
8569 rc = FC_SUCCESS;
8570 }
8571 return (rc);
8572 } /* emlxs_fast_target_reset() */
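/*
 * Configuration sketch (assumption: the property is set with the standard
 * Solaris driver.conf syntax; the property name comes from the comment
 * above, so verify it against your emlxs.conf before relying on it):
 *
 *	# /kernel/drv/emlxs.conf
 *	fast-tgt-reset=1;
 */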
8573
8574 static int32_t
8575 emlxs_send_fcp_cmd(emlxs_port_t *port, emlxs_buf_t *sbp, uint32_t *pkt_flags)
8576 {
8577 emlxs_hba_t *hba = HBA;
8578 fc_packet_t *pkt;
8579 emlxs_config_t *cfg;
8580 IOCBQ *iocbq;
8581 IOCB *iocb;
8582 CHANNEL *cp;
8583 NODELIST *ndlp;
8584 char *cmd;
8585 uint16_t lun;
8586 FCP_CMND *fcp_cmd;
8587 uint32_t did;
8588 uint32_t reset = 0;
8589 int channel;
8590 int32_t rval;
8591
8592 pkt = PRIV2PKT(sbp);
8593 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
8594
8595 /* Find target node object */
8596 ndlp = emlxs_node_find_did(port, did, 1);
8597
8598 if (!ndlp || !ndlp->nlp_active) {
8599 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
8600 "Node not found. did=%x", did);
8601
8602 return (FC_BADPACKET);
8603 }
8604
8605 /* When the fcp channel is closed we stop accepting any FCP cmd */
8606 if (ndlp->nlp_flag[hba->channel_fcp] & NLP_CLOSED) {
8607 return (FC_TRAN_BUSY);
8608 }
8609
8610 /* Snoop for target or lun reset first */
8611 /* We always use FCP channel to send out target/lun reset fcp cmds */
8612	/* interrupt affinity only applies to non target/lun-reset fcp cmds */
8613
8614 cmd = (char *)pkt->pkt_cmd;
8615 lun = *((uint16_t *)cmd);
8616 lun = LE_SWAP16(lun);
8617
8618 iocbq = &sbp->iocbq;
8619 iocb = &iocbq->iocb;
8620 iocbq->node = (void *) ndlp;
8621
8622 /* Check for target reset */
8623 if (cmd[10] & 0x20) {
8624 /* prepare iocb */
8625 if ((rval = EMLXS_SLI_PREP_FCP_IOCB(port, sbp,
8626 hba->channel_fcp)) != FC_SUCCESS) {
8627
8628 if (rval == 0xff) {
8629 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
8630 0, 1);
8631 rval = FC_SUCCESS;
8632 }
8633
8634 return (rval);
8635 }
8636
8637 mutex_enter(&sbp->mtx);
8638 sbp->pkt_flags |= PACKET_FCP_TGT_RESET;
8639 sbp->pkt_flags |= PACKET_POLLED;
8640 *pkt_flags = sbp->pkt_flags;
8641 mutex_exit(&sbp->mtx);
8642
8643 #ifdef SAN_DIAG_SUPPORT
8644 emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_TGTRESET,
8645 (HBA_WWN *)&ndlp->nlp_portname, -1);
8646 #endif /* SAN_DIAG_SUPPORT */
8647
8648 iocbq->flag |= IOCB_PRIORITY;
8649
8650 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8651 "Target Reset: did=%x", did);
8652
8653 cfg = &CFG;
8654 if (cfg[CFG_FAST_TGT_RESET].current) {
8655 if (emlxs_fast_target_reset(port, sbp, ndlp) ==
8656 FC_SUCCESS) {
8657 return (FC_SUCCESS);
8658 }
8659 }
8660
8661 /* Close the node for any further normal IO */
8662 emlxs_node_close(port, ndlp, hba->channel_fcp,
8663 pkt->pkt_timeout);
8664
8665 /* Flush the IO's on the tx queues */
8666 (void) emlxs_tx_node_flush(port, ndlp,
8667 &hba->chan[hba->channel_fcp], 0, sbp);
8668
8669 /* This is the target reset fcp cmd */
8670 reset = 1;
8671 }
8672
8673 /* Check for lun reset */
8674 else if (cmd[10] & 0x10) {
8675 /* prepare iocb */
8676 if ((rval = EMLXS_SLI_PREP_FCP_IOCB(port, sbp,
8677 hba->channel_fcp)) != FC_SUCCESS) {
8678
8679 if (rval == 0xff) {
8680 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
8681 0, 1);
8682 rval = FC_SUCCESS;
8683 }
8684
8685 return (rval);
8686 }
8687
8688 mutex_enter(&sbp->mtx);
8689 sbp->pkt_flags |= PACKET_FCP_LUN_RESET;
8690 sbp->pkt_flags |= PACKET_POLLED;
8691 *pkt_flags = sbp->pkt_flags;
8692 mutex_exit(&sbp->mtx);
8693
8694 #ifdef SAN_DIAG_SUPPORT
8695 emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_LUNRESET,
8696 (HBA_WWN *)&ndlp->nlp_portname, lun);
8697 #endif /* SAN_DIAG_SUPPORT */
8698
8699 iocbq->flag |= IOCB_PRIORITY;
8700
8701 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8702		    "LUN Reset: did=%x lun=%02x LUN=%02x%02x", did, lun,
8703 cmd[0], cmd[1]);
8704
8705 /* Flush the IO's on the tx queues for this lun */
8706 (void) emlxs_tx_lun_flush(port, ndlp, lun, sbp);
8707
8708 /* This is the lun reset fcp cmd */
8709 reset = 1;
8710 }
8711
8712 channel = emlxs_select_fcp_channel(hba, ndlp, reset);
8713
8714 #ifdef SAN_DIAG_SUPPORT
8715 sbp->sd_start_time = gethrtime();
8716 #endif /* SAN_DIAG_SUPPORT */
8717
8718 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
8719 emlxs_swap_fcp_pkt(sbp);
8720 #endif /* EMLXS_MODREV2X */
8721
8722 fcp_cmd = (FCP_CMND *) pkt->pkt_cmd;
8723
8724 if (fcp_cmd->fcpCntl1 == FCP_QTYPE_UNTAGGED) {
8725 fcp_cmd->fcpCntl1 = FCP_QTYPE_SIMPLE;
8726 }
8727
8728 if (reset == 0) {
8729 /*
8730 * tgt lun reset fcp cmd has been prepared
8731 * separately in the beginning
8732 */
8733 if ((rval = EMLXS_SLI_PREP_FCP_IOCB(port, sbp,
8734 channel)) != FC_SUCCESS) {
8735
8736 if (rval == 0xff) {
8737 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
8738 0, 1);
8739 rval = FC_SUCCESS;
8740 }
8741
8742 return (rval);
8743 }
8744 }
8745
8746 cp = &hba->chan[channel];
8747 cp->ulpSendCmd++;
8748
8749	/* Initialize sbp */
8750 mutex_enter(&sbp->mtx);
8751 sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
8752 ((pkt->pkt_timeout > 0xff) ? 0 : 10);
8753 sbp->node = (void *)ndlp;
8754 sbp->lun = lun;
8755 sbp->class = iocb->ULPCLASS;
8756 sbp->did = ndlp->nlp_DID;
8757 mutex_exit(&sbp->mtx);
8758
8759 if (pkt->pkt_cmdlen) {
8760 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
8761 DDI_DMA_SYNC_FORDEV);
8762 }
8763
8764 if (pkt->pkt_datalen && pkt->pkt_tran_type == FC_PKT_FCP_WRITE) {
8765 EMLXS_MPDATA_SYNC(pkt->pkt_data_dma, 0, pkt->pkt_datalen,
8766 DDI_DMA_SYNC_FORDEV);
8767 }
8768
8769 HBASTATS.FcpIssued++;
8770
8771 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
8772 return (FC_SUCCESS);
8773
8774 } /* emlxs_send_fcp_cmd() */
8775
8776
8777
8778
8779 /*
8780  * This setup must work for INTX, MSI, and MSIX.
8781 * For INTX, intr_count is always 1
8782 * For MSI, intr_count is always 2 by default
8783 * For MSIX, intr_count is configurable (1, 2, 4, 8) for now.
8784 */
8785 extern int
8786 emlxs_select_msiid(emlxs_hba_t *hba)
8787 {
8788 int msiid = 0;
8789
8790 /* We use round-robin */
8791 mutex_enter(&EMLXS_MSIID_LOCK);
8792 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
8793 msiid = hba->last_msiid;
8794 hba->last_msiid ++;
8795 if (hba->last_msiid >= hba->intr_count) {
8796 hba->last_msiid = 0;
8797 }
8798 } else {
8799 /* This should work for INTX and MSI also */
8800 /* For SLI3 the chan_count is always 4 */
8801 /* For SLI3 the msiid is limited to chan_count */
8802 msiid = hba->last_msiid;
8803 hba->last_msiid ++;
8804 if (hba->intr_count > hba->chan_count) {
8805 if (hba->last_msiid >= hba->chan_count) {
8806 hba->last_msiid = 0;
8807 }
8808 } else {
8809 if (hba->last_msiid >= hba->intr_count) {
8810 hba->last_msiid = 0;
8811 }
8812 }
8813 }
8814 mutex_exit(&EMLXS_MSIID_LOCK);
8815
8816 return (msiid);
8817 } /* emlxs_select_msiid */
8818
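/*
 * Worked example (illustrative): with intr_count = 4 in SLI4 mode,
 * successive calls return msi ids 0, 1, 2, 3, 0, 1, ...  In SLI3 mode with
 * intr_count = 8 and chan_count = 4, the id wraps at chan_count instead,
 * giving 0, 1, 2, 3, 0, ...; with the MSI default of intr_count = 2 the
 * sequence is simply 0, 1, 0, 1, ...
 */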
8819
8820 /*
8821  * A channel has an association with an msi id.
8822  * One msi id may be associated with multiple channels.
8823 */
8824 extern int
8825 emlxs_msiid_to_chan(emlxs_hba_t *hba, int msi_id)
8826 {
8827 emlxs_config_t *cfg = &CFG;
8828 EQ_DESC_t *eqp;
8829 int chan;
8830 int num_wq;
8831
8832 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
8833 /* For SLI4 round robin all WQs associated with the msi_id */
8834 eqp = &hba->sli.sli4.eq[msi_id];
8835
8836 mutex_enter(&eqp->lastwq_lock);
8837 chan = eqp->lastwq;
8838 eqp->lastwq++;
8839 num_wq = cfg[CFG_NUM_WQ].current;
8840 if (eqp->lastwq >= ((msi_id + 1) * num_wq)) {
8841 eqp->lastwq -= num_wq;
8842 }
8843 mutex_exit(&eqp->lastwq_lock);
8844
8845 return (chan);
8846 } else {
8847 /* This is for SLI3 mode */
8848 return (hba->msi2chan[msi_id]);
8849 }
8850
8851 } /* emlxs_msiid_to_chan */
8852
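/*
 * Worked example (illustrative): in SLI4 mode with cfg[CFG_NUM_WQ] = 4,
 * msi id 0 round-robins over channels 0-3 and msi id 1 over channels 4-7;
 * eqp->lastwq always stays within [msi_id * num_wq, (msi_id + 1) * num_wq).
 * In SLI3 mode the mapping is just the static hba->msi2chan[] table.
 */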
8853
8854 #ifdef SFCT_SUPPORT
8855 static int32_t
8856 emlxs_send_fct_status(emlxs_port_t *port, emlxs_buf_t *sbp)
8857 {
8858 emlxs_hba_t *hba = HBA;
8859 IOCBQ *iocbq;
8860 IOCB *iocb;
8861 NODELIST *ndlp;
8862 CHANNEL *cp;
8863 uint32_t did;
8864
8865 did = sbp->did;
8866 ndlp = sbp->node;
8867 cp = (CHANNEL *)sbp->channel;
8868
8869 iocbq = &sbp->iocbq;
8870 iocb = &iocbq->iocb;
8871
8872 /* Make sure node is still active */
8873 if (!ndlp->nlp_active) {
8874 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
8875 "*Node not found. did=%x", did);
8876
8877 return (FC_BADPACKET);
8878 }
8879
8880 /* If gate is closed */
8881 if (ndlp->nlp_flag[hba->channel_fcp] & NLP_CLOSED) {
8882 return (FC_TRAN_BUSY);
8883 }
8884
8885 iocb->ULPCOMMAND = CMD_FCP_TRSP64_CX;
8886 if (EMLXS_SLI_PREP_FCT_IOCB(port, sbp, cp->channelno) !=
8887 IOERR_SUCCESS) {
8888 return (FC_TRAN_BUSY);
8889 }
8890
8891 HBASTATS.FcpIssued++;
8892
8893 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
8894
8895 return (FC_SUCCESS);
8896
8897 } /* emlxs_send_fct_status() */
8898
8899
8900 static int32_t
8901 emlxs_send_fct_abort(emlxs_port_t *port, emlxs_buf_t *sbp)
8902 {
8903 emlxs_hba_t *hba = HBA;
8904 IOCBQ *iocbq;
8905 IOCB *iocb;
8906 NODELIST *ndlp;
8907 CHANNEL *cp;
8908 uint32_t did;
8909
8910 did = sbp->did;
8911 ndlp = sbp->node;
8912 cp = (CHANNEL *)sbp->channel;
8913
8914 iocbq = &sbp->iocbq;
8915 iocb = &iocbq->iocb;
8916
8917 /* Make sure node is still active */
8918 if ((ndlp == NULL) || (!ndlp->nlp_active)) {
8919 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
8920 "*Node not found. did=%x", did);
8921
8922 return (FC_BADPACKET);
8923 }
8924
8925 /* If gate is closed */
8926 if (ndlp->nlp_flag[hba->channel_fcp] & NLP_CLOSED) {
8927 return (FC_TRAN_BUSY);
8928 }
8929
8930 iocb->ULPCOMMAND = CMD_ABORT_XRI_CX;
8931 if (EMLXS_SLI_PREP_FCT_IOCB(port, sbp, cp->channelno) !=
8932 IOERR_SUCCESS) {
8933 return (FC_TRAN_BUSY);
8934 }
8935
8936 EMLXS_SLI_ISSUE_IOCB_CMD(hba, sbp->channel, iocbq);
8937
8938 return (FC_SUCCESS);
8939
8940 } /* emlxs_send_fct_abort() */
8941
8942 #endif /* SFCT_SUPPORT */
8943
8944
8945 static int32_t
8946 emlxs_send_ip(emlxs_port_t *port, emlxs_buf_t *sbp)
8947 {
8948 emlxs_hba_t *hba = HBA;
8949 fc_packet_t *pkt;
8950 IOCBQ *iocbq;
8951 IOCB *iocb;
8952 CHANNEL *cp;
8953 uint32_t i;
8954 NODELIST *ndlp;
8955 uint32_t did;
8956 int32_t rval;
8957
8958 pkt = PRIV2PKT(sbp);
8959 cp = &hba->chan[hba->channel_ip];
8960 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
8961
8962 /* Check if node exists */
8963 /* Broadcast did is always a success */
8964 ndlp = emlxs_node_find_did(port, did, 1);
8965
8966 if (!ndlp || !ndlp->nlp_active) {
8967 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
8968 "Node not found. did=0x%x", did);
8969
8970 return (FC_BADPACKET);
8971 }
8972
8973 /* Check if gate is temporarily closed */
8974 if (ndlp->nlp_flag[hba->channel_ip] & NLP_CLOSED) {
8975 return (FC_TRAN_BUSY);
8976 }
8977
8978 /* Check if an exchange has been created */
8979 if ((ndlp->nlp_Xri == 0) && (did != BCAST_DID)) {
8980 /* No exchange. Try creating one */
8981 (void) emlxs_create_xri(port, cp, ndlp);
8982
8983 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
8984 "Adapter Busy. Exchange not found. did=0x%x", did);
8985
8986 return (FC_TRAN_BUSY);
8987 }
8988
8989 /* ULP PATCH: pkt_cmdlen was found to be set to zero */
8990 /* on BROADCAST commands */
8991 if (pkt->pkt_cmdlen == 0) {
8992 /* Set the pkt_cmdlen to the cookie size */
8993 #if (EMLXS_MODREV >= EMLXS_MODREV3)
8994 for (i = 0; i < pkt->pkt_cmd_cookie_cnt; i++) {
8995 pkt->pkt_cmdlen += pkt->pkt_cmd_cookie[i].dmac_size;
8996 }
8997 #else
8998 pkt->pkt_cmdlen = pkt->pkt_cmd_cookie.dmac_size;
8999 #endif /* >= EMLXS_MODREV3 */
9000
9001 }
9002
9003 iocbq = &sbp->iocbq;
9004 iocb = &iocbq->iocb;
9005
9006 iocbq->node = (void *)ndlp;
9007 if ((rval = EMLXS_SLI_PREP_IP_IOCB(port, sbp)) != FC_SUCCESS) {
9008
9009 if (rval == 0xff) {
9010 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
9011 rval = FC_SUCCESS;
9012 }
9013
9014 return (rval);
9015 }
9016
9017 cp->ulpSendCmd++;
9018
9019	/* Initialize sbp */
9020 mutex_enter(&sbp->mtx);
9021 sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
9022 ((pkt->pkt_timeout > 0xff) ? 0 : 10);
9023 sbp->node = (void *)ndlp;
9024 sbp->lun = EMLXS_LUN_NONE;
9025 sbp->class = iocb->ULPCLASS;
9026 sbp->did = did;
9027 mutex_exit(&sbp->mtx);
9028
9029 if (pkt->pkt_cmdlen) {
9030 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
9031 DDI_DMA_SYNC_FORDEV);
9032 }
9033
9034 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
9035
9036 return (FC_SUCCESS);
9037
9038 } /* emlxs_send_ip() */
9039
9040
9041 static int32_t
9042 emlxs_send_els(emlxs_port_t *port, emlxs_buf_t *sbp)
9043 {
9044 emlxs_hba_t *hba = HBA;
9045 emlxs_port_t *vport;
9046 fc_packet_t *pkt;
9047 IOCBQ *iocbq;
9048 CHANNEL *cp;
9049 SERV_PARM *sp;
9050 uint32_t cmd;
9051 int i;
9052 ELS_PKT *els_pkt;
9053 NODELIST *ndlp;
9054 uint32_t did;
9055 char fcsp_msg[32];
9056 int rc;
9057 int32_t rval;
9058 emlxs_config_t *cfg = &CFG;
9059
9060 fcsp_msg[0] = 0;
9061 pkt = PRIV2PKT(sbp);
9062 els_pkt = (ELS_PKT *)pkt->pkt_cmd;
9063 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
9064
9065 iocbq = &sbp->iocbq;
9066
9067 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9068 emlxs_swap_els_pkt(sbp);
9069 #endif /* EMLXS_MODREV2X */
9070
9071 cmd = *((uint32_t *)pkt->pkt_cmd);
9072 cmd &= ELS_CMD_MASK;
9073
9074 /* Point of no return, except for ADISC & PLOGI */
9075
9076 /* Check node */
9077 switch (cmd) {
9078 case ELS_CMD_FLOGI:
9079 case ELS_CMD_FDISC:
9080 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
9081
9082 if (emlxs_vpi_logi_notify(port, sbp)) {
9083 pkt->pkt_state = FC_PKT_LOCAL_RJT;
9084 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9085 emlxs_unswap_pkt(sbp);
9086 #endif /* EMLXS_MODREV2X */
9087 return (FC_FAILURE);
9088 }
9089 } else {
9090 /*
9091 * If FLOGI is already complete, then we
9092 * should not be receiving another FLOGI.
9093 * Reset the link to recover.
9094 */
9095 if (port->flag & EMLXS_PORT_FLOGI_CMPL) {
9096 pkt->pkt_state = FC_PKT_LOCAL_RJT;
9097 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9098 emlxs_unswap_pkt(sbp);
9099 #endif /* EMLXS_MODREV2X */
9100
9101 (void) emlxs_reset(port, FC_FCA_LINK_RESET);
9102 return (FC_FAILURE);
9103 }
9104
9105 if (port->vpi > 0) {
9106 *((uint32_t *)pkt->pkt_cmd) = ELS_CMD_FDISC;
9107 }
9108 }
9109
9110 /* Command may have been changed */
9111 cmd = *((uint32_t *)pkt->pkt_cmd);
9112 cmd &= ELS_CMD_MASK;
9113
9114 if (hba->flag & FC_NPIV_DELAY_REQUIRED) {
9115 sbp->pkt_flags |= PACKET_DELAY_REQUIRED;
9116 }
9117
9118 ndlp = NULL;
9119
9120 /* We will process these cmds at the bottom of this routine */
9121 break;
9122
9123 case ELS_CMD_PLOGI:
9124 /* Make sure we don't log into ourself */
9125 for (i = 0; i < MAX_VPORTS; i++) {
9126 vport = &VPORT(i);
9127
9128 if (!(vport->flag & EMLXS_INI_BOUND)) {
9129 continue;
9130 }
9131
9132 if (did == vport->did) {
9133 pkt->pkt_state = FC_PKT_NPORT_RJT;
9134
9135 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9136 emlxs_unswap_pkt(sbp);
9137 #endif /* EMLXS_MODREV2X */
9138
9139 return (FC_FAILURE);
9140 }
9141 }
9142
9143 ndlp = NULL;
9144
9145 if (hba->flag & FC_PT_TO_PT) {
9146 MAILBOXQ *mbox;
9147
9148 /* ULP bug fix */
9149 if (pkt->pkt_cmd_fhdr.s_id == 0) {
9150 pkt->pkt_cmd_fhdr.s_id = FP_DEFAULT_SID;
9151 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_send_msg,
9152 "PLOGI: P2P Fix. sid=0-->%x did=%x",
9153 pkt->pkt_cmd_fhdr.s_id,
9154 pkt->pkt_cmd_fhdr.d_id);
9155 }
9156
9157 mutex_enter(&EMLXS_PORT_LOCK);
9158 port->did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.s_id);
9159 port->rdid = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
9160 mutex_exit(&EMLXS_PORT_LOCK);
9161
9162 if (hba->sli_mode <= EMLXS_HBA_SLI3_MODE) {
9163 /* Update our service parms */
9164 if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
9165 MEM_MBOX))) {
9166 emlxs_mb_config_link(hba, mbox);
9167
9168 rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba,
9169 mbox, MBX_NOWAIT, 0);
9170 if ((rc != MBX_BUSY) &&
9171 (rc != MBX_SUCCESS)) {
9172 emlxs_mem_put(hba, MEM_MBOX,
9173 (void *)mbox);
9174 }
9175 }
9176 }
9177 }
9178
9179 /* We will process these cmds at the bottom of this routine */
9180 break;
9181
9182 default:
9183 ndlp = emlxs_node_find_did(port, did, 1);
9184
9185 /* If an ADISC is being sent and we have no node, */
9186 /* then we must fail the ADISC now */
9187 if (!ndlp && (cmd == ELS_CMD_ADISC) &&
9188 (port->mode == MODE_INITIATOR)) {
9189
9190 /* Build the LS_RJT response */
9191 els_pkt = (ELS_PKT *)pkt->pkt_resp;
9192 els_pkt->elsCode = 0x01;
9193 els_pkt->un.lsRjt.un.b.lsRjtRsvd0 = 0;
9194 els_pkt->un.lsRjt.un.b.lsRjtRsnCode =
9195 LSRJT_LOGICAL_ERR;
9196 els_pkt->un.lsRjt.un.b.lsRjtRsnCodeExp =
9197 LSEXP_NOTHING_MORE;
9198 els_pkt->un.lsRjt.un.b.vendorUnique = 0x03;
9199
9200 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
9201 "ADISC Rejected. Node not found. did=0x%x", did);
9202
9203 if (sbp->channel == NULL) {
9204 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
9205 sbp->channel =
9206 &hba->chan[hba->channel_els];
9207 } else {
9208 sbp->channel =
9209 &hba->chan[FC_ELS_RING];
9210 }
9211 }
9212
9213 /* Return this as rejected by the target */
9214 emlxs_pkt_complete(sbp, IOSTAT_LS_RJT, 0, 1);
9215
9216 return (FC_SUCCESS);
9217 }
9218 }
9219
9220 /* DID == BCAST_DID is special case to indicate that */
9221 /* RPI is being passed in seq_id field */
9222 /* This is used by emlxs_send_logo() for target mode */
9223
9224	/* Initialize iocbq */
9225 iocbq->node = (void *)ndlp;
9226 if ((rval = EMLXS_SLI_PREP_ELS_IOCB(port, sbp)) != FC_SUCCESS) {
9227
9228 if (rval == 0xff) {
9229 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
9230 rval = FC_SUCCESS;
9231 }
9232
9233 return (rval);
9234 }
9235
9236 cp = &hba->chan[hba->channel_els];
9237 cp->ulpSendCmd++;
9238 sp = (SERV_PARM *)&els_pkt->un.logi;
9239
9240 /* Check cmd */
9241 switch (cmd) {
9242 case ELS_CMD_PRLI:
9243 /*
9244 * if our firmware version is 3.20 or later,
9245 * set the following bits for FC-TAPE support.
9246 */
9247 if ((port->mode == MODE_INITIATOR) &&
9248 (hba->vpd.feaLevelHigh >= 0x02) &&
9249 (cfg[CFG_ADISC_SUPPORT].current != 0)) {
9250 els_pkt->un.prli.ConfmComplAllowed = 1;
9251 els_pkt->un.prli.Retry = 1;
9252 els_pkt->un.prli.TaskRetryIdReq = 1;
9253 } else {
9254 els_pkt->un.prli.ConfmComplAllowed = 0;
9255 els_pkt->un.prli.Retry = 0;
9256 els_pkt->un.prli.TaskRetryIdReq = 0;
9257 }
9258
9259 break;
9260
9261 /* This is a patch for the ULP stack. */
9262
9263 /*
9264 * ULP only reads our service parameters once during bind_port,
9265 * but the service parameters change due to topology.
9266 */
9267 case ELS_CMD_FLOGI:
9268 case ELS_CMD_FDISC:
9269 case ELS_CMD_PLOGI:
9270 case ELS_CMD_PDISC:
9271 /* Copy latest service parameters to payload */
9272 bcopy((void *) &port->sparam, (void *)sp, sizeof (SERV_PARM));
9273
9274 if ((cmd == ELS_CMD_FLOGI) || (cmd == ELS_CMD_FDISC)) {
9275
9276 /* Clear support for virtual fabrics */
9277 /* randomOffset bit controls this for FLOGI */
9278 sp->cmn.randomOffset = 0;
9279
9280 /* Set R_A_TOV to current value */
9281 sp->cmn.w2.r_a_tov =
9282 LE_SWAP32((hba->fc_ratov * 1000));
9283 }
9284
9285 if ((hba->flag & FC_NPIV_ENABLED) &&
9286 (hba->flag & FC_NPIV_SUPPORTED) &&
9287 (cmd == ELS_CMD_PLOGI)) {
9288 emlxs_vvl_fmt_t *vvl;
9289
9290 sp->VALID_VENDOR_VERSION = 1;
9291 vvl = (emlxs_vvl_fmt_t *)&sp->vendorVersion[0];
9292 vvl->un0.w0.oui = 0x0000C9;
9293 vvl->un0.word0 = LE_SWAP32(vvl->un0.word0);
9294 vvl->un1.w1.vport = (port->vpi > 0) ? 1 : 0;
9295 vvl->un1.word1 = LE_SWAP32(vvl->un1.word1);
9296 }
9297
9298 #ifdef DHCHAP_SUPPORT
9299 emlxs_dhc_init_sp(port, did, sp, (char **)&fcsp_msg);
9300 #endif /* DHCHAP_SUPPORT */
9301
9302 break;
9303 }
9304
9305 /* Initialize the sbp */
9306 mutex_enter(&sbp->mtx);
9307 sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
9308 ((pkt->pkt_timeout > 0xff) ? 0 : 10);
9309 sbp->node = (void *)ndlp;
9310 sbp->lun = EMLXS_LUN_NONE;
9311 sbp->did = did;
9312 mutex_exit(&sbp->mtx);
9313
9314 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_send_msg, "%s: sid=%x did=%x %s",
9315 emlxs_elscmd_xlate(cmd), port->did, did, fcsp_msg);
9316
9317 if (pkt->pkt_cmdlen) {
9318 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
9319 DDI_DMA_SYNC_FORDEV);
9320 }
9321
9322 /* Check node */
9323 switch (cmd) {
9324 case ELS_CMD_FLOGI:
9325 case ELS_CMD_FDISC:
9326 if (port->mode == MODE_INITIATOR) {
9327 /* Make sure fabric node is destroyed */
9328 /* It should already have been destroyed at link down */
9329 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
9330 ndlp = emlxs_node_find_did(port, FABRIC_DID, 1);
9331 if (ndlp) {
9332 if (EMLXS_SLI_UNREG_NODE(port, ndlp,
9333 NULL, NULL, iocbq) == 0) {
9334 /* Deferring iocb tx until */
9335 /* completion of unreg */
9336 return (FC_SUCCESS);
9337 }
9338 }
9339 }
9340 }
9341 break;
9342
9343 case ELS_CMD_PLOGI:
9344
9345 ndlp = emlxs_node_find_did(port, did, 1);
9346
9347 if (ndlp && ndlp->nlp_active) {
9348 /* Close the node for any further normal IO */
9349 emlxs_node_close(port, ndlp, hba->channel_fcp,
9350 pkt->pkt_timeout + 10);
9351 emlxs_node_close(port, ndlp, hba->channel_ip,
9352 pkt->pkt_timeout + 10);
9353
9354 /* Flush tx queues */
9355 (void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);
9356
9357 /* Flush chip queues */
9358 (void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
9359 }
9360
9361 break;
9362
9363 case ELS_CMD_PRLI:
9364
9365 ndlp = emlxs_node_find_did(port, did, 1);
9366
9367 if (ndlp && ndlp->nlp_active) {
9368 /*
9369 * Close the node for any further FCP IO;
9370 * Flush all outstanding I/O only if
9371 * "Establish Image Pair" bit is set.
9372 */
9373 emlxs_node_close(port, ndlp, hba->channel_fcp,
9374 pkt->pkt_timeout + 10);
9375
9376 if (els_pkt->un.prli.estabImagePair) {
9377 /* Flush tx queues */
9378 (void) emlxs_tx_node_flush(port, ndlp,
9379 &hba->chan[hba->channel_fcp], 0, 0);
9380
9381 /* Flush chip queues */
9382 (void) emlxs_chipq_node_flush(port,
9383 &hba->chan[hba->channel_fcp], ndlp, 0);
9384 }
9385 }
9386
9387 break;
9388
9389 }
9390
9391 HBASTATS.ElsCmdIssued++;
9392
9393 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
9394
9395 return (FC_SUCCESS);
9396
9397 } /* emlxs_send_els() */
9398
9399
9400
9401
9402 static int32_t
9403 emlxs_send_els_rsp(emlxs_port_t *port, emlxs_buf_t *sbp)
9404 {
9405 emlxs_hba_t *hba = HBA;
9406 emlxs_config_t *cfg = &CFG;
9407 fc_packet_t *pkt;
9408 IOCBQ *iocbq;
9409 IOCB *iocb;
9410 NODELIST *ndlp;
9411 CHANNEL *cp;
9412 int i;
9413 uint32_t cmd;
9414 uint32_t ucmd;
9415 ELS_PKT *els_pkt;
9416 fc_unsol_buf_t *ubp;
9417 emlxs_ub_priv_t *ub_priv;
9418 uint32_t did;
9419 char fcsp_msg[32];
9420 uint8_t *ub_buffer;
9421 int32_t rval;
9422
9423 fcsp_msg[0] = 0;
9424 pkt = PRIV2PKT(sbp);
9425 els_pkt = (ELS_PKT *)pkt->pkt_cmd;
9426 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
9427
9428 iocbq = &sbp->iocbq;
9429 iocb = &iocbq->iocb;
9430
9431 /* Acquire the unsolicited command this pkt is replying to */
9432 if (pkt->pkt_cmd_fhdr.ox_id < EMLXS_UB_TOKEN_OFFSET) {
9433 /* This is for auto replies when no ub's are used */
9434 ucmd = pkt->pkt_cmd_fhdr.ox_id << ELS_CMD_SHIFT;
9435 ubp = NULL;
9436 ub_priv = NULL;
9437 ub_buffer = NULL;
9438
9439 #ifdef SFCT_SUPPORT
9440 if (sbp->fct_cmd) {
9441 fct_els_t *els =
9442 (fct_els_t *)sbp->fct_cmd->cmd_specific;
9443 ub_buffer = (uint8_t *)els->els_req_payload;
9444 }
9445 #endif /* SFCT_SUPPORT */
9446
9447 } else {
9448 /* Find the ub buffer that goes with this reply */
9449 if (!(ubp = emlxs_ub_find(port, pkt->pkt_cmd_fhdr.ox_id))) {
9450 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
9451 "ELS reply: Invalid oxid=%x",
9452 pkt->pkt_cmd_fhdr.ox_id);
9453 return (FC_BADPACKET);
9454 }
9455
9456 ub_buffer = (uint8_t *)ubp->ub_buffer;
9457 ub_priv = ubp->ub_fca_private;
9458 ucmd = ub_priv->cmd;
9459
9460 ub_priv->flags |= EMLXS_UB_REPLY;
9461
9462 /* Reset oxid to ELS command */
9463 /* We do this because the ub is only valid */
9464 /* until we return from this thread */
9465 pkt->pkt_cmd_fhdr.ox_id = (ucmd >> ELS_CMD_SHIFT) & 0xff;
9466 }
9467
9468 /* Save the result */
9469 sbp->ucmd = ucmd;
9470
9471 if (sbp->channel == NULL) {
9472 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
9473 sbp->channel = &hba->chan[hba->channel_els];
9474 } else {
9475 sbp->channel = &hba->chan[FC_ELS_RING];
9476 }
9477 }
9478
9479 /* Check for interceptions */
9480 switch (ucmd) {
9481
9482 #ifdef ULP_PATCH2
9483 case ELS_CMD_LOGO:
9484 if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH2)) {
9485 break;
9486 }
9487
9488 /* Check if this was generated by ULP and not us */
9489 if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {
9490
9491 /*
9492 * Since we replied to this already,
9493 * we won't need to send this now
9494 */
9495 emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
9496
9497 return (FC_SUCCESS);
9498 }
9499
9500 break;
9501 #endif /* ULP_PATCH2 */
9502
9503 #ifdef ULP_PATCH3
9504 case ELS_CMD_PRLI:
9505 if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH3)) {
9506 break;
9507 }
9508
9509 /* Check if this was generated by ULP and not us */
9510 if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {
9511
9512 /*
9513 * Since we replied to this already,
9514 * we won't need to send this now
9515 */
9516 emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
9517
9518 return (FC_SUCCESS);
9519 }
9520
9521 break;
9522 #endif /* ULP_PATCH3 */
9523
9524
9525 #ifdef ULP_PATCH4
9526 case ELS_CMD_PRLO:
9527 if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH4)) {
9528 break;
9529 }
9530
9531 /* Check if this was generated by ULP and not us */
9532 if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {
9533 /*
9534 * Since we replied to this already,
9535 * we won't need to send this now
9536 */
9537 emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
9538
9539 return (FC_SUCCESS);
9540 }
9541
9542 break;
9543 #endif /* ULP_PATCH4 */
9544
9545 #ifdef ULP_PATCH6
9546 case ELS_CMD_RSCN:
9547 if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH6)) {
9548 break;
9549 }
9550
9551 /* Check if this RSCN was generated by us */
9552 if (ub_priv && (ub_priv->flags & EMLXS_UB_INTERCEPT)) {
9553 cmd = *((uint32_t *)pkt->pkt_cmd);
9554 cmd = LE_SWAP32(cmd);
9555 cmd &= ELS_CMD_MASK;
9556
9557 /*
9558 * If ULP is accepting this,
9559 * then close affected node
9560 */
9561 if ((port->mode == MODE_INITIATOR) && ub_buffer &&
9562 cmd == ELS_CMD_ACC) {
9563 fc_rscn_t *rscn;
9564 uint32_t count;
9565 uint32_t *lp;
9566
9567 /*
9568 * Only the Leadville code path will
9569 * come thru here. The RSCN data is NOT
9570 * swapped properly for the Comstar code
9571 * path.
9572 */
9573 lp = (uint32_t *)ub_buffer;
9574 rscn = (fc_rscn_t *)lp++;
9575 count =
9576 ((rscn->rscn_payload_len - 4) / 4);
9577
9578 /* Close affected ports */
9579 for (i = 0; i < count; i++, lp++) {
9580 (void) emlxs_port_offline(port,
9581 *lp);
9582 }
9583 }
9584
9585 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
9586 "RSCN %s: did=%x oxid=%x rxid=%x. "
9587 "Intercepted.", emlxs_elscmd_xlate(cmd),
9588 did, pkt->pkt_cmd_fhdr.ox_id,
9589 pkt->pkt_cmd_fhdr.rx_id);
9590
9591 /*
9592 * Since we generated this RSCN,
9593 * we won't need to send this reply
9594 */
9595 emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
9596
9597 return (FC_SUCCESS);
9598 }
9599
9600 break;
9601 #endif /* ULP_PATCH6 */
9602
9603 case ELS_CMD_PLOGI:
9604 /* Check if this PLOGI was generated by us */
9605 if (ub_priv && (ub_priv->flags & EMLXS_UB_INTERCEPT)) {
9606 cmd = *((uint32_t *)pkt->pkt_cmd);
9607 cmd = LE_SWAP32(cmd);
9608 cmd &= ELS_CMD_MASK;
9609
9610 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
9611 "PLOGI %s: did=%x oxid=%x rxid=%x. "
9612 "Intercepted.", emlxs_elscmd_xlate(cmd),
9613 did, pkt->pkt_cmd_fhdr.ox_id,
9614 pkt->pkt_cmd_fhdr.rx_id);
9615
9616 /*
9617 * Since we generated this PLOGI,
9618 * we won't need to send this reply
9619 */
9620 emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);
9621
9622 return (FC_SUCCESS);
9623 }
9624
9625 break;
9626 }
9627
9628 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9629 emlxs_swap_els_pkt(sbp);
9630 #endif /* EMLXS_MODREV2X */
9631
9632
9633 cmd = *((uint32_t *)pkt->pkt_cmd);
9634 cmd &= ELS_CMD_MASK;
9635
9636 /* Check if modifications are needed */
9637 switch (ucmd) {
9638 case (ELS_CMD_PRLI):
9639
9640 if (cmd == ELS_CMD_ACC) {
9641 /* This is a patch for the ULP stack. */
9642 /* ULP does not keep track of FCP2 support */
9643 if ((port->mode == MODE_INITIATOR) &&
9644 (hba->vpd.feaLevelHigh >= 0x02) &&
9645 (cfg[CFG_ADISC_SUPPORT].current != 0)) {
9646 els_pkt->un.prli.ConfmComplAllowed = 1;
9647 els_pkt->un.prli.Retry = 1;
9648 els_pkt->un.prli.TaskRetryIdReq = 1;
9649 } else {
9650 els_pkt->un.prli.ConfmComplAllowed = 0;
9651 els_pkt->un.prli.Retry = 0;
9652 els_pkt->un.prli.TaskRetryIdReq = 0;
9653 }
9654 }
9655
9656 break;
9657
9658 case ELS_CMD_FLOGI:
9659 case ELS_CMD_FDISC:
9660 if (cmd == ELS_CMD_ACC) {
9661 SERV_PARM *sp = (SERV_PARM *)&els_pkt->un.logi;
9662
9663 /* This is a patch for the ULP stack. */
9664
9665 /*
9666 * ULP only reads our service parameters
9667 * once during bind_port, but the service
9668 * parameters change due to topology.
9669 */
9670
9671 /* Copy latest service parameters to payload */
9672 bcopy((void *)&port->sparam,
9673 (void *)sp, sizeof (SERV_PARM));
9674
9675 /* We are in pt-to-pt mode. Set R_A_TOV to default */
9676 sp->cmn.w2.r_a_tov =
9677 LE_SWAP32((FF_DEF_RATOV * 1000));
9678
9679 /* Clear support for virtual fabrics */
9680 /* randomOffset bit controls this for FLOGI */
9681 sp->cmn.randomOffset = 0;
9682 #ifdef DHCHAP_SUPPORT
9683 emlxs_dhc_init_sp(port, did, sp, (char **)&fcsp_msg);
9684 #endif /* DHCHAP_SUPPORT */
9685 }
9686 break;
9687
9688 case ELS_CMD_PLOGI:
9689 case ELS_CMD_PDISC:
9690 if (cmd == ELS_CMD_ACC) {
9691 SERV_PARM *sp = (SERV_PARM *)&els_pkt->un.logi;
9692
9693 /* This is a patch for the ULP stack. */
9694
9695 /*
9696 * ULP only reads our service parameters
9697 * once during bind_port, but the service
9698 * parameters change due to topology.
9699 */
9700
9701 /* Copy latest service parameters to payload */
9702 bcopy((void *)&port->sparam,
9703 (void *)sp, sizeof (SERV_PARM));
9704
9705 #ifdef DHCHAP_SUPPORT
9706 emlxs_dhc_init_sp(port, did, sp, (char **)&fcsp_msg);
9707 #endif /* DHCHAP_SUPPORT */
9708 }
9709 break;
9710
9711 }
9712
9713	/* Initialize iocbq */
9714 iocbq->node = (void *)NULL;
9715 if ((rval = EMLXS_SLI_PREP_ELS_IOCB(port, sbp)) != FC_SUCCESS) {
9716
9717 if (rval == 0xff) {
9718 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
9719 rval = FC_SUCCESS;
9720 }
9721
9722 return (rval);
9723 }
9724
9725 cp = &hba->chan[hba->channel_els];
9726 cp->ulpSendCmd++;
9727
9728	/* Initialize sbp */
9729 mutex_enter(&sbp->mtx);
9730 sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
9731 ((pkt->pkt_timeout > 0xff) ? 0 : 10);
9732 sbp->node = (void *) NULL;
9733 sbp->lun = EMLXS_LUN_NONE;
9734 sbp->class = iocb->ULPCLASS;
9735 sbp->did = did;
9736 mutex_exit(&sbp->mtx);
9737
9738 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
9739 "%s %s: did=%x oxid=%x rxid=%x %s", emlxs_elscmd_xlate(ucmd),
9740 emlxs_elscmd_xlate(cmd), did, pkt->pkt_cmd_fhdr.ox_id,
9741 pkt->pkt_cmd_fhdr.rx_id, fcsp_msg);
9742
9743 /* Process nodes */
9744 switch (ucmd) {
9745 case ELS_CMD_RSCN:
9746 if ((port->mode == MODE_INITIATOR) && ub_buffer &&
9747 cmd == ELS_CMD_ACC) {
9748 fc_rscn_t *rscn;
9749 uint32_t count;
9750 uint32_t *lp = NULL;
9751
9752 /*
9753 * Only the Leadville code path will come thru
9754 * here. The RSCN data is NOT swapped properly
9755 * for the Comstar code path.
9756 */
9757 lp = (uint32_t *)ub_buffer;
9758 rscn = (fc_rscn_t *)lp++;
9759 count = ((rscn->rscn_payload_len - 4) / 4);
9760
9761 /* Close affected ports */
9762 for (i = 0; i < count; i++, lp++) {
9763 (void) emlxs_port_offline(port, *lp);
9764 }
9765 }
9766 break;
9767
9768 case ELS_CMD_PLOGI:
9769 if (cmd == ELS_CMD_ACC) {
9770 ndlp = emlxs_node_find_did(port, did, 1);
9771
9772 if (ndlp && ndlp->nlp_active) {
9773 /* Close the node for any further normal IO */
9774 emlxs_node_close(port, ndlp, hba->channel_fcp,
9775 pkt->pkt_timeout + 10);
9776 emlxs_node_close(port, ndlp, hba->channel_ip,
9777 pkt->pkt_timeout + 10);
9778
9779 /* Flush tx queue */
9780 (void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);
9781
9782 /* Flush chip queue */
9783 (void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
9784 }
9785 }
9786 break;
9787
9788 case ELS_CMD_PRLI:
9789 if (cmd == ELS_CMD_ACC) {
9790 ndlp = emlxs_node_find_did(port, did, 1);
9791
9792 if (ndlp && ndlp->nlp_active) {
9793 /* Close the node for any further normal IO */
9794 emlxs_node_close(port, ndlp, hba->channel_fcp,
9795 pkt->pkt_timeout + 10);
9796
9797 /* Flush tx queues */
9798 (void) emlxs_tx_node_flush(port, ndlp,
9799 &hba->chan[hba->channel_fcp], 0, 0);
9800
9801 /* Flush chip queues */
9802 (void) emlxs_chipq_node_flush(port,
9803 &hba->chan[hba->channel_fcp], ndlp, 0);
9804 }
9805 }
9806 break;
9807
9808 case ELS_CMD_PRLO:
9809 if (cmd == ELS_CMD_ACC) {
9810 ndlp = emlxs_node_find_did(port, did, 1);
9811
9812 if (ndlp && ndlp->nlp_active) {
9813 /* Close the node for any further normal IO */
9814 emlxs_node_close(port, ndlp,
9815 hba->channel_fcp, 60);
9816
9817 /* Flush tx queues */
9818 (void) emlxs_tx_node_flush(port, ndlp,
9819 &hba->chan[hba->channel_fcp], 0, 0);
9820
9821 /* Flush chip queues */
9822 (void) emlxs_chipq_node_flush(port,
9823 &hba->chan[hba->channel_fcp], ndlp, 0);
9824 }
9825 }
9826
9827 break;
9828
9829 case ELS_CMD_LOGO:
9830 if (cmd == ELS_CMD_ACC) {
9831 ndlp = emlxs_node_find_did(port, did, 1);
9832
9833 if (ndlp && ndlp->nlp_active) {
9834 /* Close the node for any further normal IO */
9835 emlxs_node_close(port, ndlp,
9836 hba->channel_fcp, 60);
9837 emlxs_node_close(port, ndlp,
9838 hba->channel_ip, 60);
9839
9840 /* Flush tx queues */
9841 (void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);
9842
9843 /* Flush chip queues */
9844 (void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
9845 }
9846 }
9847
9848 break;
9849 }
9850
9851 if (pkt->pkt_cmdlen) {
9852 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
9853 DDI_DMA_SYNC_FORDEV);
9854 }
9855
9856 HBASTATS.ElsRspIssued++;
9857
9858 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
9859
9860 return (FC_SUCCESS);
9861
9862 } /* emlxs_send_els_rsp() */
9863
9864
9865 #ifdef MENLO_SUPPORT
9866 static int32_t
9867 emlxs_send_menlo(emlxs_port_t *port, emlxs_buf_t *sbp)
9868 {
9869 emlxs_hba_t *hba = HBA;
9870 fc_packet_t *pkt;
9871 IOCBQ *iocbq;
9872 IOCB *iocb;
9873 CHANNEL *cp;
9874 NODELIST *ndlp;
9875 uint32_t did;
9876 uint32_t *lp;
9877 int32_t rval;
9878
9879 pkt = PRIV2PKT(sbp);
9880 did = EMLXS_MENLO_DID;
9881 lp = (uint32_t *)pkt->pkt_cmd;
9882
9883 iocbq = &sbp->iocbq;
9884 iocb = &iocbq->iocb;
9885
9886 ndlp = emlxs_node_find_did(port, did, 1);
9887
9888 if (!ndlp || !ndlp->nlp_active) {
9889 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
9890 "Node not found. did=0x%x", did);
9891
9892 return (FC_BADPACKET);
9893 }
9894
9895 iocbq->node = (void *) ndlp;
9896 if ((rval = EMLXS_SLI_PREP_CT_IOCB(port, sbp)) != FC_SUCCESS) {
9897
9898 if (rval == 0xff) {
9899 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
9900 rval = FC_SUCCESS;
9901 }
9902
9903 return (rval);
9904 }
9905
9906 cp = &hba->chan[hba->channel_ct];
9907 cp->ulpSendCmd++;
9908
9909 if (pkt->pkt_tran_type == FC_PKT_EXCHANGE) {
9910 /* Cmd phase */
9911
9912		/* Initialize iocb */
9913 iocb->un.genreq64.param = pkt->pkt_cmd_fhdr.d_id;
9914 iocb->ULPCONTEXT = 0;
9915 iocb->ULPPU = 3;
9916
9917 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
9918 "%s: [%08x,%08x,%08x,%08x]",
9919 emlxs_menlo_cmd_xlate(BE_SWAP32(lp[0])), BE_SWAP32(lp[1]),
9920 BE_SWAP32(lp[2]), BE_SWAP32(lp[3]), BE_SWAP32(lp[4]));
9921
9922 } else { /* FC_PKT_OUTBOUND */
9923
9924 /* MENLO_CMD_FW_DOWNLOAD Data Phase */
9925 iocb->ULPCOMMAND = CMD_GEN_REQUEST64_CX;
9926
9927		/* Initialize iocb */
9928 iocb->un.genreq64.param = 0;
9929 iocb->ULPCONTEXT = pkt->pkt_cmd_fhdr.rx_id;
9930 iocb->ULPPU = 1;
9931
9932 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
9933 "%s: Data: rxid=0x%x size=%d",
9934 emlxs_menlo_cmd_xlate(MENLO_CMD_FW_DOWNLOAD),
9935 pkt->pkt_cmd_fhdr.rx_id, pkt->pkt_cmdlen);
9936 }
9937
9938	/* Initialize sbp */
9939 mutex_enter(&sbp->mtx);
9940 sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
9941 ((pkt->pkt_timeout > 0xff) ? 0 : 10);
9942 sbp->node = (void *) ndlp;
9943 sbp->lun = EMLXS_LUN_NONE;
9944 sbp->class = iocb->ULPCLASS;
9945 sbp->did = did;
9946 mutex_exit(&sbp->mtx);
9947
9948 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
9949 DDI_DMA_SYNC_FORDEV);
9950
9951 HBASTATS.CtCmdIssued++;
9952
9953 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
9954
9955 return (FC_SUCCESS);
9956
9957 } /* emlxs_send_menlo() */
9958 #endif /* MENLO_SUPPORT */
9959
9960
9961 static int32_t
9962 emlxs_send_ct(emlxs_port_t *port, emlxs_buf_t *sbp)
9963 {
9964 emlxs_hba_t *hba = HBA;
9965 fc_packet_t *pkt;
9966 IOCBQ *iocbq;
9967 IOCB *iocb;
9968 NODELIST *ndlp;
9969 uint32_t did;
9970 CHANNEL *cp;
9971 int32_t rval;
9972
9973 pkt = PRIV2PKT(sbp);
9974 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
9975
9976 iocbq = &sbp->iocbq;
9977 iocb = &iocbq->iocb;
9978
9979 ndlp = emlxs_node_find_did(port, did, 1);
9980
9981 if (!ndlp || !ndlp->nlp_active) {
9982 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
9983 "Node not found. did=0x%x", did);
9984
9985 return (FC_BADPACKET);
9986 }
9987
9988 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9989 emlxs_swap_ct_pkt(sbp);
9990 #endif /* EMLXS_MODREV2X */
9991
9992 iocbq->node = (void *)ndlp;
9993 if ((rval = EMLXS_SLI_PREP_CT_IOCB(port, sbp)) != FC_SUCCESS) {
9994
9995 if (rval == 0xff) {
9996 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
9997 rval = FC_SUCCESS;
9998 }
9999
10000 return (rval);
10001 }
10002
10003 cp = &hba->chan[hba->channel_ct];
10004 cp->ulpSendCmd++;
10005
10006	/* Initialize sbp */
10007 mutex_enter(&sbp->mtx);
10008 sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
10009 ((pkt->pkt_timeout > 0xff) ? 0 : 10);
10010 sbp->node = (void *)ndlp;
10011 sbp->lun = EMLXS_LUN_NONE;
10012 sbp->class = iocb->ULPCLASS;
10013 sbp->did = did;
10014 mutex_exit(&sbp->mtx);
10015
10016 if (did == NAMESERVER_DID) {
10017 SLI_CT_REQUEST *CtCmd;
10018 uint32_t *lp0;
10019
10020 CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
10021 lp0 = (uint32_t *)pkt->pkt_cmd;
10022
10023 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
10024 "%s: did=%x [%08x,%08x]",
10025 emlxs_ctcmd_xlate(
10026 LE_SWAP16(CtCmd->CommandResponse.bits.CmdRsp)),
10027 did, LE_SWAP32(lp0[4]), LE_SWAP32(lp0[5]));
10028
10029 if (hba->flag & FC_NPIV_DELAY_REQUIRED) {
10030 sbp->pkt_flags |= PACKET_DELAY_REQUIRED;
10031 }
10032
10033 } else if (did == FDMI_DID) {
10034 SLI_CT_REQUEST *CtCmd;
10035 uint32_t *lp0;
10036
10037 CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
10038 lp0 = (uint32_t *)pkt->pkt_cmd;
10039
10040 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
10041 "%s: did=%x [%08x,%08x]",
10042 emlxs_mscmd_xlate(
10043 LE_SWAP16(CtCmd->CommandResponse.bits.CmdRsp)),
10044 did, LE_SWAP32(lp0[4]), LE_SWAP32(lp0[5]));
10045 } else {
10046 SLI_CT_REQUEST *CtCmd;
10047 uint32_t *lp0;
10048
10049 CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
10050 lp0 = (uint32_t *)pkt->pkt_cmd;
10051
10052 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
10053 "%s: did=%x [%08x,%08x]",
10054 emlxs_rmcmd_xlate(
10055 LE_SWAP16(CtCmd->CommandResponse.bits.CmdRsp)),
10056 did, LE_SWAP32(lp0[4]), LE_SWAP32(lp0[5]));
10057 }
10058
10059 if (pkt->pkt_cmdlen) {
10060 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
10061 DDI_DMA_SYNC_FORDEV);
10062 }
10063
10064 HBASTATS.CtCmdIssued++;
10065
10066 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
10067
10068 return (FC_SUCCESS);
10069
10070 } /* emlxs_send_ct() */
10071
10072
10073 static int32_t
10074 emlxs_send_ct_rsp(emlxs_port_t *port, emlxs_buf_t *sbp)
10075 {
10076 emlxs_hba_t *hba = HBA;
10077 fc_packet_t *pkt;
10078 CHANNEL *cp;
10079 IOCBQ *iocbq;
10080 IOCB *iocb;
10081 uint32_t *cmd;
10082 SLI_CT_REQUEST *CtCmd;
10083 int32_t rval;
10084
10085 pkt = PRIV2PKT(sbp);
10086 CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
10087 cmd = (uint32_t *)pkt->pkt_cmd;
10088
10089 iocbq = &sbp->iocbq;
10090 iocb = &iocbq->iocb;
10091
10092 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
10093 emlxs_swap_ct_pkt(sbp);
10094 #endif /* EMLXS_MODREV2X */
10095
10096 if ((rval = EMLXS_SLI_PREP_CT_IOCB(port, sbp)) != FC_SUCCESS) {
10097
10098 if (rval == 0xff) {
10099 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
10100 rval = FC_SUCCESS;
10101 }
10102
10103 return (rval);
10104 }
10105
10106 cp = &hba->chan[hba->channel_ct];
10107 cp->ulpSendCmd++;
10108
10109	/* Initialize sbp */
10110 mutex_enter(&sbp->mtx);
10111 sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
10112 ((pkt->pkt_timeout > 0xff) ? 0 : 10);
10113 sbp->node = NULL;
10114 sbp->lun = EMLXS_LUN_NONE;
10115 sbp->class = iocb->ULPCLASS;
10116 mutex_exit(&sbp->mtx);
10117
10118 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_reply_msg,
10119 "%s: Rsn=%x Exp=%x [%08x,%08x] rxid=%x ",
10120 emlxs_rmcmd_xlate(LE_SWAP16(
10121 CtCmd->CommandResponse.bits.CmdRsp)),
10122 CtCmd->ReasonCode, CtCmd->Explanation,
10123 LE_SWAP32(cmd[4]), LE_SWAP32(cmd[5]),
10124 pkt->pkt_cmd_fhdr.rx_id);
10125
10126 if (pkt->pkt_cmdlen) {
10127 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
10128 DDI_DMA_SYNC_FORDEV);
10129 }
10130
10131 HBASTATS.CtRspIssued++;
10132
10133 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
10134
10135 return (FC_SUCCESS);
10136
10137 } /* emlxs_send_ct_rsp() */
10138
10139
10140 /*
10141 * emlxs_get_instance()
10142  * Given a ddi instance number (ddiinst), return the Fibre Channel (emlx) instance.
10143 */
10144 extern uint32_t
10145 emlxs_get_instance(int32_t ddiinst)
10146 {
10147 uint32_t i;
10148 uint32_t inst;
10149
10150 mutex_enter(&emlxs_device.lock);
10151
10152 inst = MAX_FC_BRDS;
10153 for (i = 0; i < emlxs_instance_count; i++) {
10154 if (emlxs_instance[i] == ddiinst) {
10155 inst = i;
10156 break;
10157 }
10158 }
10159
10160 mutex_exit(&emlxs_device.lock);
10161
10162 return (inst);
10163
10164 } /* emlxs_get_instance() */
10165
10166
10167 /*
10168 * emlxs_add_instance()
10169  * Given a ddi instance number (ddiinst), create a Fibre Channel (emlx) instance.
10170  * emlx instances are numbered in the order emlxs_attach() is called, starting at 0.
10171 */
10172 static uint32_t
10173 emlxs_add_instance(int32_t ddiinst)
10174 {
10175 uint32_t i;
10176
10177 mutex_enter(&emlxs_device.lock);
10178
10179 /* First see if the ddiinst already exists */
10180 for (i = 0; i < emlxs_instance_count; i++) {
10181 if (emlxs_instance[i] == ddiinst) {
10182 break;
10183 }
10184 }
10185
10186 /* If it doesn't already exist, add it */
10187 if (i >= emlxs_instance_count) {
10188 if ((i = emlxs_instance_count) < MAX_FC_BRDS) {
10189 emlxs_instance[i] = ddiinst;
10190 emlxs_instance_count++;
10191 emlxs_device.hba_count = emlxs_instance_count;
10192 }
10193 }
10194
10195 mutex_exit(&emlxs_device.lock);
10196
10197 return (i);
10198
10199 } /* emlxs_add_instance() */
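/*
 * Worked example (illustrative): if emlxs_attach() runs for ddi instances
 * 7, 2 and 9 in that order, emlxs_instance[] becomes {7, 2, 9} and
 * emlxs_get_instance() maps 7->0, 2->1, 9->2.  An unknown ddi instance
 * maps to MAX_FC_BRDS.
 */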
10200
10201
10202 /*ARGSUSED*/
10203 extern void
10204 emlxs_pkt_complete(emlxs_buf_t *sbp, uint32_t iostat, uint8_t localstat,
10205 uint32_t doneq)
10206 {
10207 emlxs_hba_t *hba;
10208 emlxs_port_t *port;
10209 emlxs_buf_t *fpkt;
10210
10211 port = sbp->port;
10212
10213 if (!port) {
10214 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_completion_error_msg,
10215 "NULL port found. sbp=%p flags=%x", sbp, sbp->pkt_flags);
10216
10217 return;
10218 }
10219
10220 hba = HBA;
10221
10222 if ((hba->sli_mode == EMLXS_HBA_SLI4_MODE) &&
10223 (sbp->iotag)) {
10224 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
10225 "WARNING: Completing IO with iotag. sbp=%p iotag=%d "
10226 "xri_flags=%x",
10227 sbp, sbp->iotag, ((sbp->xrip)? sbp->xrip->flag:0));
10228
10229 emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
10230 }
10231
10232 mutex_enter(&sbp->mtx);
10233
10234 /* Check for error conditions */
10235 if (sbp->pkt_flags & (PACKET_ULP_OWNED | PACKET_COMPLETED |
10236 PACKET_IN_DONEQ | PACKET_IN_COMPLETION |
10237 PACKET_IN_TXQ | PACKET_IN_CHIPQ)) {
10238 if (sbp->pkt_flags & PACKET_ULP_OWNED) {
10239 EMLXS_MSGF(EMLXS_CONTEXT,
10240 &emlxs_pkt_completion_error_msg,
10241 "Packet already returned. sbp=%p flags=%x", sbp,
10242 sbp->pkt_flags);
10243 }
10244
10245 else if (sbp->pkt_flags & PACKET_COMPLETED) {
10246 EMLXS_MSGF(EMLXS_CONTEXT,
10247 &emlxs_pkt_completion_error_msg,
10248 "Packet already completed. sbp=%p flags=%x", sbp,
10249 sbp->pkt_flags);
10250 }
10251
10252 else if (sbp->pkt_flags & PACKET_IN_DONEQ) {
10253 EMLXS_MSGF(EMLXS_CONTEXT,
10254 &emlxs_pkt_completion_error_msg,
10255 "Pkt already on done queue. sbp=%p flags=%x", sbp,
10256 sbp->pkt_flags);
10257 }
10258
10259 else if (sbp->pkt_flags & PACKET_IN_COMPLETION) {
10260 EMLXS_MSGF(EMLXS_CONTEXT,
10261 &emlxs_pkt_completion_error_msg,
10262 "Packet already in completion. sbp=%p flags=%x",
10263 sbp, sbp->pkt_flags);
10264 }
10265
10266 else if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
10267 EMLXS_MSGF(EMLXS_CONTEXT,
10268 &emlxs_pkt_completion_error_msg,
10269 "Packet still on chip queue. sbp=%p flags=%x",
10270 sbp, sbp->pkt_flags);
10271 }
10272
10273 else if (sbp->pkt_flags & PACKET_IN_TXQ) {
10274 EMLXS_MSGF(EMLXS_CONTEXT,
10275 &emlxs_pkt_completion_error_msg,
10276 "Packet still on tx queue. sbp=%p flags=%x", sbp,
10277 sbp->pkt_flags);
10278 }
10279
10280 mutex_exit(&sbp->mtx);
10281 return;
10282 }
10283
10284 /* Packet is now in completion */
10285 sbp->pkt_flags |= PACKET_IN_COMPLETION;
10286
10287 /* Set the state if not already set */
10288 if (!(sbp->pkt_flags & PACKET_STATE_VALID)) {
10289 emlxs_set_pkt_state(sbp, iostat, localstat, 0);
10290 }
10291
10292 /* Check for parent flush packet */
10293 /* If pkt has a parent flush packet then adjust its count now */
10294 fpkt = sbp->fpkt;
10295 if (fpkt) {
10296 /*
10297 * We will try to NULL sbp->fpkt inside the
10298 * fpkt's mutex if possible
10299 */
10300
10301 if (!(fpkt->pkt_flags & PACKET_ULP_OWNED)) {
10302 mutex_enter(&fpkt->mtx);
10303 if (fpkt->flush_count) {
10304 fpkt->flush_count--;
10305 }
10306 sbp->fpkt = NULL;
10307 mutex_exit(&fpkt->mtx);
10308 } else { /* fpkt has been returned already */
10309
10310 sbp->fpkt = NULL;
10311 }
10312 }
10313
10314 /* If pkt is polled, then wake up sleeping thread */
10315 if (sbp->pkt_flags & PACKET_POLLED) {
10316 /* Don't set the PACKET_ULP_OWNED flag here */
10317 /* because the polling thread will do it */
10318 sbp->pkt_flags |= PACKET_COMPLETED;
10319 mutex_exit(&sbp->mtx);
10320
10321 /* Wake up sleeping thread */
10322 mutex_enter(&EMLXS_PKT_LOCK);
10323 cv_broadcast(&EMLXS_PKT_CV);
10324 mutex_exit(&EMLXS_PKT_LOCK);
10325 }
10326
10327 /* If packet was generated by our driver, */
10328 /* then complete it immediately */
10329 else if (sbp->pkt_flags & PACKET_ALLOCATED) {
10330 mutex_exit(&sbp->mtx);
10331
10332 emlxs_iodone(sbp);
10333 }
10334
10335 /* Put the pkt on the done queue for callback */
10336 /* completion in another thread */
10337 else {
10338 sbp->pkt_flags |= PACKET_IN_DONEQ;
10339 sbp->next = NULL;
10340 mutex_exit(&sbp->mtx);
10341
10342 /* Put pkt on doneq, so I/O's will be completed in order */
10343 mutex_enter(&EMLXS_PORT_LOCK);
10344 if (hba->iodone_tail == NULL) {
10345 hba->iodone_list = sbp;
10346 hba->iodone_count = 1;
10347 } else {
10348 hba->iodone_tail->next = sbp;
10349 hba->iodone_count++;
10350 }
10351 hba->iodone_tail = sbp;
10352 mutex_exit(&EMLXS_PORT_LOCK);
10353
10354 /* Trigger a thread to service the doneq */
10355 emlxs_thread_trigger1(&hba->iodone_thread,
10356 emlxs_iodone_server);
10357 }
10358
10359 return;
10360
10361 } /* emlxs_pkt_complete() */
10362
10363
10364 #ifdef SAN_DIAG_SUPPORT
10365 /*
10366 * This routine is called with EMLXS_PORT_LOCK held so we can just increment
10367 * normally. Don't have to use atomic operations.
10368 */
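/*
 * Bucketing note: a completion whose delta_time falls between
 * emlxs_sd_bucket.values[i-1] and values[i] is counted in
 * ndlp->sd_dev_bucket[i]; times beyond the last boundary go to the last
 * bucket, and times at or below values[0] go to bucket 0.
 */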
10369 extern void
10370 emlxs_update_sd_bucket(emlxs_buf_t *sbp)
10371 {
10372 emlxs_port_t *vport;
10373 fc_packet_t *pkt;
10374 uint32_t did;
10375 hrtime_t t;
10376 hrtime_t delta_time;
10377 int i;
10378 NODELIST *ndlp;
10379
10380 vport = sbp->port;
10381
10382 if ((emlxs_sd_bucket.search_type == 0) ||
10383 (vport->sd_io_latency_state != SD_COLLECTING)) {
10384 return;
10385 }
10386
10387 /* Compute the iolatency time in microseconds */
10388 t = gethrtime();
10389 delta_time = t - sbp->sd_start_time;
10390 pkt = PRIV2PKT(sbp);
10391 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
10392 ndlp = emlxs_node_find_did(vport, did, 1);
10393
10394 if (!ndlp) {
10395 return;
10396 }
10397
10398 if (delta_time >=
10399 emlxs_sd_bucket.values[SD_IO_LATENCY_MAX_BUCKETS - 1]) {
10400 ndlp->sd_dev_bucket[SD_IO_LATENCY_MAX_BUCKETS - 1].
10401 count++;
10402 } else if (delta_time <= emlxs_sd_bucket.values[0]) {
10403 ndlp->sd_dev_bucket[0].count++;
10404 } else {
10405 for (i = 1; i < SD_IO_LATENCY_MAX_BUCKETS; i++) {
10406 if ((delta_time > emlxs_sd_bucket.values[i-1]) &&
10407 (delta_time <= emlxs_sd_bucket.values[i])) {
10408 ndlp->sd_dev_bucket[i].count++;
10409 break;
10410 }
10411 }
10412 }
10413
10414 return;
10415
10416 } /* emlxs_update_sd_bucket() */
10417 #endif /* SAN_DIAG_SUPPORT */
10418
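/*
 * emlxs_iodone_server()
 * Thread entry point that drains the HBA iodone list.  Packets queued
 * by emlxs_pkt_complete() are removed one at a time under
 * EMLXS_PORT_LOCK and completed via emlxs_iodone() in arrival order.
 */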
10419 /*ARGSUSED*/
10420 static void
10421 emlxs_iodone_server(void *arg1, void *arg2, void *arg3)
10422 {
10423 emlxs_hba_t *hba = (emlxs_hba_t *)arg1;
10424 emlxs_buf_t *sbp;
10425
10426 mutex_enter(&EMLXS_PORT_LOCK);
10427
10428 /* Remove one pkt from the doneq head and complete it */
10429 while ((sbp = hba->iodone_list) != NULL) {
10430 if ((hba->iodone_list = sbp->next) == NULL) {
10431 hba->iodone_tail = NULL;
10432 hba->iodone_count = 0;
10433 } else {
10434 hba->iodone_count--;
10435 }
10436
10437 mutex_exit(&EMLXS_PORT_LOCK);
10438
10439 /* Prepare the pkt for completion */
10440 mutex_enter(&sbp->mtx);
10441 sbp->next = NULL;
10442 sbp->pkt_flags &= ~PACKET_IN_DONEQ;
10443 mutex_exit(&sbp->mtx);
10444
10445 /* Complete the IO now */
10446 emlxs_iodone(sbp);
10447
10448 /* Reacquire lock and check if more work is to be done */
10449 mutex_enter(&EMLXS_PORT_LOCK);
10450 }
10451
10452 mutex_exit(&EMLXS_PORT_LOCK);
10453
10454 #ifdef FMA_SUPPORT
10455 if (hba->flag & FC_DMA_CHECK_ERROR) {
10456 emlxs_thread_spawn(hba, emlxs_restart_thread,
10457 NULL, NULL);
10458 }
10459 #endif /* FMA_SUPPORT */
10460
10461 return;
10462
10463 } /* emlxs_iodone_server() */
10464
10465
10466 static void
10467 emlxs_iodone(emlxs_buf_t *sbp)
10468 {
10469 #ifdef FMA_SUPPORT
10470 emlxs_port_t *port = sbp->port;
10471 emlxs_hba_t *hba = port->hba;
10472 #endif /* FMA_SUPPORT */
10473
10474 fc_packet_t *pkt;
10475 CHANNEL *cp;
10476
10477 pkt = PRIV2PKT(sbp);
10478
10479 /* Check one more time that the pkt has not already been returned */
10480 if (sbp->pkt_flags & PACKET_ULP_OWNED) {
10481 return;
10482 }
10483
10484 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
10485 emlxs_unswap_pkt(sbp);
10486 #endif /* EMLXS_MODREV2X */
10487
10488 mutex_enter(&sbp->mtx);
10489 sbp->pkt_flags |= (PACKET_COMPLETED | PACKET_ULP_OWNED);
10490 mutex_exit(&sbp->mtx);
10491
10492 if (pkt->pkt_comp) {
10493 #ifdef FMA_SUPPORT
10494 emlxs_check_dma(hba, sbp);
10495 #endif /* FMA_SUPPORT */
10496
10497 if (sbp->channel) {
10498 cp = (CHANNEL *)sbp->channel;
10499 cp->ulpCmplCmd++;
10500 }
10501
10502 (*pkt->pkt_comp) (pkt);
10503 }
10504
10505 return;
10506
10507 } /* emlxs_iodone() */
10508
10509
10510
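/*
 * emlxs_ub_find()
 * Map an unsolicited buffer token back to its fc_unsol_buf_t by walking
 * the port's pool list; a token indexes directly into a pool's
 * fc_ubufs[] array relative to pool_first_token.
 */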
10511 extern fc_unsol_buf_t *
10512 emlxs_ub_find(emlxs_port_t *port, uint32_t token)
10513 {
10514 emlxs_unsol_buf_t *pool;
10515 fc_unsol_buf_t *ubp;
10516 emlxs_ub_priv_t *ub_priv;
10517
10518 /* Check if this is a valid ub token */
10519 if (token < EMLXS_UB_TOKEN_OFFSET) {
10520 return (NULL);
10521 }
10522
10523 mutex_enter(&EMLXS_UB_LOCK);
10524
10525 pool = port->ub_pool;
10526 while (pool) {
10527 /* Find a pool with the proper token range */
10528 if (token >= pool->pool_first_token &&
10529 token <= pool->pool_last_token) {
10530 ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[(token -
10531 pool->pool_first_token)];
10532 ub_priv = ubp->ub_fca_private;
10533
10534 if (ub_priv->token != token) {
10535 EMLXS_MSGF(EMLXS_CONTEXT,
10536 &emlxs_sfs_debug_msg,
10537 "ub_find: Invalid token=%x", ubp, token,
10538 ub_priv->token);
10539
10540 ubp = NULL;
10541 }
10542
10543 else if (!(ub_priv->flags & EMLXS_UB_IN_USE)) {
10544 EMLXS_MSGF(EMLXS_CONTEXT,
10545 &emlxs_sfs_debug_msg,
10546 "ub_find: Buffer not in use. buffer=%p "
10547 "token=%x", ubp, token);
10548
10549 ubp = NULL;
10550 }
10551
10552 mutex_exit(&EMLXS_UB_LOCK);
10553
10554 return (ubp);
10555 }
10556
10557 pool = pool->pool_next;
10558 }
10559
10560 mutex_exit(&EMLXS_UB_LOCK);
10561
10562 return (NULL);
10563
10564 } /* emlxs_ub_find() */
10565
10566
10567
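/*
 * emlxs_ub_get()
 * Allocate an unsolicited buffer of at least 'size' bytes from a pool
 * of the requested type.  When 'reserve' is set, the pool's reserve
 * count is consumed first; otherwise the normal free count is used.
 */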
10568 extern fc_unsol_buf_t *
10569 emlxs_ub_get(emlxs_port_t *port, uint32_t size, uint32_t type,
10570 uint32_t reserve)
10571 {
10572 emlxs_hba_t *hba = HBA;
10573 emlxs_unsol_buf_t *pool;
10574 fc_unsol_buf_t *ubp;
10575 emlxs_ub_priv_t *ub_priv;
10576 uint32_t i;
10577 uint32_t resv_flag;
10578 uint32_t pool_free;
10579 uint32_t pool_free_resv;
10580
10581 mutex_enter(&EMLXS_UB_LOCK);
10582
10583 pool = port->ub_pool;
10584 while (pool) {
10585 /* Find a pool of the appropriate type and size */
10586 if ((pool->pool_available == 0) ||
10587 (pool->pool_type != type) ||
10588 (pool->pool_buf_size < size)) {
10589 goto next_pool;
10590 }
10591
10592
10593 		/* Adjust free counts based on availability */
10594 /* The free reserve count gets first priority */
10595 pool_free_resv =
10596 min(pool->pool_free_resv, pool->pool_available);
10597 pool_free =
10598 min(pool->pool_free,
10599 (pool->pool_available - pool_free_resv));
10600
10601 /* Initialize reserve flag */
10602 resv_flag = reserve;
10603
10604 if (resv_flag) {
10605 if (pool_free_resv == 0) {
10606 if (pool_free == 0) {
10607 goto next_pool;
10608 }
10609 resv_flag = 0;
10610 }
10611 } else if (pool_free == 0) {
10612 goto next_pool;
10613 }
10614
10615 /* Find next available free buffer in this pool */
10616 for (i = 0; i < pool->pool_nentries; i++) {
10617 ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[i];
10618 ub_priv = ubp->ub_fca_private;
10619
10620 if (!ub_priv->available ||
10621 ub_priv->flags != EMLXS_UB_FREE) {
10622 continue;
10623 }
10624
10625 ub_priv->time = hba->timer_tics;
10626
10627 /* Timeout in 5 minutes */
10628 ub_priv->timeout = (5 * 60);
10629
10630 ub_priv->flags = EMLXS_UB_IN_USE;
10631
10632 /* Alloc the buffer from the pool */
10633 if (resv_flag) {
10634 ub_priv->flags |= EMLXS_UB_RESV;
10635 pool->pool_free_resv--;
10636 } else {
10637 pool->pool_free--;
10638 }
10639
10640 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
10641 "ub_get: ubp=%p token=%x (%d,%d,%d,%d)", ubp,
10642 ub_priv->token, pool->pool_nentries,
10643 pool->pool_available, pool->pool_free,
10644 pool->pool_free_resv);
10645
10646 mutex_exit(&EMLXS_UB_LOCK);
10647
10648 return (ubp);
10649 }
10650 next_pool:
10651
10652 pool = pool->pool_next;
10653 }
10654
10655 mutex_exit(&EMLXS_UB_LOCK);
10656
10657 return (NULL);
10658
10659 } /* emlxs_ub_get() */
10660
10661
10662
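/*
 * emlxs_set_pkt_state()
 * Translate an adapter iostat/localstat pair into the ULP-visible
 * pkt_state, pkt_reason, pkt_expln and pkt_action values using the
 * driver lookup tables, and synthesize residual counts (and an FCP
 * response length) when no response frame was received from the chip.
 */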
10663 extern void
10664 emlxs_set_pkt_state(emlxs_buf_t *sbp, uint32_t iostat, uint8_t localstat,
10665 uint32_t lock)
10666 {
10667 fc_packet_t *pkt;
10668 fcp_rsp_t *fcp_rsp;
10669 uint32_t i;
10670 emlxs_xlat_err_t *tptr;
10671 emlxs_xlat_err_t *entry;
10672
10673
10674 pkt = PRIV2PKT(sbp);
10675
10676 /* Warning: Some FCT sbp's don't have */
10677 /* fc_packet objects, so just return */
10678 if (!pkt) {
10679 return;
10680 }
10681
10682 if (lock) {
10683 mutex_enter(&sbp->mtx);
10684 }
10685
10686 if (!(sbp->pkt_flags & PACKET_STATE_VALID)) {
10687 sbp->pkt_flags |= PACKET_STATE_VALID;
10688
10689 /* Perform table lookup */
10690 entry = NULL;
10691 if (iostat != IOSTAT_LOCAL_REJECT) {
10692 tptr = emlxs_iostat_tbl;
10693 for (i = 0; i < IOSTAT_MAX; i++, tptr++) {
10694 if (iostat == tptr->emlxs_status) {
10695 entry = tptr;
10696 break;
10697 }
10698 }
10699 		} else { /* iostat == IOSTAT_LOCAL_REJECT */
10700
10701 tptr = emlxs_ioerr_tbl;
10702 for (i = 0; i < IOERR_MAX; i++, tptr++) {
10703 if (localstat == tptr->emlxs_status) {
10704 entry = tptr;
10705 break;
10706 }
10707 }
10708 }
10709
10710 if (entry) {
10711 pkt->pkt_state = entry->pkt_state;
10712 pkt->pkt_reason = entry->pkt_reason;
10713 pkt->pkt_expln = entry->pkt_expln;
10714 pkt->pkt_action = entry->pkt_action;
10715 } else {
10716 /* Set defaults */
10717 pkt->pkt_state = FC_PKT_TRAN_ERROR;
10718 pkt->pkt_reason = FC_REASON_ABORTED;
10719 pkt->pkt_expln = FC_EXPLN_NONE;
10720 pkt->pkt_action = FC_ACTION_RETRYABLE;
10721 }
10722
10723
10724 /* Set the residual counts and response frame */
10725 /* Check if response frame was received from the chip */
10726 /* If so, then the residual counts will already be set */
10727 if (!(sbp->pkt_flags & (PACKET_FCP_RSP_VALID |
10728 PACKET_CT_RSP_VALID | PACKET_ELS_RSP_VALID))) {
10729 /* We have to create the response frame */
10730 if (iostat == IOSTAT_SUCCESS) {
10731 pkt->pkt_resp_resid = 0;
10732 pkt->pkt_data_resid = 0;
10733
10734 if ((pkt->pkt_cmd_fhdr.type ==
10735 FC_TYPE_SCSI_FCP) && pkt->pkt_rsplen &&
10736 pkt->pkt_resp) {
10737 fcp_rsp = (fcp_rsp_t *)pkt->pkt_resp;
10738
10739 fcp_rsp->fcp_u.fcp_status.
10740 rsp_len_set = 1;
10741 fcp_rsp->fcp_response_len = 8;
10742 }
10743 } else {
10744 /* Otherwise assume no data */
10745 /* and no response received */
10746 pkt->pkt_data_resid = pkt->pkt_datalen;
10747 pkt->pkt_resp_resid = pkt->pkt_rsplen;
10748 }
10749 }
10750 }
10751
10752 if (lock) {
10753 mutex_exit(&sbp->mtx);
10754 }
10755
10756 return;
10757
10758 } /* emlxs_set_pkt_state() */
10759
10760
10761 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
10762
10763 extern void
10764 emlxs_swap_service_params(SERV_PARM *sp)
10765 {
10766 uint16_t *p;
10767 int size;
10768 int i;
10769
10770 size = (sizeof (CSP) - 4) / 2;
10771 p = (uint16_t *)&sp->cmn;
10772 for (i = 0; i < size; i++) {
10773 p[i] = LE_SWAP16(p[i]);
10774 }
10775 sp->cmn.e_d_tov = LE_SWAP32(sp->cmn.e_d_tov);
10776
10777 size = sizeof (CLASS_PARMS) / 2;
10778 p = (uint16_t *)&sp->cls1;
10779 for (i = 0; i < size; i++, p++) {
10780 *p = LE_SWAP16(*p);
10781 }
10782
10783 size = sizeof (CLASS_PARMS) / 2;
10784 p = (uint16_t *)&sp->cls2;
10785 for (i = 0; i < size; i++, p++) {
10786 *p = LE_SWAP16(*p);
10787 }
10788
10789 size = sizeof (CLASS_PARMS) / 2;
10790 p = (uint16_t *)&sp->cls3;
10791 for (i = 0; i < size; i++, p++) {
10792 *p = LE_SWAP16(*p);
10793 }
10794
10795 size = sizeof (CLASS_PARMS) / 2;
10796 p = (uint16_t *)&sp->cls4;
10797 for (i = 0; i < size; i++, p++) {
10798 *p = LE_SWAP16(*p);
10799 }
10800
10801 return;
10802
10803 } /* emlxs_swap_service_params() */
10804
10805 extern void
10806 emlxs_unswap_pkt(emlxs_buf_t *sbp)
10807 {
10808 if (sbp->pkt_flags & PACKET_FCP_SWAPPED) {
10809 emlxs_swap_fcp_pkt(sbp);
10810 }
10811
10812 else if (sbp->pkt_flags & PACKET_ELS_SWAPPED) {
10813 emlxs_swap_els_pkt(sbp);
10814 }
10815
10816 else if (sbp->pkt_flags & PACKET_CT_SWAPPED) {
10817 emlxs_swap_ct_pkt(sbp);
10818 }
10819
10820 } /* emlxs_unswap_pkt() */
10821
10822
10823 extern void
10824 emlxs_swap_fcp_pkt(emlxs_buf_t *sbp)
10825 {
10826 fc_packet_t *pkt;
10827 FCP_CMND *cmd;
10828 fcp_rsp_t *rsp;
10829 uint16_t *lunp;
10830 uint32_t i;
10831
10832 mutex_enter(&sbp->mtx);
10833
10834 if (sbp->pkt_flags & PACKET_ALLOCATED) {
10835 mutex_exit(&sbp->mtx);
10836 return;
10837 }
10838
10839 if (sbp->pkt_flags & PACKET_FCP_SWAPPED) {
10840 sbp->pkt_flags &= ~PACKET_FCP_SWAPPED;
10841 } else {
10842 sbp->pkt_flags |= PACKET_FCP_SWAPPED;
10843 }
10844
10845 mutex_exit(&sbp->mtx);
10846
10847 pkt = PRIV2PKT(sbp);
10848
10849 cmd = (FCP_CMND *)pkt->pkt_cmd;
10850 rsp = (pkt->pkt_rsplen &&
10851 (sbp->pkt_flags & PACKET_FCP_RSP_VALID)) ?
10852 (fcp_rsp_t *)pkt->pkt_resp : NULL;
10853
10854 /* The size of data buffer needs to be swapped. */
10855 cmd->fcpDl = LE_SWAP32(cmd->fcpDl);
10856
10857 /*
10858 * Swap first 2 words of FCP CMND payload.
10859 */
10860 lunp = (uint16_t *)&cmd->fcpLunMsl;
10861 for (i = 0; i < 4; i++) {
10862 lunp[i] = LE_SWAP16(lunp[i]);
10863 }
10864
10865 if (rsp) {
10866 rsp->fcp_resid = LE_SWAP32(rsp->fcp_resid);
10867 rsp->fcp_sense_len = LE_SWAP32(rsp->fcp_sense_len);
10868 rsp->fcp_response_len = LE_SWAP32(rsp->fcp_response_len);
10869 }
10870
10871 return;
10872
10873 } /* emlxs_swap_fcp_pkt() */
10874
10875
10876 extern void
10877 emlxs_swap_els_pkt(emlxs_buf_t *sbp)
10878 {
10879 fc_packet_t *pkt;
10880 uint32_t *cmd;
10881 uint32_t *rsp;
10882 uint32_t command;
10883 uint16_t *c;
10884 uint32_t i;
10885 uint32_t swapped;
10886
10887 mutex_enter(&sbp->mtx);
10888
10889 if (sbp->pkt_flags & PACKET_ALLOCATED) {
10890 mutex_exit(&sbp->mtx);
10891 return;
10892 }
10893
10894 if (sbp->pkt_flags & PACKET_ELS_SWAPPED) {
10895 sbp->pkt_flags &= ~PACKET_ELS_SWAPPED;
10896 swapped = 1;
10897 } else {
10898 sbp->pkt_flags |= PACKET_ELS_SWAPPED;
10899 swapped = 0;
10900 }
10901
10902 mutex_exit(&sbp->mtx);
10903
10904 pkt = PRIV2PKT(sbp);
10905
10906 cmd = (uint32_t *)pkt->pkt_cmd;
10907 rsp = (pkt->pkt_rsplen &&
10908 (sbp->pkt_flags & PACKET_ELS_RSP_VALID)) ?
10909 (uint32_t *)pkt->pkt_resp : NULL;
10910
10911 if (!swapped) {
10912 cmd[0] = LE_SWAP32(cmd[0]);
10913 command = cmd[0] & ELS_CMD_MASK;
10914 } else {
10915 command = cmd[0] & ELS_CMD_MASK;
10916 cmd[0] = LE_SWAP32(cmd[0]);
10917 }
10918
10919 if (rsp) {
10920 rsp[0] = LE_SWAP32(rsp[0]);
10921 }
10922
10923 switch (command) {
10924 case ELS_CMD_ACC:
10925 if (sbp->ucmd == ELS_CMD_ADISC) {
10926 /* Hard address of originator */
10927 cmd[1] = LE_SWAP32(cmd[1]);
10928
10929 /* N_Port ID of originator */
10930 cmd[6] = LE_SWAP32(cmd[6]);
10931 }
10932 break;
10933
10934 case ELS_CMD_PLOGI:
10935 case ELS_CMD_FLOGI:
10936 case ELS_CMD_FDISC:
10937 if (rsp) {
10938 emlxs_swap_service_params((SERV_PARM *) & rsp[1]);
10939 }
10940 break;
10941
10942 case ELS_CMD_LOGO:
10943 cmd[1] = LE_SWAP32(cmd[1]); /* N_Port ID */
10944 break;
10945
10946 case ELS_CMD_RLS:
10947 cmd[1] = LE_SWAP32(cmd[1]);
10948
10949 if (rsp) {
10950 for (i = 0; i < 6; i++) {
10951 rsp[1 + i] = LE_SWAP32(rsp[1 + i]);
10952 }
10953 }
10954 break;
10955
10956 case ELS_CMD_ADISC:
10957 cmd[1] = LE_SWAP32(cmd[1]); /* Hard address of originator */
10958 cmd[6] = LE_SWAP32(cmd[6]); /* N_Port ID of originator */
10959 break;
10960
10961 case ELS_CMD_PRLI:
10962 c = (uint16_t *)&cmd[1];
10963 c[1] = LE_SWAP16(c[1]);
10964
10965 cmd[4] = LE_SWAP32(cmd[4]);
10966
10967 if (rsp) {
10968 rsp[4] = LE_SWAP32(rsp[4]);
10969 }
10970 break;
10971
10972 case ELS_CMD_SCR:
10973 cmd[1] = LE_SWAP32(cmd[1]);
10974 break;
10975
10976 case ELS_CMD_LINIT:
10977 if (rsp) {
10978 rsp[1] = LE_SWAP32(rsp[1]);
10979 }
10980 break;
10981
10982 default:
10983 break;
10984 }
10985
10986 return;
10987
10988 } /* emlxs_swap_els_pkt() */
10989
10990
10991 extern void
10992 emlxs_swap_ct_pkt(emlxs_buf_t *sbp)
10993 {
10994 fc_packet_t *pkt;
10995 uint32_t *cmd;
10996 uint32_t *rsp;
10997 uint32_t command;
10998 uint32_t i;
10999 uint32_t swapped;
11000
11001 mutex_enter(&sbp->mtx);
11002
11003 if (sbp->pkt_flags & PACKET_ALLOCATED) {
11004 mutex_exit(&sbp->mtx);
11005 return;
11006 }
11007
11008 if (sbp->pkt_flags & PACKET_CT_SWAPPED) {
11009 sbp->pkt_flags &= ~PACKET_CT_SWAPPED;
11010 swapped = 1;
11011 } else {
11012 sbp->pkt_flags |= PACKET_CT_SWAPPED;
11013 swapped = 0;
11014 }
11015
11016 mutex_exit(&sbp->mtx);
11017
11018 pkt = PRIV2PKT(sbp);
11019
11020 cmd = (uint32_t *)pkt->pkt_cmd;
11021 rsp = (pkt->pkt_rsplen &&
11022 (sbp->pkt_flags & PACKET_CT_RSP_VALID)) ?
11023 (uint32_t *)pkt->pkt_resp : NULL;
11024
11025 if (!swapped) {
11026 cmd[0] = 0x01000000;
11027 command = cmd[2];
11028 }
11029
11030 cmd[0] = LE_SWAP32(cmd[0]);
11031 cmd[1] = LE_SWAP32(cmd[1]);
11032 cmd[2] = LE_SWAP32(cmd[2]);
11033 cmd[3] = LE_SWAP32(cmd[3]);
11034
11035 if (swapped) {
11036 command = cmd[2];
11037 }
11038
11039 switch ((command >> 16)) {
11040 case SLI_CTNS_GA_NXT:
11041 cmd[4] = LE_SWAP32(cmd[4]);
11042 break;
11043
11044 case SLI_CTNS_GPN_ID:
11045 case SLI_CTNS_GNN_ID:
11046 case SLI_CTNS_RPN_ID:
11047 case SLI_CTNS_RNN_ID:
11048 case SLI_CTNS_RSPN_ID:
11049 cmd[4] = LE_SWAP32(cmd[4]);
11050 break;
11051
11052 case SLI_CTNS_RCS_ID:
11053 case SLI_CTNS_RPT_ID:
11054 cmd[4] = LE_SWAP32(cmd[4]);
11055 cmd[5] = LE_SWAP32(cmd[5]);
11056 break;
11057
11058 case SLI_CTNS_RFT_ID:
11059 cmd[4] = LE_SWAP32(cmd[4]);
11060
11061 /* Swap FC4 types */
11062 for (i = 0; i < 8; i++) {
11063 cmd[5 + i] = LE_SWAP32(cmd[5 + i]);
11064 }
11065 break;
11066
11067 case SLI_CTNS_GFT_ID:
11068 if (rsp) {
11069 /* Swap FC4 types */
11070 for (i = 0; i < 8; i++) {
11071 rsp[4 + i] = LE_SWAP32(rsp[4 + i]);
11072 }
11073 }
11074 break;
11075
11076 case SLI_CTNS_GCS_ID:
11077 case SLI_CTNS_GSPN_ID:
11078 case SLI_CTNS_GSNN_NN:
11079 case SLI_CTNS_GIP_NN:
11080 case SLI_CTNS_GIPA_NN:
11081
11082 case SLI_CTNS_GPT_ID:
11083 case SLI_CTNS_GID_NN:
11084 case SLI_CTNS_GNN_IP:
11085 case SLI_CTNS_GIPA_IP:
11086 case SLI_CTNS_GID_FT:
11087 case SLI_CTNS_GID_PT:
11088 case SLI_CTNS_GID_PN:
11089 case SLI_CTNS_RIP_NN:
11090 case SLI_CTNS_RIPA_NN:
11091 case SLI_CTNS_RSNN_NN:
11092 case SLI_CTNS_DA_ID:
11093 case SLI_CT_RESPONSE_FS_RJT:
11094 case SLI_CT_RESPONSE_FS_ACC:
11095
11096 default:
11097 break;
11098 }
11099 return;
11100
11101 } /* emlxs_swap_ct_pkt() */
11102
11103
11104 extern void
11105 emlxs_swap_els_ub(fc_unsol_buf_t *ubp)
11106 {
11107 emlxs_ub_priv_t *ub_priv;
11108 fc_rscn_t *rscn;
11109 uint32_t count;
11110 uint32_t i;
11111 uint32_t *lp;
11112 la_els_logi_t *logi;
11113
11114 ub_priv = ubp->ub_fca_private;
11115
11116 switch (ub_priv->cmd) {
11117 case ELS_CMD_RSCN:
11118 rscn = (fc_rscn_t *)ubp->ub_buffer;
11119
11120 rscn->rscn_payload_len = LE_SWAP16(rscn->rscn_payload_len);
11121
11122 count = ((rscn->rscn_payload_len - 4) / 4);
11123 lp = (uint32_t *)ubp->ub_buffer + 1;
11124 for (i = 0; i < count; i++, lp++) {
11125 *lp = LE_SWAP32(*lp);
11126 }
11127
11128 break;
11129
11130 case ELS_CMD_FLOGI:
11131 case ELS_CMD_PLOGI:
11132 case ELS_CMD_FDISC:
11133 case ELS_CMD_PDISC:
11134 logi = (la_els_logi_t *)ubp->ub_buffer;
11135 emlxs_swap_service_params(
11136 (SERV_PARM *)&logi->common_service);
11137 break;
11138
11139 /* ULP handles this */
11140 case ELS_CMD_LOGO:
11141 case ELS_CMD_PRLI:
11142 case ELS_CMD_PRLO:
11143 case ELS_CMD_ADISC:
11144 default:
11145 break;
11146 }
11147
11148 return;
11149
11150 } /* emlxs_swap_els_ub() */
11151
11152
11153 #endif /* EMLXS_MODREV2X */
11154
11155
11156 extern char *
11157 emlxs_mode_xlate(uint32_t mode)
11158 {
11159 static char buffer[32];
11160 uint32_t i;
11161 uint32_t count;
11162
11163 count = sizeof (emlxs_mode_table) / sizeof (emlxs_table_t);
11164 for (i = 0; i < count; i++) {
11165 if (mode == emlxs_mode_table[i].code) {
11166 return (emlxs_mode_table[i].string);
11167 }
11168 }
11169
11170 (void) snprintf(buffer, sizeof (buffer), "Unknown (%x)", mode);
11171 return (buffer);
11172
11173 } /* emlxs_mode_xlate() */
11174
11175
11176 extern char *
11177 emlxs_elscmd_xlate(uint32_t elscmd)
11178 {
11179 static char buffer[32];
11180 uint32_t i;
11181 uint32_t count;
11182
11183 count = sizeof (emlxs_elscmd_table) / sizeof (emlxs_table_t);
11184 for (i = 0; i < count; i++) {
11185 if (elscmd == emlxs_elscmd_table[i].code) {
11186 return (emlxs_elscmd_table[i].string);
11187 }
11188 }
11189
11190 (void) snprintf(buffer, sizeof (buffer), "ELS=0x%x", elscmd);
11191 return (buffer);
11192
11193 } /* emlxs_elscmd_xlate() */
11194
11195
11196 extern char *
11197 emlxs_ctcmd_xlate(uint32_t ctcmd)
11198 {
11199 static char buffer[32];
11200 uint32_t i;
11201 uint32_t count;
11202
11203 count = sizeof (emlxs_ctcmd_table) / sizeof (emlxs_table_t);
11204 for (i = 0; i < count; i++) {
11205 if (ctcmd == emlxs_ctcmd_table[i].code) {
11206 return (emlxs_ctcmd_table[i].string);
11207 }
11208 }
11209
11210 (void) snprintf(buffer, sizeof (buffer), "cmd=0x%x", ctcmd);
11211 return (buffer);
11212
11213 } /* emlxs_ctcmd_xlate() */
11214
11215
11216 #ifdef MENLO_SUPPORT
11217 extern char *
11218 emlxs_menlo_cmd_xlate(uint32_t cmd)
11219 {
11220 static char buffer[32];
11221 uint32_t i;
11222 uint32_t count;
11223
11224 count = sizeof (emlxs_menlo_cmd_table) / sizeof (emlxs_table_t);
11225 for (i = 0; i < count; i++) {
11226 if (cmd == emlxs_menlo_cmd_table[i].code) {
11227 return (emlxs_menlo_cmd_table[i].string);
11228 }
11229 }
11230
11231 (void) snprintf(buffer, sizeof (buffer), "Cmd=0x%x", cmd);
11232 return (buffer);
11233
11234 } /* emlxs_menlo_cmd_xlate() */
11235
11236 extern char *
11237 emlxs_menlo_rsp_xlate(uint32_t rsp)
11238 {
11239 static char buffer[32];
11240 uint32_t i;
11241 uint32_t count;
11242
11243 count = sizeof (emlxs_menlo_rsp_table) / sizeof (emlxs_table_t);
11244 for (i = 0; i < count; i++) {
11245 if (rsp == emlxs_menlo_rsp_table[i].code) {
11246 return (emlxs_menlo_rsp_table[i].string);
11247 }
11248 }
11249
11250 (void) snprintf(buffer, sizeof (buffer), "Rsp=0x%x", rsp);
11251 return (buffer);
11252
11253 } /* emlxs_menlo_rsp_xlate() */
11254
11255 #endif /* MENLO_SUPPORT */
11256
11257
11258 extern char *
11259 emlxs_rmcmd_xlate(uint32_t rmcmd)
11260 {
11261 static char buffer[32];
11262 uint32_t i;
11263 uint32_t count;
11264
11265 count = sizeof (emlxs_rmcmd_table) / sizeof (emlxs_table_t);
11266 for (i = 0; i < count; i++) {
11267 if (rmcmd == emlxs_rmcmd_table[i].code) {
11268 return (emlxs_rmcmd_table[i].string);
11269 }
11270 }
11271
11272 (void) snprintf(buffer, sizeof (buffer), "RM=0x%x", rmcmd);
11273 return (buffer);
11274
11275 } /* emlxs_rmcmd_xlate() */
11276
11277
11278
11279 extern char *
11280 emlxs_mscmd_xlate(uint16_t mscmd)
11281 {
11282 static char buffer[32];
11283 uint32_t i;
11284 uint32_t count;
11285
11286 count = sizeof (emlxs_mscmd_table) / sizeof (emlxs_table_t);
11287 for (i = 0; i < count; i++) {
11288 if (mscmd == emlxs_mscmd_table[i].code) {
11289 return (emlxs_mscmd_table[i].string);
11290 }
11291 }
11292
11293 (void) snprintf(buffer, sizeof (buffer), "Cmd=0x%x", mscmd);
11294 return (buffer);
11295
11296 } /* emlxs_mscmd_xlate() */
11297
11298
11299 extern char *
11300 emlxs_state_xlate(uint8_t state)
11301 {
11302 static char buffer[32];
11303 uint32_t i;
11304 uint32_t count;
11305
11306 count = sizeof (emlxs_state_table) / sizeof (emlxs_table_t);
11307 for (i = 0; i < count; i++) {
11308 if (state == emlxs_state_table[i].code) {
11309 return (emlxs_state_table[i].string);
11310 }
11311 }
11312
11313 (void) snprintf(buffer, sizeof (buffer), "State=0x%x", state);
11314 return (buffer);
11315
11316 } /* emlxs_state_xlate() */
11317
11318
11319 extern char *
11320 emlxs_error_xlate(uint8_t errno)
11321 {
11322 static char buffer[32];
11323 uint32_t i;
11324 uint32_t count;
11325
11326 count = sizeof (emlxs_error_table) / sizeof (emlxs_table_t);
11327 for (i = 0; i < count; i++) {
11328 if (errno == emlxs_error_table[i].code) {
11329 return (emlxs_error_table[i].string);
11330 }
11331 }
11332
11333 (void) snprintf(buffer, sizeof (buffer), "Errno=0x%x", errno);
11334 return (buffer);
11335
11336 } /* emlxs_error_xlate() */
11337
11338
11339 static int
11340 emlxs_pm_lower_power(dev_info_t *dip)
11341 {
11342 int ddiinst;
11343 int emlxinst;
11344 emlxs_config_t *cfg;
11345 int32_t rval;
11346 emlxs_hba_t *hba;
11347
11348 ddiinst = ddi_get_instance(dip);
11349 emlxinst = emlxs_get_instance(ddiinst);
11350 hba = emlxs_device.hba[emlxinst];
11351 cfg = &CFG;
11352
11353 rval = DDI_SUCCESS;
11354
11355 /* Lower the power level */
11356 if (cfg[CFG_PM_SUPPORT].current) {
11357 rval =
11358 pm_lower_power(dip, EMLXS_PM_ADAPTER,
11359 EMLXS_PM_ADAPTER_DOWN);
11360 } else {
11361 		/* We do not have kernel support for power management enabled, */
11362 		/* so call our power management routine directly */
11363 rval =
11364 emlxs_power(dip, EMLXS_PM_ADAPTER, EMLXS_PM_ADAPTER_DOWN);
11365 }
11366
11367 return (rval);
11368
11369 } /* emlxs_pm_lower_power() */
11370
11371
11372 static int
11373 emlxs_pm_raise_power(dev_info_t *dip)
11374 {
11375 int ddiinst;
11376 int emlxinst;
11377 emlxs_config_t *cfg;
11378 int32_t rval;
11379 emlxs_hba_t *hba;
11380
11381 ddiinst = ddi_get_instance(dip);
11382 emlxinst = emlxs_get_instance(ddiinst);
11383 hba = emlxs_device.hba[emlxinst];
11384 cfg = &CFG;
11385
11386 /* Raise the power level */
11387 if (cfg[CFG_PM_SUPPORT].current) {
11388 rval =
11389 pm_raise_power(dip, EMLXS_PM_ADAPTER,
11390 EMLXS_PM_ADAPTER_UP);
11391 } else {
11392 		/* We do not have kernel support for power management enabled, */
11393 		/* so call our power management routine directly */
11394 rval =
11395 emlxs_power(dip, EMLXS_PM_ADAPTER, EMLXS_PM_ADAPTER_UP);
11396 }
11397
11398 return (rval);
11399
11400 } /* emlxs_pm_raise_power() */
11401
11402
11403 #ifdef IDLE_TIMER
11404
11405 extern int
11406 emlxs_pm_busy_component(emlxs_hba_t *hba)
11407 {
11408 emlxs_config_t *cfg = &CFG;
11409 int rval;
11410
11411 hba->pm_active = 1;
11412
11413 if (hba->pm_busy) {
11414 return (DDI_SUCCESS);
11415 }
11416
11417 mutex_enter(&EMLXS_PM_LOCK);
11418
11419 if (hba->pm_busy) {
11420 mutex_exit(&EMLXS_PM_LOCK);
11421 return (DDI_SUCCESS);
11422 }
11423 hba->pm_busy = 1;
11424
11425 mutex_exit(&EMLXS_PM_LOCK);
11426
11427 /* Attempt to notify system that we are busy */
11428 if (cfg[CFG_PM_SUPPORT].current) {
11429 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
11430 "pm_busy_component.");
11431
11432 rval = pm_busy_component(dip, EMLXS_PM_ADAPTER);
11433
11434 if (rval != DDI_SUCCESS) {
11435 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
11436 "pm_busy_component failed. ret=%d", rval);
11437
11438 /* If this attempt failed then clear our flags */
11439 mutex_enter(&EMLXS_PM_LOCK);
11440 hba->pm_busy = 0;
11441 mutex_exit(&EMLXS_PM_LOCK);
11442
11443 return (rval);
11444 }
11445 }
11446
11447 return (DDI_SUCCESS);
11448
11449 } /* emlxs_pm_busy_component() */
11450
11451
11452 extern int
11453 emlxs_pm_idle_component(emlxs_hba_t *hba)
11454 {
11455 emlxs_config_t *cfg = &CFG;
11456 int rval;
11457
11458 if (!hba->pm_busy) {
11459 return (DDI_SUCCESS);
11460 }
11461
11462 mutex_enter(&EMLXS_PM_LOCK);
11463
11464 if (!hba->pm_busy) {
11465 mutex_exit(&EMLXS_PM_LOCK);
11466 return (DDI_SUCCESS);
11467 }
11468 hba->pm_busy = 0;
11469
11470 mutex_exit(&EMLXS_PM_LOCK);
11471
11472 if (cfg[CFG_PM_SUPPORT].current) {
11473 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
11474 "pm_idle_component.");
11475
11476 rval = pm_idle_component(dip, EMLXS_PM_ADAPTER);
11477
11478 if (rval != DDI_SUCCESS) {
11479 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
11480 "pm_idle_component failed. ret=%d", rval);
11481
11482 /* If this attempt failed then */
11483 /* reset our flags for another attempt */
11484 mutex_enter(&EMLXS_PM_LOCK);
11485 hba->pm_busy = 1;
11486 mutex_exit(&EMLXS_PM_LOCK);
11487
11488 return (rval);
11489 }
11490 }
11491
11492 return (DDI_SUCCESS);
11493
11494 } /* emlxs_pm_idle_component() */
11495
11496
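/*
 * emlxs_pm_idle_timer()
 * Called from the driver timer.  While I/O activity keeps pm_active
 * set, the idle deadline is pushed out by CFG_PM_IDLE ticks; once
 * timer_tics passes the deadline, the component is marked idle for
 * power management.
 */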
11497 extern void
11498 emlxs_pm_idle_timer(emlxs_hba_t *hba)
11499 {
11500 emlxs_config_t *cfg = &CFG;
11501
11502 if (hba->pm_active) {
11503 /* Clear active flag and reset idle timer */
11504 mutex_enter(&EMLXS_PM_LOCK);
11505 hba->pm_active = 0;
11506 hba->pm_idle_timer =
11507 hba->timer_tics + cfg[CFG_PM_IDLE].current;
11508 mutex_exit(&EMLXS_PM_LOCK);
11509 }
11510
11511 /* Check for idle timeout */
11512 else if (hba->timer_tics >= hba->pm_idle_timer) {
11513 if (emlxs_pm_idle_component(hba) == DDI_SUCCESS) {
11514 mutex_enter(&EMLXS_PM_LOCK);
11515 hba->pm_idle_timer =
11516 hba->timer_tics + cfg[CFG_PM_IDLE].current;
11517 mutex_exit(&EMLXS_PM_LOCK);
11518 }
11519 }
11520
11521 return;
11522
11523 } /* emlxs_pm_idle_timer() */
11524
11525 #endif /* IDLE_TIMER */
11526
11527
11528 #if (EMLXS_MODREV >= EMLXS_MODREV3) && (EMLXS_MODREV <= EMLXS_MODREV4)
11529 static void
11530 emlxs_read_vport_prop(emlxs_hba_t *hba)
11531 {
11532 emlxs_port_t *port = &PPORT;
11533 emlxs_config_t *cfg = &CFG;
11534 char **arrayp;
11535 uint8_t *s;
11536 uint8_t *np;
11537 NAME_TYPE pwwpn;
11538 NAME_TYPE wwnn;
11539 NAME_TYPE wwpn;
11540 uint32_t vpi;
11541 uint32_t cnt;
11542 uint32_t rval;
11543 uint32_t i;
11544 uint32_t j;
11545 uint32_t c1;
11546 uint32_t sum;
11547 uint32_t errors;
11548 char buffer[64];
11549
11550 /* Check for the per adapter vport setting */
11551 (void) snprintf(buffer, sizeof (buffer), "%s%d-vport", DRIVER_NAME,
11552 hba->ddiinst);
11553 cnt = 0;
11554 arrayp = NULL;
11555 rval =
11556 ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba->dip,
11557 (DDI_PROP_DONTPASS), buffer, &arrayp, &cnt);
11558
11559 if ((rval != DDI_PROP_SUCCESS) || !cnt || !arrayp) {
11560 /* Check for the global vport setting */
11561 cnt = 0;
11562 arrayp = NULL;
11563 rval =
11564 ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba->dip,
11565 (DDI_PROP_DONTPASS), "vport", &arrayp, &cnt);
11566 }
11567
11568 if ((rval != DDI_PROP_SUCCESS) || !cnt || !arrayp) {
11569 return;
11570 }
11571
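	/*
	 * Each vport entry is expected in the form
	 * "<pwwpn>:<wwnn>:<wwpn>:<vpi>", where the three names are
	 * 16 hex digits each and the vpi is decimal; for example
	 * (hypothetical values):
	 * "10000000c9abcdef:200000009a000001:100000009a000001:1"
	 */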
11572 for (i = 0; i < cnt; i++) {
11573 errors = 0;
11574 s = (uint8_t *)arrayp[i];
11575
11576 if (!s) {
11577 break;
11578 }
11579
11580 np = (uint8_t *)&pwwpn;
11581 for (j = 0; j < sizeof (NAME_TYPE); j++) {
11582 c1 = *s++;
11583 if ((c1 >= '0') && (c1 <= '9')) {
11584 sum = ((c1 - '0') << 4);
11585 } else if ((c1 >= 'a') && (c1 <= 'f')) {
11586 sum = ((c1 - 'a' + 10) << 4);
11587 } else if ((c1 >= 'A') && (c1 <= 'F')) {
11588 sum = ((c1 - 'A' + 10) << 4);
11589 } else {
11590 EMLXS_MSGF(EMLXS_CONTEXT,
11591 &emlxs_attach_debug_msg,
11592 "Config error: Invalid PWWPN found. "
11593 "entry=%d byte=%d hi_nibble=%c",
11594 i, j, c1);
11595 errors++;
11596 }
11597
11598 c1 = *s++;
11599 if ((c1 >= '0') && (c1 <= '9')) {
11600 sum |= (c1 - '0');
11601 } else if ((c1 >= 'a') && (c1 <= 'f')) {
11602 sum |= (c1 - 'a' + 10);
11603 } else if ((c1 >= 'A') && (c1 <= 'F')) {
11604 sum |= (c1 - 'A' + 10);
11605 } else {
11606 EMLXS_MSGF(EMLXS_CONTEXT,
11607 &emlxs_attach_debug_msg,
11608 "Config error: Invalid PWWPN found. "
11609 "entry=%d byte=%d lo_nibble=%c",
11610 i, j, c1);
11611 errors++;
11612 }
11613
11614 *np++ = (uint8_t)sum;
11615 }
11616
11617 if (*s++ != ':') {
11618 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
11619 "Config error: Invalid delimiter after PWWPN. "
11620 "entry=%d", i);
11621 goto out;
11622 }
11623
11624 np = (uint8_t *)&wwnn;
11625 for (j = 0; j < sizeof (NAME_TYPE); j++) {
11626 c1 = *s++;
11627 if ((c1 >= '0') && (c1 <= '9')) {
11628 sum = ((c1 - '0') << 4);
11629 } else if ((c1 >= 'a') && (c1 <= 'f')) {
11630 sum = ((c1 - 'a' + 10) << 4);
11631 } else if ((c1 >= 'A') && (c1 <= 'F')) {
11632 sum = ((c1 - 'A' + 10) << 4);
11633 } else {
11634 EMLXS_MSGF(EMLXS_CONTEXT,
11635 &emlxs_attach_debug_msg,
11636 "Config error: Invalid WWNN found. "
11637 "entry=%d byte=%d hi_nibble=%c",
11638 i, j, c1);
11639 errors++;
11640 }
11641
11642 c1 = *s++;
11643 if ((c1 >= '0') && (c1 <= '9')) {
11644 sum |= (c1 - '0');
11645 } else if ((c1 >= 'a') && (c1 <= 'f')) {
11646 sum |= (c1 - 'a' + 10);
11647 } else if ((c1 >= 'A') && (c1 <= 'F')) {
11648 sum |= (c1 - 'A' + 10);
11649 } else {
11650 EMLXS_MSGF(EMLXS_CONTEXT,
11651 &emlxs_attach_debug_msg,
11652 "Config error: Invalid WWNN found. "
11653 "entry=%d byte=%d lo_nibble=%c",
11654 i, j, c1);
11655 errors++;
11656 }
11657
11658 *np++ = (uint8_t)sum;
11659 }
11660
11661 if (*s++ != ':') {
11662 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
11663 "Config error: Invalid delimiter after WWNN. "
11664 "entry=%d", i);
11665 goto out;
11666 }
11667
11668 np = (uint8_t *)&wwpn;
11669 for (j = 0; j < sizeof (NAME_TYPE); j++) {
11670 c1 = *s++;
11671 if ((c1 >= '0') && (c1 <= '9')) {
11672 sum = ((c1 - '0') << 4);
11673 } else if ((c1 >= 'a') && (c1 <= 'f')) {
11674 sum = ((c1 - 'a' + 10) << 4);
11675 } else if ((c1 >= 'A') && (c1 <= 'F')) {
11676 sum = ((c1 - 'A' + 10) << 4);
11677 } else {
11678 EMLXS_MSGF(EMLXS_CONTEXT,
11679 &emlxs_attach_debug_msg,
11680 "Config error: Invalid WWPN found. "
11681 "entry=%d byte=%d hi_nibble=%c",
11682 i, j, c1);
11683
11684 errors++;
11685 }
11686
11687 c1 = *s++;
11688 if ((c1 >= '0') && (c1 <= '9')) {
11689 sum |= (c1 - '0');
11690 } else if ((c1 >= 'a') && (c1 <= 'f')) {
11691 sum |= (c1 - 'a' + 10);
11692 } else if ((c1 >= 'A') && (c1 <= 'F')) {
11693 sum |= (c1 - 'A' + 10);
11694 } else {
11695 EMLXS_MSGF(EMLXS_CONTEXT,
11696 &emlxs_attach_debug_msg,
11697 "Config error: Invalid WWPN found. "
11698 "entry=%d byte=%d lo_nibble=%c",
11699 i, j, c1);
11700
11701 errors++;
11702 }
11703
11704 *np++ = (uint8_t)sum;
11705 }
11706
11707 if (*s++ != ':') {
11708 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
11709 "Config error: Invalid delimiter after WWPN. "
11710 "entry=%d", i);
11711
11712 goto out;
11713 }
11714
11715 sum = 0;
11716 do {
11717 c1 = *s++;
11718 if ((c1 < '0') || (c1 > '9')) {
11719 EMLXS_MSGF(EMLXS_CONTEXT,
11720 &emlxs_attach_debug_msg,
11721 "Config error: Invalid VPI found. "
11722 "entry=%d c=%c vpi=%d", i, c1, sum);
11723
11724 goto out;
11725 }
11726
11727 sum = (sum * 10) + (c1 - '0');
11728
11729 } while (*s != 0);
11730
11731 vpi = sum;
11732
11733 if (errors) {
11734 continue;
11735 }
11736
11737 /* Entry has been read */
11738
11739 /* Check if the physical port wwpn */
11740 /* matches our physical port wwpn */
11741 if (bcmp((caddr_t)&hba->wwpn, (caddr_t)&pwwpn, 8)) {
11742 continue;
11743 }
11744
11745 /* Check vpi range */
11746 if ((vpi == 0) || (vpi >= MAX_VPORTS)) {
11747 continue;
11748 }
11749
11750 /* Check if port has already been configured */
11751 if (hba->port[vpi].flag & EMLXS_PORT_CONFIG) {
11752 continue;
11753 }
11754
11755 /* Set the highest configured vpi */
11756 if (vpi > hba->vpi_high) {
11757 hba->vpi_high = vpi;
11758 }
11759
11760 bcopy((caddr_t)&wwnn, (caddr_t)&hba->port[vpi].wwnn,
11761 sizeof (NAME_TYPE));
11762 bcopy((caddr_t)&wwpn, (caddr_t)&hba->port[vpi].wwpn,
11763 sizeof (NAME_TYPE));
11764
11765 if (hba->port[vpi].snn[0] == 0) {
11766 (void) strncpy((caddr_t)hba->port[vpi].snn,
11767 (caddr_t)hba->snn,
11768 (sizeof (hba->port[vpi].snn)-1));
11769 }
11770
11771 if (hba->port[vpi].spn[0] == 0) {
11772 (void) snprintf((caddr_t)hba->port[vpi].spn,
11773 sizeof (hba->port[vpi].spn),
11774 "%s VPort-%d",
11775 (caddr_t)hba->spn, vpi);
11776 }
11777
11778 hba->port[vpi].flag |=
11779 (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLED);
11780
11781 if (cfg[CFG_VPORT_RESTRICTED].current) {
11782 hba->port[vpi].flag |= EMLXS_PORT_RESTRICTED;
11783 }
11784 }
11785
11786 out:
11787
11788 (void) ddi_prop_free((void *) arrayp);
11789 return;
11790
11791 } /* emlxs_read_vport_prop() */
11792 #endif /* EMLXS_MODREV3 || EMLXS_MODREV4 */
11793
11794
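/*
 * emlxs_wwn_xlate()
 * Format an 8-byte WWN into 'buffer' as a 16-character lowercase hex
 * string, e.g. (hypothetical value) "10000000c9abcdef", and return the
 * buffer pointer.
 */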
11795 extern char *
11796 emlxs_wwn_xlate(char *buffer, size_t len, uint8_t *wwn)
11797 {
11798 (void) snprintf(buffer, len, "%02x%02x%02x%02x%02x%02x%02x%02x",
11799 wwn[0] & 0xff, wwn[1] & 0xff, wwn[2] & 0xff, wwn[3] & 0xff,
11800 wwn[4] & 0xff, wwn[5] & 0xff, wwn[6] & 0xff, wwn[7] & 0xff);
11801
11802 return (buffer);
11803
11804 } /* emlxs_wwn_xlate() */
11805
11806
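/*
 * emlxs_wwn_cmp()
 * Compare two 8-byte WWNs byte by byte; returns 1, -1 or 0 with
 * memcmp-style ordering.
 */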
11807 extern int32_t
11808 emlxs_wwn_cmp(uint8_t *wwn1, uint8_t *wwn2)
11809 {
11810 uint32_t i;
11811
11812 for (i = 0; i < 8; i ++, wwn1 ++, wwn2 ++) {
11813 if (*wwn1 > *wwn2) {
11814 return (1);
11815 }
11816 if (*wwn1 < *wwn2) {
11817 return (-1);
11818 }
11819 }
11820
11821 return (0);
11822
11823 } /* emlxs_wwn_cmp() */
11824
11825
11826 /* This is called at port online and offline */
11827 extern void
11828 emlxs_ub_flush(emlxs_port_t *port)
11829 {
11830 emlxs_hba_t *hba = HBA;
11831 fc_unsol_buf_t *ubp;
11832 emlxs_ub_priv_t *ub_priv;
11833 emlxs_ub_priv_t *next;
11834
11835 /* Return if nothing to do */
11836 if (!port->ub_wait_head) {
11837 return;
11838 }
11839
11840 mutex_enter(&EMLXS_PORT_LOCK);
11841 ub_priv = port->ub_wait_head;
11842 port->ub_wait_head = NULL;
11843 port->ub_wait_tail = NULL;
11844 mutex_exit(&EMLXS_PORT_LOCK);
11845
11846 while (ub_priv) {
11847 next = ub_priv->next;
11848 ubp = ub_priv->ubp;
11849
11850 /* Check if ULP is online and we have a callback function */
11851 if (port->ulp_statec != FC_STATE_OFFLINE) {
11852 /* Send ULP the ub buffer */
11853 emlxs_ulp_unsol_cb(port, ubp);
11854 } else { /* Drop the buffer */
11855 (void) emlxs_fca_ub_release(port, 1, &ubp->ub_token);
11856 }
11857
11858 ub_priv = next;
11859
11860 } /* while () */
11861
11862 return;
11863
11864 } /* emlxs_ub_flush() */
11865
11866
11867 extern void
11868 emlxs_ub_callback(emlxs_port_t *port, fc_unsol_buf_t *ubp)
11869 {
11870 emlxs_hba_t *hba = HBA;
11871 emlxs_ub_priv_t *ub_priv;
11872
11873 ub_priv = ubp->ub_fca_private;
11874
11875 /* Check if ULP is online */
11876 if (port->ulp_statec != FC_STATE_OFFLINE) {
11877 emlxs_ulp_unsol_cb(port, ubp);
11878
11879 } else { /* ULP offline */
11880
11881 if (hba->state >= FC_LINK_UP) {
11882 /* Add buffer to queue tail */
11883 mutex_enter(&EMLXS_PORT_LOCK);
11884
11885 if (port->ub_wait_tail) {
11886 port->ub_wait_tail->next = ub_priv;
11887 }
11888 port->ub_wait_tail = ub_priv;
11889
11890 if (!port->ub_wait_head) {
11891 port->ub_wait_head = ub_priv;
11892 }
11893
11894 mutex_exit(&EMLXS_PORT_LOCK);
11895 } else {
11896 (void) emlxs_fca_ub_release(port, 1, &ubp->ub_token);
11897 }
11898 }
11899
11900 return;
11901
11902 } /* emlxs_ub_callback() */
11903
11904
11905 extern void
11906 emlxs_fca_link_up(emlxs_port_t *port)
11907 {
11908 emlxs_ulp_statec_cb(port, port->ulp_statec);
11909 return;
11910
11911 } /* emlxs_fca_link_up() */
11912
11913
11914 extern void
11915 emlxs_fca_link_down(emlxs_port_t *port)
11916 {
11917 emlxs_ulp_statec_cb(port, FC_STATE_OFFLINE);
11918 return;
11919
11920 } /* emlxs_fca_link_down() */
11921
11922
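/*
 * emlxs_integrity_check()
 * Sanity-check compile-time structure sizes against the sizes the SLI
 * interface expects and return the number of mismatches found.
 */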
11923 static uint32_t
11924 emlxs_integrity_check(emlxs_hba_t *hba)
11925 {
11926 uint32_t size;
11927 uint32_t errors = 0;
11928 int ddiinst = hba->ddiinst;
11929
11930 size = 16;
11931 if (sizeof (ULP_BDL) != size) {
11932 cmn_err(CE_WARN, "?%s%d: ULP_BDL size incorrect. %d != 16",
11933 DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDL));
11934
11935 errors++;
11936 }
11937 size = 8;
11938 if (sizeof (ULP_BDE) != size) {
11939 cmn_err(CE_WARN, "?%s%d: ULP_BDE size incorrect. %d != 8",
11940 DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDE));
11941
11942 errors++;
11943 }
11944 size = 12;
11945 if (sizeof (ULP_BDE64) != size) {
11946 cmn_err(CE_WARN, "?%s%d: ULP_BDE64 size incorrect. %d != 12",
11947 DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDE64));
11948
11949 errors++;
11950 }
11951 size = 16;
11952 if (sizeof (HBQE_t) != size) {
11953 cmn_err(CE_WARN, "?%s%d: HBQE size incorrect. %d != 16",
11954 DRIVER_NAME, ddiinst, (int)sizeof (HBQE_t));
11955
11956 errors++;
11957 }
11958 size = 8;
11959 if (sizeof (HGP) != size) {
11960 cmn_err(CE_WARN, "?%s%d: HGP size incorrect. %d != 8",
11961 DRIVER_NAME, ddiinst, (int)sizeof (HGP));
11962
11963 errors++;
11964 }
11965 if (sizeof (PGP) != size) {
11966 cmn_err(CE_WARN, "?%s%d: PGP size incorrect. %d != 8",
11967 DRIVER_NAME, ddiinst, (int)sizeof (PGP));
11968
11969 errors++;
11970 }
11971 size = 4;
11972 if (sizeof (WORD5) != size) {
11973 cmn_err(CE_WARN, "?%s%d: WORD5 size incorrect. %d != 4",
11974 DRIVER_NAME, ddiinst, (int)sizeof (WORD5));
11975
11976 errors++;
11977 }
11978 size = 124;
11979 if (sizeof (MAILVARIANTS) != size) {
11980 cmn_err(CE_WARN, "?%s%d: MAILVARIANTS size incorrect. "
11981 "%d != 124", DRIVER_NAME, ddiinst,
11982 (int)sizeof (MAILVARIANTS));
11983
11984 errors++;
11985 }
11986 size = 128;
11987 if (sizeof (SLI1_DESC) != size) {
11988 cmn_err(CE_WARN, "?%s%d: SLI1_DESC size incorrect. %d != 128",
11989 DRIVER_NAME, ddiinst, (int)sizeof (SLI1_DESC));
11990
11991 errors++;
11992 }
11993 if (sizeof (SLI2_DESC) != size) {
11994 cmn_err(CE_WARN, "?%s%d: SLI2_DESC size incorrect. %d != 128",
11995 DRIVER_NAME, ddiinst, (int)sizeof (SLI2_DESC));
11996
11997 errors++;
11998 }
11999 size = MBOX_SIZE;
12000 if (sizeof (MAILBOX) != size) {
12001 cmn_err(CE_WARN, "?%s%d: MAILBOX size incorrect. %d != %d",
12002 DRIVER_NAME, ddiinst, (int)sizeof (MAILBOX), MBOX_SIZE);
12003
12004 errors++;
12005 }
12006 size = PCB_SIZE;
12007 if (sizeof (PCB) != size) {
12008 cmn_err(CE_WARN, "?%s%d: PCB size incorrect. %d != %d",
12009 DRIVER_NAME, ddiinst, (int)sizeof (PCB), PCB_SIZE);
12010
12011 errors++;
12012 }
12013 size = 260;
12014 if (sizeof (ATTRIBUTE_ENTRY) != size) {
12015 cmn_err(CE_WARN, "?%s%d: ATTRIBUTE_ENTRY size incorrect. "
12016 "%d != 260", DRIVER_NAME, ddiinst,
12017 (int)sizeof (ATTRIBUTE_ENTRY));
12018
12019 errors++;
12020 }
12021 size = SLI_SLIM1_SIZE;
12022 if (sizeof (SLIM1) != size) {
12023 cmn_err(CE_WARN, "?%s%d: SLIM1 size incorrect. %d != %d",
12024 DRIVER_NAME, ddiinst, (int)sizeof (SLIM1), SLI_SLIM1_SIZE);
12025
12026 errors++;
12027 }
12028 size = SLI3_IOCB_CMD_SIZE;
12029 if (sizeof (IOCB) != size) {
12030 cmn_err(CE_WARN, "?%s%d: IOCB size incorrect. %d != %d",
12031 DRIVER_NAME, ddiinst, (int)sizeof (IOCB),
12032 SLI3_IOCB_CMD_SIZE);
12033
12034 errors++;
12035 }
12036
12037 size = SLI_SLIM2_SIZE;
12038 if (sizeof (SLIM2) != size) {
12039 cmn_err(CE_WARN, "?%s%d: SLIM2 size incorrect. %d != %d",
12040 DRIVER_NAME, ddiinst, (int)sizeof (SLIM2),
12041 SLI_SLIM2_SIZE);
12042
12043 errors++;
12044 }
12045 return (errors);
12046
12047 } /* emlxs_integrity_check() */
12048
12049
12050 #ifdef FMA_SUPPORT
12051 /*
12052 * FMA support
12053 */
12054
12055 extern void
12056 emlxs_fm_init(emlxs_hba_t *hba)
12057 {
12058 ddi_iblock_cookie_t iblk;
12059
12060 if (hba->fm_caps == DDI_FM_NOT_CAPABLE) {
12061 return;
12062 }
12063
12064 if (DDI_FM_ACC_ERR_CAP(hba->fm_caps)) {
12065 emlxs_dev_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
12066 emlxs_data_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
12067 }
12068
12069 if (DDI_FM_DMA_ERR_CAP(hba->fm_caps)) {
12070 hba->dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
12071 hba->dma_attr_ro.dma_attr_flags |= DDI_DMA_FLAGERR;
12072 hba->dma_attr_1sg.dma_attr_flags |= DDI_DMA_FLAGERR;
12073 hba->dma_attr_fcip_rsp.dma_attr_flags |= DDI_DMA_FLAGERR;
12074 } else {
12075 hba->dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12076 hba->dma_attr_ro.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12077 hba->dma_attr_1sg.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12078 hba->dma_attr_fcip_rsp.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12079 }
12080
12081 ddi_fm_init(hba->dip, &hba->fm_caps, &iblk);
12082
12083 if (DDI_FM_EREPORT_CAP(hba->fm_caps) ||
12084 DDI_FM_ERRCB_CAP(hba->fm_caps)) {
12085 pci_ereport_setup(hba->dip);
12086 }
12087
12088 if (DDI_FM_ERRCB_CAP(hba->fm_caps)) {
12089 ddi_fm_handler_register(hba->dip, emlxs_fm_error_cb,
12090 (void *)hba);
12091 }
12092
12093 } /* emlxs_fm_init() */
12094
12095
12096 extern void
12097 emlxs_fm_fini(emlxs_hba_t *hba)
12098 {
12099 if (hba->fm_caps == DDI_FM_NOT_CAPABLE) {
12100 return;
12101 }
12102
12103 if (DDI_FM_EREPORT_CAP(hba->fm_caps) ||
12104 DDI_FM_ERRCB_CAP(hba->fm_caps)) {
12105 pci_ereport_teardown(hba->dip);
12106 }
12107
12108 if (DDI_FM_ERRCB_CAP(hba->fm_caps)) {
12109 ddi_fm_handler_unregister(hba->dip);
12110 }
12111
12112 (void) ddi_fm_fini(hba->dip);
12113
12114 } /* emlxs_fm_fini() */
12115
12116
12117 extern int
12118 emlxs_fm_check_acc_handle(emlxs_hba_t *hba, ddi_acc_handle_t handle)
12119 {
12120 ddi_fm_error_t err;
12121
12122 if (!DDI_FM_ACC_ERR_CAP(hba->fm_caps)) {
12123 return (DDI_FM_OK);
12124 }
12125
12126 /* Some S10 versions do not define the ahi_err structure */
12127 if (((ddi_acc_impl_t *)handle)->ahi_err == NULL) {
12128 return (DDI_FM_OK);
12129 }
12130
12131 err.fme_status = DDI_FM_OK;
12132 (void) ddi_fm_acc_err_get(handle, &err, DDI_FME_VERSION);
12133
12134 /* Some S10 versions do not define the ddi_fm_acc_err_clear function */
12135 if ((void *)&ddi_fm_acc_err_clear != NULL) {
12136 (void) ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
12137 }
12138
12139 return (err.fme_status);
12140
12141 } /* emlxs_fm_check_acc_handle() */
12142
12143
12144 extern int
12145 emlxs_fm_check_dma_handle(emlxs_hba_t *hba, ddi_dma_handle_t handle)
12146 {
12147 ddi_fm_error_t err;
12148
12149 if (!DDI_FM_ACC_ERR_CAP(hba->fm_caps)) {
12150 return (DDI_FM_OK);
12151 }
12152
12153 err.fme_status = DDI_FM_OK;
12154 (void) ddi_fm_dma_err_get(handle, &err, DDI_FME_VERSION);
12155
12156 return (err.fme_status);
12157
12158 } /* emlxs_fm_check_dma_handle() */
12159
12160
12161 extern void
12162 emlxs_fm_ereport(emlxs_hba_t *hba, char *detail)
12163 {
12164 uint64_t ena;
12165 char buf[FM_MAX_CLASS];
12166
12167 if (!DDI_FM_EREPORT_CAP(hba->fm_caps)) {
12168 return;
12169 }
12170
12171 if (detail == NULL) {
12172 return;
12173 }
12174
12175 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
12176 ena = fm_ena_generate(0, FM_ENA_FMT1);
12177
12178 ddi_fm_ereport_post(hba->dip, buf, ena, DDI_NOSLEEP,
12179 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
12180
12181 } /* emlxs_fm_ereport() */
12182
12183
12184 extern void
12185 emlxs_fm_service_impact(emlxs_hba_t *hba, int impact)
12186 {
12187 if (!DDI_FM_EREPORT_CAP(hba->fm_caps)) {
12188 return;
12189 }
12190
12191 if (impact == 0) {
12192 return;
12193 }
12194
12195 if ((hba->pm_state & EMLXS_PM_IN_DETACH) &&
12196 (impact == DDI_SERVICE_DEGRADED)) {
12197 impact = DDI_SERVICE_UNAFFECTED;
12198 }
12199
12200 ddi_fm_service_impact(hba->dip, impact);
12201
12202 return;
12203
12204 } /* emlxs_fm_service_impact() */
12205
12206
12207 /*
12208 * The I/O fault service error handling callback function
12209 */
12210 /*ARGSUSED*/
12211 extern int
12212 emlxs_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
12213 const void *impl_data)
12214 {
12215 /*
12216 * as the driver can always deal with an error
12217 * in any dma or access handle, we can just return
12218 * the fme_status value.
12219 */
12220 pci_ereport_post(dip, err, NULL);
12221 return (err->fme_status);
12222
12223 } /* emlxs_fm_error_cb() */
12224
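/*
 * emlxs_check_dma()
 * Check the SLIM and per-packet DMA handles for faults.  On any error
 * the packet is marked FC_PKT_TRAN_ERROR/FC_REASON_DMA_ERROR, and a
 * SLIM error additionally sets FC_DMA_CHECK_ERROR so the iodone server
 * can schedule an adapter restart.
 */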
12225 extern void
12226 emlxs_check_dma(emlxs_hba_t *hba, emlxs_buf_t *sbp)
12227 {
12228 emlxs_port_t *port = sbp->port;
12229 fc_packet_t *pkt = PRIV2PKT(sbp);
12230
12231 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
12232 if (emlxs_fm_check_dma_handle(hba,
12233 hba->sli.sli4.slim2.dma_handle)
12234 != DDI_FM_OK) {
12235 EMLXS_MSGF(EMLXS_CONTEXT,
12236 &emlxs_invalid_dma_handle_msg,
12237 "slim2: hdl=%p",
12238 hba->sli.sli4.slim2.dma_handle);
12239
12240 mutex_enter(&EMLXS_PORT_LOCK);
12241 hba->flag |= FC_DMA_CHECK_ERROR;
12242 mutex_exit(&EMLXS_PORT_LOCK);
12243 }
12244 } else {
12245 if (emlxs_fm_check_dma_handle(hba,
12246 hba->sli.sli3.slim2.dma_handle)
12247 != DDI_FM_OK) {
12248 EMLXS_MSGF(EMLXS_CONTEXT,
12249 &emlxs_invalid_dma_handle_msg,
12250 "slim2: hdl=%p",
12251 hba->sli.sli3.slim2.dma_handle);
12252
12253 mutex_enter(&EMLXS_PORT_LOCK);
12254 hba->flag |= FC_DMA_CHECK_ERROR;
12255 mutex_exit(&EMLXS_PORT_LOCK);
12256 }
12257 }
12258
12259 if (hba->flag & FC_DMA_CHECK_ERROR) {
12260 pkt->pkt_state = FC_PKT_TRAN_ERROR;
12261 pkt->pkt_reason = FC_REASON_DMA_ERROR;
12262 pkt->pkt_expln = FC_EXPLN_NONE;
12263 pkt->pkt_action = FC_ACTION_RETRYABLE;
12264 return;
12265 }
12266
12267 if (pkt->pkt_cmdlen) {
12268 if (emlxs_fm_check_dma_handle(hba, pkt->pkt_cmd_dma)
12269 != DDI_FM_OK) {
12270 EMLXS_MSGF(EMLXS_CONTEXT,
12271 &emlxs_invalid_dma_handle_msg,
12272 "pkt_cmd_dma: hdl=%p",
12273 pkt->pkt_cmd_dma);
12274
12275 pkt->pkt_state = FC_PKT_TRAN_ERROR;
12276 pkt->pkt_reason = FC_REASON_DMA_ERROR;
12277 pkt->pkt_expln = FC_EXPLN_NONE;
12278 pkt->pkt_action = FC_ACTION_RETRYABLE;
12279
12280 return;
12281 }
12282 }
12283
12284 if (pkt->pkt_rsplen) {
12285 if (emlxs_fm_check_dma_handle(hba, pkt->pkt_resp_dma)
12286 != DDI_FM_OK) {
12287 EMLXS_MSGF(EMLXS_CONTEXT,
12288 &emlxs_invalid_dma_handle_msg,
12289 "pkt_resp_dma: hdl=%p",
12290 pkt->pkt_resp_dma);
12291
12292 pkt->pkt_state = FC_PKT_TRAN_ERROR;
12293 pkt->pkt_reason = FC_REASON_DMA_ERROR;
12294 pkt->pkt_expln = FC_EXPLN_NONE;
12295 pkt->pkt_action = FC_ACTION_RETRYABLE;
12296
12297 return;
12298 }
12299 }
12300
12301 if (pkt->pkt_datalen) {
12302 if (emlxs_fm_check_dma_handle(hba, pkt->pkt_data_dma)
12303 != DDI_FM_OK) {
12304 EMLXS_MSGF(EMLXS_CONTEXT,
12305 &emlxs_invalid_dma_handle_msg,
12306 "pkt_data_dma: hdl=%p",
12307 pkt->pkt_data_dma);
12308
12309 pkt->pkt_state = FC_PKT_TRAN_ERROR;
12310 pkt->pkt_reason = FC_REASON_DMA_ERROR;
12311 pkt->pkt_expln = FC_EXPLN_NONE;
12312 pkt->pkt_action = FC_ACTION_RETRYABLE;
12313
12314 return;
12315 }
12316 }
12317
12318 return;
12319
12320 } /* emlxs_check_dma() */
12321 #endif /* FMA_SUPPORT */
12322
12323
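/*
 * emlxs_swap32_buffer() / emlxs_swap32_bcopy()
 * Treat the buffer(s) as arrays of 32-bit words and apply SWAP32() to
 * each word, in place or while copying; the size must be a multiple of
 * 4 bytes (enforced by VERIFY).
 */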
12324 extern void
12325 emlxs_swap32_buffer(uint8_t *buffer, uint32_t size)
12326 {
12327 uint32_t word;
12328 uint32_t *wptr;
12329 uint32_t i;
12330
12331 VERIFY((size % 4) == 0);
12332
12333 wptr = (uint32_t *)buffer;
12334
12335 for (i = 0; i < size / 4; i++) {
12336 word = *wptr;
12337 *wptr++ = SWAP32(word);
12338 }
12339
12340 return;
12341
12342 } /* emlxs_swap32_buffer() */
12343
12344
12345 extern void
12346 emlxs_swap32_bcopy(uint8_t *src, uint8_t *dst, uint32_t size)
12347 {
12348 uint32_t word;
12349 uint32_t *sptr;
12350 uint32_t *dptr;
12351 uint32_t i;
12352
12353 VERIFY((size % 4) == 0);
12354
12355 sptr = (uint32_t *)src;
12356 dptr = (uint32_t *)dst;
12357
12358 for (i = 0; i < size / 4; i++) {
12359 word = *sptr++;
12360 *dptr++ = SWAP32(word);
12361 }
12362
12363 return;
12364
12365 } /* emlxs_swap32_bcopy() */
12366
12367
12368 extern char *
12369 emlxs_strtoupper(char *str)
12370 {
12371 char *cptr = str;
12372
12373 while (*cptr) {
12374 if ((*cptr >= 'a') && (*cptr <= 'z')) {
12375 *cptr -= ('a' - 'A');
12376 }
12377 cptr++;
12378 }
12379
12380 return (str);
12381
12382 } /* emlxs_strtoupper() */
12383
12384
12385 extern void
12386 emlxs_ulp_statec_cb(emlxs_port_t *port, uint32_t statec)
12387 {
12388 emlxs_hba_t *hba = HBA;
12389
12390 /* This routine coordinates protection with emlxs_fca_unbind_port() */
12391
12392 mutex_enter(&EMLXS_PORT_LOCK);
12393 if (!(port->flag & EMLXS_INI_BOUND)) {
12394 mutex_exit(&EMLXS_PORT_LOCK);
12395 return;
12396 }
12397 port->ulp_busy++;
12398 mutex_exit(&EMLXS_PORT_LOCK);
12399
12400 port->ulp_statec_cb(port->ulp_handle, statec);
12401
12402 mutex_enter(&EMLXS_PORT_LOCK);
12403 port->ulp_busy--;
12404 mutex_exit(&EMLXS_PORT_LOCK);
12405
12406 return;
12407
12408 } /* emlxs_ulp_statec_cb() */
12409
12410
12411 extern void
12412 emlxs_ulp_unsol_cb(emlxs_port_t *port, fc_unsol_buf_t *ubp)
12413 {
12414 emlxs_hba_t *hba = HBA;
12415
12416 /* This routine coordinates protection with emlxs_fca_unbind_port() */
12417
12418 mutex_enter(&EMLXS_PORT_LOCK);
12419 if (!(port->flag & EMLXS_INI_BOUND)) {
12420 mutex_exit(&EMLXS_PORT_LOCK);
12421 return;
12422 }
12423 port->ulp_busy++;
12424 mutex_exit(&EMLXS_PORT_LOCK);
12425
12426 port->ulp_unsol_cb(port->ulp_handle, ubp, ubp->ub_frame.type);
12427
12428 mutex_enter(&EMLXS_PORT_LOCK);
12429 port->ulp_busy--;
12430 mutex_exit(&EMLXS_PORT_LOCK);
12431
12432 return;
12433
12434 } /* emlxs_ulp_unsol_cb() */
12435