xref: /illumos-gate/usr/src/uts/common/io/fibre-channel/fca/qlc/ql_api.c (revision 148434217c040ea38dc844384f6ba68d9b325906)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /* Copyright 2009 QLogic Corporation */
23 
24 /*
25  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
26  * Use is subject to license terms.
27  */
28 
29 #pragma ident	"Copyright 2009 QLogic Corporation; ql_api.c"
30 
31 /*
32  * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
33  *
34  * ***********************************************************************
35  * *									**
36  * *				NOTICE					**
37  * *		COPYRIGHT (C) 1996-2009 QLOGIC CORPORATION		**
38  * *			ALL RIGHTS RESERVED				**
39  * *									**
40  * ***********************************************************************
41  *
42  */
43 
44 #include <ql_apps.h>
45 #include <ql_api.h>
46 #include <ql_debug.h>
47 #include <ql_init.h>
48 #include <ql_iocb.h>
49 #include <ql_ioctl.h>
50 #include <ql_isr.h>
51 #include <ql_mbx.h>
52 #include <ql_xioctl.h>
53 
54 /*
55  * Solaris external defines.
56  */
57 extern pri_t minclsyspri;
58 extern pri_t maxclsyspri;
59 
60 /*
61  * dev_ops functions prototypes
62  */
63 static int ql_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
64 static int ql_attach(dev_info_t *, ddi_attach_cmd_t);
65 static int ql_detach(dev_info_t *, ddi_detach_cmd_t);
66 static int ql_power(dev_info_t *, int, int);
67 static int ql_quiesce(dev_info_t *);
68 
69 /*
70  * FCA functions prototypes exported by means of the transport table
71  */
72 static opaque_t ql_bind_port(dev_info_t *, fc_fca_port_info_t *,
73     fc_fca_bind_info_t *);
74 static void ql_unbind_port(opaque_t);
75 static int ql_init_pkt(opaque_t, fc_packet_t *, int);
76 static int ql_un_init_pkt(opaque_t, fc_packet_t *);
77 static int ql_els_send(opaque_t, fc_packet_t *);
78 static int ql_get_cap(opaque_t, char *, void *);
79 static int ql_set_cap(opaque_t, char *, void *);
80 static int ql_getmap(opaque_t, fc_lilpmap_t *);
81 static int ql_transport(opaque_t, fc_packet_t *);
82 static int ql_ub_alloc(opaque_t, uint64_t *, uint32_t, uint32_t *, uint32_t);
83 static int ql_ub_free(opaque_t, uint32_t, uint64_t *);
84 static int ql_ub_release(opaque_t, uint32_t, uint64_t *);
85 static int ql_abort(opaque_t, fc_packet_t *, int);
86 static int ql_reset(opaque_t, uint32_t);
87 static int ql_notify(opaque_t, uint32_t);
88 static int ql_port_manage(opaque_t, fc_fca_pm_t *);
89 static opaque_t ql_get_device(opaque_t, fc_portid_t);
90 
91 /*
92  * FCA Driver Support Function Prototypes.
93  */
94 static uint16_t	ql_wait_outstanding(ql_adapter_state_t *);
95 static void ql_task_mgmt(ql_adapter_state_t *, ql_tgt_t *, fc_packet_t *,
96     ql_srb_t *);
97 static void ql_task_daemon(void *);
98 static void ql_task_thread(ql_adapter_state_t *);
99 static void ql_unsol_callback(ql_srb_t *);
100 static void ql_dev_free(ql_adapter_state_t *, ql_tgt_t *);
101 static void ql_free_unsolicited_buffer(ql_adapter_state_t *,
102     fc_unsol_buf_t *);
103 static void ql_timer(void *);
104 static void ql_watchdog(ql_adapter_state_t *, uint32_t *, uint32_t *);
105 static void ql_cmd_timeout(ql_adapter_state_t *, ql_tgt_t *q, ql_srb_t *,
106     uint32_t *, uint32_t *);
107 static void ql_halt(ql_adapter_state_t *, int);
108 static int ql_els_plogi(ql_adapter_state_t *, fc_packet_t *);
109 static int ql_els_flogi(ql_adapter_state_t *, fc_packet_t *);
110 static int ql_els_logo(ql_adapter_state_t *, fc_packet_t *);
111 static int ql_els_prli(ql_adapter_state_t *, fc_packet_t *);
112 static int ql_els_prlo(ql_adapter_state_t *, fc_packet_t *);
113 static int ql_els_adisc(ql_adapter_state_t *, fc_packet_t *);
114 static int ql_els_linit(ql_adapter_state_t *, fc_packet_t *);
115 static int ql_els_lpc(ql_adapter_state_t *, fc_packet_t *);
116 static int ql_els_lsts(ql_adapter_state_t *, fc_packet_t *);
117 static int ql_els_scr(ql_adapter_state_t *, fc_packet_t *);
118 static int ql_els_rscn(ql_adapter_state_t *, fc_packet_t *);
119 static int ql_els_farp_req(ql_adapter_state_t *, fc_packet_t *);
120 static int ql_els_farp_reply(ql_adapter_state_t *, fc_packet_t *);
121 static int ql_els_rls(ql_adapter_state_t *, fc_packet_t *);
122 static int ql_els_rnid(ql_adapter_state_t *, fc_packet_t *);
123 static int ql_login_port(ql_adapter_state_t *, port_id_t);
124 static int ql_login_fabric_port(ql_adapter_state_t *, ql_tgt_t *, uint16_t);
125 static int ql_logout_port(ql_adapter_state_t *, port_id_t);
126 static ql_lun_t *ql_lun_queue(ql_adapter_state_t *, ql_tgt_t *, uint16_t);
127 static int ql_fcp_scsi_cmd(ql_adapter_state_t *, fc_packet_t *, ql_srb_t *);
128 static int ql_fcp_ip_cmd(ql_adapter_state_t *, fc_packet_t *, ql_srb_t *);
129 static int ql_fcp_data_rsp(ql_adapter_state_t *, fc_packet_t *, ql_srb_t *);
130 static int ql_fc_services(ql_adapter_state_t *, fc_packet_t *);
131 static int ql_poll_cmd(ql_adapter_state_t *, ql_srb_t *, time_t);
132 static int ql_start_cmd(ql_adapter_state_t *, ql_tgt_t *, fc_packet_t *,
133     ql_srb_t *);
134 static int ql_kstat_update(kstat_t *, int);
135 static ql_adapter_state_t *ql_fca_handle_to_state(opaque_t);
136 static ql_adapter_state_t *ql_cmd_setup(opaque_t, fc_packet_t *, int *);
137 static int ql_program_flash_address(ql_adapter_state_t *, uint32_t, uint8_t);
138 static void ql_rst_aen(ql_adapter_state_t *);
139 static void ql_restart_queues(ql_adapter_state_t *);
140 static void ql_abort_queues(ql_adapter_state_t *);
141 static void ql_idle_check(ql_adapter_state_t *);
142 static int ql_loop_resync(ql_adapter_state_t *);
143 static size_t ql_24xx_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
144 static size_t ql_25xx_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
145 static int ql_save_config_regs(dev_info_t *);
146 static int ql_restore_config_regs(dev_info_t *);
147 static int ql_process_rscn(ql_adapter_state_t *, fc_affected_id_t *);
148 static int ql_handle_rscn_update(ql_adapter_state_t *);
149 static int ql_send_plogi(ql_adapter_state_t *, ql_tgt_t *, ql_head_t *);
150 static int ql_process_rscn_for_device(ql_adapter_state_t *, ql_tgt_t *);
151 static int ql_dump_firmware(ql_adapter_state_t *);
152 static int ql_process_logo_for_device(ql_adapter_state_t *, ql_tgt_t *);
153 static int ql_2200_binary_fw_dump(ql_adapter_state_t *, ql_fw_dump_t *);
154 static int ql_2300_binary_fw_dump(ql_adapter_state_t *, ql_fw_dump_t *);
155 static int ql_24xx_binary_fw_dump(ql_adapter_state_t *, ql_24xx_fw_dump_t *);
156 static int ql_25xx_binary_fw_dump(ql_adapter_state_t *, ql_25xx_fw_dump_t *);
157 static int ql_read_risc_ram(ql_adapter_state_t *, uint32_t, uint32_t,
158     void *);
159 static void *ql_read_regs(ql_adapter_state_t *, void *, void *, uint32_t,
160     uint8_t);
161 static int ql_busy_plogi(ql_adapter_state_t *, fc_packet_t *, ql_tgt_t *);
162 static int ql_suspend_adapter(ql_adapter_state_t *);
163 static int ql_bstr_to_dec(char *, uint32_t *, uint32_t);
164 static void ql_update_rscn(ql_adapter_state_t *, fc_affected_id_t *);
165 int ql_alloc_dma_resouce(ql_adapter_state_t *, dma_mem_t *, int);
166 static int ql_bind_dma_buffer(ql_adapter_state_t *, dma_mem_t *, int);
167 static void ql_unbind_dma_buffer(ql_adapter_state_t *, dma_mem_t *);
168 static void ql_timeout_insert(ql_adapter_state_t *, ql_tgt_t *, ql_srb_t *);
169 static int ql_setup_interrupts(ql_adapter_state_t *);
170 static int ql_setup_msi(ql_adapter_state_t *);
171 static int ql_setup_msix(ql_adapter_state_t *);
172 static int ql_setup_fixed(ql_adapter_state_t *);
173 static void ql_release_intr(ql_adapter_state_t *);
174 static void ql_disable_intr(ql_adapter_state_t *);
175 static int ql_legacy_intr(ql_adapter_state_t *);
176 static int ql_init_mutex(ql_adapter_state_t *);
177 static void ql_destroy_mutex(ql_adapter_state_t *);
178 static void ql_iidma(ql_adapter_state_t *);
179 
180 int ql_el_trace_desc_ctor(ql_adapter_state_t *ha);
181 int ql_el_trace_desc_dtor(ql_adapter_state_t *ha);
182 /*
183  * Global data
184  */
185 static uint8_t	ql_enable_pm = 1;
186 static int	ql_flash_sbus_fpga = 0;
187 uint32_t	ql_os_release_level;
188 uint32_t	ql_disable_aif = 0;
189 uint32_t	ql_disable_msi = 0;
190 uint32_t	ql_disable_msix = 0;
191 
192 /* Timer routine variables. */
193 static timeout_id_t	ql_timer_timeout_id = NULL;
194 static clock_t		ql_timer_ticks;
195 
196 /* Soft state head pointer. */
197 void *ql_state = NULL;
198 
199 /* Head adapter link. */
200 ql_head_t ql_hba = {
201 	NULL,
202 	NULL
203 };
204 
205 /* Global hba index */
206 uint32_t ql_gfru_hba_index = 1;
207 
208 /*
209  * Some IP defines and globals
210  */
211 uint32_t	ql_ip_buffer_count = 128;
212 uint32_t	ql_ip_low_water = 10;
213 uint8_t		ql_ip_fast_post_count = 5;
214 static int	ql_ip_mtu = 65280;		/* equivalent to FCIPMTU */
215 
/*
 * Device AL_PA to Device Head Queue index array.
 *
 * Indexed by arbitrated-loop physical address (AL_PA); yields the
 * device head queue index for that address.  Table contents are
 * hardware/protocol defined — do not edit by hand.
 */
uint8_t ql_alpa_to_index[] = {
	0x7e, 0x7d, 0x7c, 0x00, 0x7b, 0x01, 0x02, 0x03, 0x7a, 0x04,
	0x05, 0x06, 0x07, 0x08, 0x09, 0x79, 0x78, 0x0a, 0x0b, 0x0c,
	0x0d, 0x0e, 0x0f, 0x77, 0x76, 0x10, 0x11, 0x75, 0x12, 0x74,
	0x73, 0x72, 0x13, 0x14, 0x15, 0x71, 0x16, 0x70, 0x6f, 0x6e,
	0x17, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x18, 0x19, 0x67,
	0x66, 0x65, 0x64, 0x63, 0x62, 0x20, 0x21, 0x61, 0x60, 0x23,
	0x5f, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x5e, 0x2a, 0x5d,
	0x5c, 0x5b, 0x2b, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x2c,
	0x2d, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x2e, 0x2f, 0x4e,
	0x4d, 0x30, 0x4c, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x4b,
	0x37, 0x4a, 0x49, 0x48, 0x38, 0x47, 0x46, 0x45, 0x44, 0x43,
	0x42, 0x39, 0x3a, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x3b,
	0x3c, 0x3b, 0x3a, 0x3d, 0x39, 0x3e, 0x3f, 0x40, 0x38, 0x37,
	0x36, 0x41, 0x35, 0x42, 0x43, 0x44, 0x34, 0x45, 0x46, 0x47,
	0x48, 0x49, 0x4a, 0x33, 0x32, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
	0x50, 0x31, 0x30, 0x51, 0x52, 0x2f, 0x53, 0x2e, 0x2d, 0x2c,
	0x54, 0x55, 0x56, 0x2b, 0x57, 0x2a, 0x29, 0x28, 0x58, 0x27,
	0x26, 0x25, 0x24, 0x23, 0x22, 0x59, 0x5a, 0x21, 0x20, 0x1f,
	0x1e, 0x1d, 0x1c, 0x5b, 0x5c, 0x1b, 0x1a, 0x5d, 0x19, 0x5e,
	0x5f, 0x60, 0x61, 0x62, 0x63, 0x18, 0x64, 0x17, 0x16, 0x15,
	0x65, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x66, 0x67, 0x0e,
	0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x68, 0x69, 0x08, 0x07, 0x6a,
	0x06, 0x6b, 0x6c, 0x6d, 0x05, 0x04, 0x03, 0x6e, 0x02, 0x6f,
	0x70, 0x71, 0x01, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x00,
	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7f, 0x80, 0x00, 0x01,
	0x02, 0x03, 0x80, 0x7f, 0x7e, 0x04
};
245 
/*
 * Device loop_id to ALPA array.
 *
 * Inverse mapping of the table above: indexed by loop id, yields the
 * device's AL_PA.  Protocol-defined contents — do not edit by hand.
 */
static uint8_t ql_index_to_alpa[] = {
	0xef, 0xe8, 0xe4, 0xe2, 0xe1, 0xe0, 0xdc, 0xda, 0xd9, 0xd6,
	0xd5, 0xd4, 0xd3, 0xd2, 0xd1, 0xce, 0xcd, 0xcc, 0xcb, 0xca,
	0xc9, 0xc7, 0xc6, 0xc5, 0xc3, 0xbc, 0xba, 0xb9, 0xb6, 0xb5,
	0xb4, 0xb3, 0xb2, 0xb1, 0xae, 0xad, 0xac, 0xab, 0xaa, 0xa9,
	0xa7, 0xa6, 0xa5, 0xa3, 0x9f, 0x9e, 0x9d, 0x9b, 0x98, 0x97,
	0x90, 0x8f, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7c, 0x7a, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6e, 0x6d, 0x6c, 0x6b,
	0x6a, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5c, 0x5a, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4e, 0x4d, 0x4c, 0x4b, 0x4a,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3c, 0x3a, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1f, 0x1e, 0x1d, 0x1b, 0x18, 0x17,
	0x10, 0x0f, 0x08, 0x04, 0x02, 0x01
};
262 
/*
 * 2200 register offsets
 *
 * PCI register offsets for ISP2200 chips; registers the 2200 lacks are
 * filled with 0x00/0xff placeholders as noted inline.
 */
static reg_off_t reg_off_2200 = {
	0x00, 0x02, 0x06, 0x08, 0x0a, 0x0c, 0x0e,
	0x18, 0x18, 0x1A, 0x1A, /* req in, out, resp in, out */
	0x00, 0x00, /* intr info lo, hi */
	24, /* Number of mailboxes */
	/* Mailbox register offsets */
	0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
	0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee,
	0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0xfe,
	/* 2200 does not have mailbox 24-31 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x96, 0xa4, 0xb0, 0xb8, 0xc0, 0xcc, 0xce,
	/* host to host sema */
	0x00,
	/* 2200 does not have pri_req_in, pri_req_out, */
	/* atio_req_in, atio_req_out, io_base_addr */
	0xff, 0xff, 0xff, 0xff,	0xff
};
282 
/*
 * 2300 register offsets
 *
 * PCI register offsets for ISP2300-class chips (also used for the 6322
 * fibre-lite parts; see ql_attach()).
 */
static reg_off_t reg_off_2300 = {
	0x00, 0x02, 0x06, 0x08, 0x0a, 0x0c, 0x0e,
	0x10, 0x12, 0x14, 0x16, /* req in, out, resp in, out */
	0x18, 0x1A, /* intr info lo, hi */
	32, /* Number of mailboxes */
	/* Mailbox register offsets */
	0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e,
	0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e,
	0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e,
	0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e,
	0x96, 0xa4, 0xb0, 0x80, 0xc0, 0xcc, 0xce,
	/* host to host sema */
	0x1c,
	/* 2300 does not have pri_req_in, pri_req_out, */
	/* atio_req_in, atio_req_out, io_base_addr */
	0xff, 0xff, 0xff, 0xff,	0xff
};
301 
302 /* 2400/2500 register offsets */
303 reg_off_t reg_off_2400_2500 = {
304 	0x00, 0x04,		/* flash_address, flash_data */
305 	0x08, 0x0c, 0x10,	/* ctrl_status, ictrl, istatus */
306 	/* 2400 does not have semaphore, nvram */
307 	0x14, 0x18,
308 	0x1c, 0x20, 0x24, 0x28, /* req_in, req_out, resp_in, resp_out */
309 	0x44, 0x46,		/* intr info lo, hi */
310 	32,			/* Number of mailboxes */
311 	/* Mailbox register offsets */
312 	0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e,
313 	0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
314 	0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae,
315 	0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe,
316 	/* 2400 does not have fpm_diag_config, pcr, mctr, fb_cmd */
317 	0xff, 0xff, 0xff, 0xff,
318 	0x48, 0x4c, 0x50,	/* hccr, gpiod, gpioe */
319 	0xff,			/* host to host sema */
320 	0x2c, 0x30,		/* pri_req_in, pri_req_out */
321 	0x3c, 0x40,		/* atio_req_in, atio_req_out */
322 	0x54			/* io_base_addr */
323 };
324 
/*
 * Mutexes protecting variables shared by all instances of the driver.
 * Initialized in _init() and destroyed in _fini() (or on mod_install
 * failure).
 */
kmutex_t ql_global_mutex;
kmutex_t ql_global_hw_mutex;
kmutex_t ql_global_el_mutex;

/* DMA access attribute structure: little-endian, strictly ordered. */
static ddi_device_acc_attr_t ql_dev_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};
336 
/*
 * I/O DMA attributes structures.
 *
 * Templates for all per-use DMA attributes below; the two differ only
 * in the high address limit (64-bit vs 32-bit DMA range).  _init()
 * may also lower dma_attr_count_max on older OS releases.
 */
static ddi_dma_attr_t ql_64bit_io_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr_version */
	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
	QL_DMA_HIGH_64BIT_ADDRESS,	/* high DMA address range */
	QL_DMA_XFER_COUNTER,		/* DMA counter register */
	QL_DMA_ADDRESS_ALIGNMENT,	/* DMA address alignment */
	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
	QL_DMA_SG_LIST_LENGTH,		/* s/g list length */
	QL_DMA_GRANULARITY,		/* granularity of device */
	QL_DMA_XFER_FLAGS		/* DMA transfer flags */
};

static ddi_dma_attr_t ql_32bit_io_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr_version */
	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
	QL_DMA_HIGH_32BIT_ADDRESS,	/* high DMA address range */
	QL_DMA_XFER_COUNTER,		/* DMA counter register */
	QL_DMA_ADDRESS_ALIGNMENT,	/* DMA address alignment */
	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
	QL_DMA_SG_LIST_LENGTH,		/* s/g list length */
	QL_DMA_GRANULARITY,		/* granularity of device */
	QL_DMA_XFER_FLAGS		/* DMA transfer flags */
};
367 
/*
 * Load the default dma attributes.
 *
 * Per-use copies of the 32/64-bit templates above; _init() populates
 * them from the templates and then overrides dma_attr_sgllen for each
 * command/response/data role.
 */
static	ddi_dma_attr_t	ql_32fcsm_cmd_dma_attr;
static	ddi_dma_attr_t	ql_64fcsm_cmd_dma_attr;
static	ddi_dma_attr_t	ql_32fcsm_rsp_dma_attr;
static	ddi_dma_attr_t	ql_64fcsm_rsp_dma_attr;
static	ddi_dma_attr_t	ql_32fcip_cmd_dma_attr;
static	ddi_dma_attr_t	ql_64fcip_cmd_dma_attr;
static	ddi_dma_attr_t	ql_32fcip_rsp_dma_attr;
static	ddi_dma_attr_t	ql_64fcip_rsp_dma_attr;
static	ddi_dma_attr_t	ql_32fcp_cmd_dma_attr;
static	ddi_dma_attr_t	ql_64fcp_cmd_dma_attr;
static	ddi_dma_attr_t	ql_32fcp_rsp_dma_attr;
static	ddi_dma_attr_t	ql_64fcp_rsp_dma_attr;
static	ddi_dma_attr_t	ql_32fcp_data_dma_attr;
static	ddi_dma_attr_t	ql_64fcp_data_dma_attr;
383 
/*
 * Static declarations of cb_ops entry point functions...
 * ql_open/ql_close/ql_ioctl are declared in the project headers
 * included above; all other entry points are unsupported (nodev).
 */
static struct cb_ops ql_cb_ops = {
	ql_open,			/* b/c open */
	ql_close,			/* b/c close */
	nodev,				/* b strategy */
	nodev,				/* b print */
	nodev,				/* b dump */
	nodev,				/* c read */
	nodev,				/* c write */
	ql_ioctl,			/* c ioctl */
	nodev,				/* c devmap */
	nodev,				/* c mmap */
	nodev,				/* c segmap */
	nochpoll,			/* c poll */
	nodev,				/* cb_prop_op */
	NULL,				/* streamtab  */
	D_MP | D_NEW | D_HOTPLUG,	/* Driver compatibility flag */
	CB_REV,				/* cb_ops revision */
	nodev,				/* c aread */
	nodev				/* c awrite */
};
405 
/*
 * Static declarations of dev_ops entry point functions...
 * Note: _init() passes this structure to fc_fca_init() so the FC
 * transport can adjust it before mod_install().
 */
static struct dev_ops ql_devops = {
	DEVO_REV,			/* devo_rev */
	0,				/* refcnt */
	ql_getinfo,			/* getinfo */
	nulldev,			/* identify */
	nulldev,			/* probe */
	ql_attach,			/* attach */
	ql_detach,			/* detach */
	nodev,				/* reset */
	&ql_cb_ops,			/* char/block ops */
	NULL,				/* bus operations */
	ql_power,			/* power management */
	ql_quiesce			/* quiesce device */
};
421 
/* ELS command code to text converter (table contents from ql_debug.h macros). */
cmd_table_t els_cmd_tbl[] = ELS_CMD_TABLE();
/* Mailbox command code to text converter */
cmd_table_t mbox_cmd_tbl[] = MBOX_CMD_TABLE();

/* Driver version string, exported for ioctl/FCA consumers. */
char qlc_driver_version[] = QL_VERSION;
428 
429 /*
430  * Loadable Driver Interface Structures.
431  * Declare and initialize the module configuration section...
432  */
433 static struct modldrv modldrv = {
434 	&mod_driverops,				/* type of module: driver */
435 	"SunFC Qlogic FCA v" QL_VERSION,	/* name of module */
436 	&ql_devops				/* driver dev_ops */
437 };
438 
439 static struct modlinkage modlinkage = {
440 	MODREV_1,
441 	&modldrv,
442 	NULL
443 };
444 
445 /* ************************************************************************ */
446 /*				Loadable Module Routines.		    */
447 /* ************************************************************************ */
448 
449 /*
450  * _init
451  *	Initializes a loadable module. It is called before any other
452  *	routine in a loadable module.
453  *
454  * Returns:
455  *	0 = success
456  *
457  * Context:
458  *	Kernel context.
459  */
460 int
461 _init(void)
462 {
463 	uint16_t	w16;
464 	int		rval = 0;
465 
466 	/* Get OS major release level. */
467 	for (w16 = 0; w16 < sizeof (utsname.release); w16++) {
468 		if (utsname.release[w16] == '.') {
469 			w16++;
470 			break;
471 		}
472 	}
473 	if (w16 < sizeof (utsname.release)) {
474 		(void) ql_bstr_to_dec(&utsname.release[w16],
475 		    &ql_os_release_level, 0);
476 	} else {
477 		ql_os_release_level = 0;
478 	}
479 	if (ql_os_release_level < 6) {
480 		cmn_err(CE_WARN, "%s Unsupported OS release level = %d",
481 		    QL_NAME, ql_os_release_level);
482 		rval = EINVAL;
483 	}
484 	if (ql_os_release_level == 6) {
485 		ql_32bit_io_dma_attr.dma_attr_count_max = 0x00ffffff;
486 		ql_64bit_io_dma_attr.dma_attr_count_max = 0x00ffffff;
487 	}
488 
489 	if (rval == 0) {
490 		rval = ddi_soft_state_init(&ql_state,
491 		    sizeof (ql_adapter_state_t), 0);
492 	}
493 	if (rval == 0) {
494 		/* allow the FC Transport to tweak the dev_ops */
495 		fc_fca_init(&ql_devops);
496 
497 		mutex_init(&ql_global_mutex, NULL, MUTEX_DRIVER, NULL);
498 		mutex_init(&ql_global_hw_mutex, NULL, MUTEX_DRIVER, NULL);
499 		mutex_init(&ql_global_el_mutex, NULL, MUTEX_DRIVER, NULL);
500 		rval = mod_install(&modlinkage);
501 		if (rval != 0) {
502 			mutex_destroy(&ql_global_hw_mutex);
503 			mutex_destroy(&ql_global_mutex);
504 			mutex_destroy(&ql_global_el_mutex);
505 			ddi_soft_state_fini(&ql_state);
506 		} else {
507 			/*EMPTY*/
508 			ql_32fcsm_cmd_dma_attr = ql_32bit_io_dma_attr;
509 			ql_64fcsm_cmd_dma_attr = ql_64bit_io_dma_attr;
510 			ql_32fcsm_rsp_dma_attr = ql_32bit_io_dma_attr;
511 			ql_64fcsm_rsp_dma_attr = ql_64bit_io_dma_attr;
512 			ql_32fcip_cmd_dma_attr = ql_32bit_io_dma_attr;
513 			ql_64fcip_cmd_dma_attr = ql_64bit_io_dma_attr;
514 			ql_32fcip_rsp_dma_attr = ql_32bit_io_dma_attr;
515 			ql_64fcip_rsp_dma_attr = ql_64bit_io_dma_attr;
516 			ql_32fcp_cmd_dma_attr = ql_32bit_io_dma_attr;
517 			ql_64fcp_cmd_dma_attr = ql_64bit_io_dma_attr;
518 			ql_32fcp_rsp_dma_attr = ql_32bit_io_dma_attr;
519 			ql_64fcp_rsp_dma_attr = ql_64bit_io_dma_attr;
520 			ql_32fcp_data_dma_attr = ql_32bit_io_dma_attr;
521 			ql_64fcp_data_dma_attr = ql_64bit_io_dma_attr;
522 			ql_32fcsm_cmd_dma_attr.dma_attr_sgllen =
523 			    ql_64fcsm_cmd_dma_attr.dma_attr_sgllen =
524 			    QL_FCSM_CMD_SGLLEN;
525 			ql_32fcsm_rsp_dma_attr.dma_attr_sgllen =
526 			    ql_64fcsm_rsp_dma_attr.dma_attr_sgllen =
527 			    QL_FCSM_RSP_SGLLEN;
528 			ql_32fcip_cmd_dma_attr.dma_attr_sgllen =
529 			    ql_64fcip_cmd_dma_attr.dma_attr_sgllen =
530 			    QL_FCIP_CMD_SGLLEN;
531 			ql_32fcip_rsp_dma_attr.dma_attr_sgllen =
532 			    ql_64fcip_rsp_dma_attr.dma_attr_sgllen =
533 			    QL_FCIP_RSP_SGLLEN;
534 			ql_32fcp_cmd_dma_attr.dma_attr_sgllen =
535 			    ql_64fcp_cmd_dma_attr.dma_attr_sgllen =
536 			    QL_FCP_CMD_SGLLEN;
537 			ql_32fcp_rsp_dma_attr.dma_attr_sgllen =
538 			    ql_64fcp_rsp_dma_attr.dma_attr_sgllen =
539 			    QL_FCP_RSP_SGLLEN;
540 		}
541 	}
542 
543 	if (rval != 0) {
544 		cmn_err(CE_CONT, "?Unable to install/attach driver '%s'",
545 		    QL_NAME);
546 	}
547 
548 	return (rval);
549 }
550 
551 /*
552  * _fini
553  *	Prepares a module for unloading. It is called when the system
554  *	wants to unload a module. If the module determines that it can
555  *	be unloaded, then _fini() returns the value returned by
556  *	mod_remove(). Upon successful return from _fini() no other
557  *	routine in the module will be called before _init() is called.
558  *
559  * Returns:
560  *	0 = success
561  *
562  * Context:
563  *	Kernel context.
564  */
565 int
566 _fini(void)
567 {
568 	int	rval;
569 
570 	rval = mod_remove(&modlinkage);
571 	if (rval == 0) {
572 		mutex_destroy(&ql_global_hw_mutex);
573 		mutex_destroy(&ql_global_mutex);
574 		mutex_destroy(&ql_global_el_mutex);
575 		ddi_soft_state_fini(&ql_state);
576 	}
577 
578 	return (rval);
579 }
580 
581 /*
582  * _info
583  *	Returns information about loadable module.
584  *
585  * Input:
586  *	modinfo = pointer to module information structure.
587  *
588  * Returns:
589  *	Value returned by mod_info().
590  *
591  * Context:
592  *	Kernel context.
593  */
594 int
595 _info(struct modinfo *modinfop)
596 {
597 	return (mod_info(&modlinkage, modinfop));
598 }
599 
600 /* ************************************************************************ */
601 /*			dev_ops functions				    */
602 /* ************************************************************************ */
603 
604 /*
605  * ql_getinfo
606  *	Returns the pointer associated with arg when cmd is
607  *	set to DDI_INFO_DEVT2DEVINFO, or it should return the
608  *	instance number associated with arg when cmd is set
609  *	to DDI_INFO_DEV2INSTANCE.
610  *
611  * Input:
612  *	dip = Do not use.
613  *	cmd = command argument.
614  *	arg = command specific argument.
615  *	resultp = pointer to where request information is stored.
616  *
617  * Returns:
618  *	DDI_SUCCESS or DDI_FAILURE.
619  *
620  * Context:
621  *	Kernel context.
622  */
623 /* ARGSUSED */
624 static int
625 ql_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
626 {
627 	ql_adapter_state_t	*ha;
628 	int			minor;
629 	int			rval = DDI_FAILURE;
630 
631 	minor = (int)(getminor((dev_t)arg));
632 	ha = ddi_get_soft_state(ql_state, minor);
633 	if (ha == NULL) {
634 		QL_PRINT_2(CE_CONT, "failed, unknown minor=%d\n",
635 		    getminor((dev_t)arg));
636 		*resultp = NULL;
637 		return (rval);
638 	}
639 
640 	QL_PRINT_3(CE_CONT, "(%d): entered\n", ha->instance);
641 
642 	switch (cmd) {
643 	case DDI_INFO_DEVT2DEVINFO:
644 		*resultp = ha->dip;
645 		rval = DDI_SUCCESS;
646 		break;
647 	case DDI_INFO_DEVT2INSTANCE:
648 		*resultp = (void *)(uintptr_t)(ha->instance);
649 		rval = DDI_SUCCESS;
650 		break;
651 	default:
652 		EL(ha, "failed, unsupported cmd=%d\n", cmd);
653 		rval = DDI_FAILURE;
654 		break;
655 	}
656 
657 	QL_PRINT_3(CE_CONT, "(%d): exiting\n", ha->instance);
658 
659 	return (rval);
660 }
661 
662 /*
663  * ql_attach
664  *	Configure and attach an instance of the driver
665  *	for a port.
666  *
667  * Input:
668  *	dip = pointer to device information structure.
669  *	cmd = attach type.
670  *
671  * Returns:
672  *	DDI_SUCCESS or DDI_FAILURE.
673  *
674  * Context:
675  *	Kernel context.
676  */
677 static int
678 ql_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
679 {
680 	uint32_t		size;
681 	int			rval;
682 	int			instance;
683 	uint_t			progress = 0;
684 	char			*buf;
685 	ushort_t		caps_ptr, cap;
686 	fc_fca_tran_t		*tran;
687 	ql_adapter_state_t	*ha = NULL;
688 
689 	static char *pmcomps[] = {
690 		NULL,
691 		PM_LEVEL_D3_STR,		/* Device OFF */
692 		PM_LEVEL_D0_STR,		/* Device ON */
693 	};
694 
695 	QL_PRINT_3(CE_CONT, "(%d): started, cmd=%xh\n",
696 	    ddi_get_instance(dip), cmd);
697 
698 	buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
699 
700 	switch (cmd) {
701 	case DDI_ATTACH:
702 		/* first get the instance */
703 		instance = ddi_get_instance(dip);
704 
705 		cmn_err(CE_CONT, "!Qlogic %s(%d) FCA Driver v%s\n",
706 		    QL_NAME, instance, QL_VERSION);
707 
708 		/* Correct OS version? */
709 		if (ql_os_release_level != 11) {
710 			cmn_err(CE_WARN, "%s(%d): This driver is for Solaris "
711 			    "11", QL_NAME, instance);
712 			goto attach_failed;
713 		}
714 
715 		/* Hardware is installed in a DMA-capable slot? */
716 		if (ddi_slaveonly(dip) == DDI_SUCCESS) {
717 			cmn_err(CE_WARN, "%s(%d): slave only", QL_NAME,
718 			    instance);
719 			goto attach_failed;
720 		}
721 
722 		/* No support for high-level interrupts */
723 		if (ddi_intr_hilevel(dip, 0) != 0) {
724 			cmn_err(CE_WARN, "%s(%d): High level interrupt"
725 			    " not supported", QL_NAME, instance);
726 			goto attach_failed;
727 		}
728 
729 		/* Allocate our per-device-instance structure */
730 		if (ddi_soft_state_zalloc(ql_state,
731 		    instance) != DDI_SUCCESS) {
732 			cmn_err(CE_WARN, "%s(%d): soft state alloc failed",
733 			    QL_NAME, instance);
734 			goto attach_failed;
735 		}
736 		progress |= QL_SOFT_STATE_ALLOCED;
737 
738 		ha = ddi_get_soft_state(ql_state, instance);
739 		if (ha == NULL) {
740 			cmn_err(CE_WARN, "%s(%d): can't get soft state",
741 			    QL_NAME, instance);
742 			goto attach_failed;
743 		}
744 		ha->dip = dip;
745 		ha->instance = instance;
746 		ha->hba.base_address = ha;
747 		ha->pha = ha;
748 
749 		if (ql_el_trace_desc_ctor(ha) != DDI_SUCCESS) {
750 			cmn_err(CE_WARN, "%s(%d): can't setup el tracing",
751 			    QL_NAME, instance);
752 			goto attach_failed;
753 		}
754 
755 		/* Get extended logging and dump flags. */
756 		ql_common_properties(ha);
757 
758 		if (strcmp(ddi_driver_name(ddi_get_parent(dip)),
759 		    "sbus") == 0) {
760 			EL(ha, "%s SBUS card detected", QL_NAME);
761 			ha->cfg_flags |= CFG_SBUS_CARD;
762 		}
763 
764 		ha->dev = kmem_zalloc(sizeof (*ha->dev) *
765 		    DEVICE_HEAD_LIST_SIZE, KM_SLEEP);
766 
767 		ha->outstanding_cmds = kmem_zalloc(
768 		    sizeof (*ha->outstanding_cmds) * MAX_OUTSTANDING_COMMANDS,
769 		    KM_SLEEP);
770 
771 		ha->ub_array = kmem_zalloc(sizeof (*ha->ub_array) *
772 		    QL_UB_LIMIT, KM_SLEEP);
773 
774 		ha->adapter_stats = kmem_zalloc(sizeof (*ha->adapter_stats),
775 		    KM_SLEEP);
776 
777 		(void) ddi_pathname(dip, buf);
778 		ha->devpath = kmem_zalloc(strlen(buf)+1, KM_SLEEP);
779 		if (ha->devpath == NULL) {
780 			EL(ha, "devpath mem alloc failed\n");
781 		} else {
782 			(void) strcpy(ha->devpath, buf);
783 			EL(ha, "devpath is: %s\n", ha->devpath);
784 		}
785 
786 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
787 			/*
788 			 * For cards where PCI is mapped to sbus e.g. Ivory.
789 			 *
790 			 * 0x00	: 0x000 - 0x0FF PCI Config Space for 2200
791 			 *	: 0x100 - 0x3FF PCI IO space for 2200
792 			 * 0x01	: 0x000 - 0x0FF PCI Config Space for fpga
793 			 *	: 0x100 - 0x3FF PCI IO Space for fpga
794 			 */
795 			if (ddi_regs_map_setup(dip, 0, (caddr_t *)&ha->iobase,
796 			    0x100, 0x300, &ql_dev_acc_attr, &ha->dev_handle)
797 			    != DDI_SUCCESS) {
798 				cmn_err(CE_WARN, "%s(%d): Unable to map device"
799 				    " registers", QL_NAME, instance);
800 				goto attach_failed;
801 			}
802 			if (ddi_regs_map_setup(dip, 1,
803 			    (caddr_t *)&ha->sbus_fpga_iobase, 0, 0x400,
804 			    &ql_dev_acc_attr, &ha->sbus_fpga_dev_handle)
805 			    != DDI_SUCCESS) {
806 				/* We should not fail attach here */
807 				cmn_err(CE_WARN, "%s(%d): Unable to map FPGA",
808 				    QL_NAME, instance);
809 				ha->sbus_fpga_iobase = NULL;
810 			}
811 			progress |= QL_REGS_MAPPED;
812 		} else {
813 			/*
814 			 * Setup the ISP2200 registers address mapping to be
815 			 * accessed by this particular driver.
816 			 * 0x0   Configuration Space
817 			 * 0x1   I/O Space
818 			 * 0x2   32-bit Memory Space address
819 			 * 0x3   64-bit Memory Space address
820 			 */
821 			if (ddi_regs_map_setup(dip, 2, (caddr_t *)&ha->iobase,
822 			    0, 0x100, &ql_dev_acc_attr,
823 			    &ha->dev_handle) != DDI_SUCCESS) {
824 				cmn_err(CE_WARN, "%s(%d): regs_map_setup "
825 				    "failed", QL_NAME, instance);
826 				goto attach_failed;
827 			}
828 			progress |= QL_REGS_MAPPED;
829 
830 			/*
831 			 * We need I/O space mappings for 23xx HBAs for
832 			 * loading flash (FCode). The chip has a bug due to
833 			 * which loading flash fails through mem space
834 			 * mappings in PCI-X mode.
835 			 */
836 			if (ddi_regs_map_setup(dip, 1,
837 			    (caddr_t *)&ha->iomap_iobase, 0, 0x100,
838 			    &ql_dev_acc_attr,
839 			    &ha->iomap_dev_handle) != DDI_SUCCESS) {
840 				cmn_err(CE_WARN, "%s(%d): regs_map_setup(I/O)"
841 				    " failed", QL_NAME, instance);
842 				goto attach_failed;
843 			}
844 			progress |= QL_IOMAP_IOBASE_MAPPED;
845 		}
846 
847 		/*
848 		 * We should map config space before adding interrupt
849 		 * So that the chip type (2200 or 2300) can be determined
850 		 * before the interrupt routine gets a chance to execute.
851 		 */
852 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
853 			if (ddi_regs_map_setup(dip, 0,
854 			    (caddr_t *)&ha->sbus_config_base, 0, 0x100,
855 			    &ql_dev_acc_attr, &ha->sbus_config_handle) !=
856 			    DDI_SUCCESS) {
857 				cmn_err(CE_WARN, "%s(%d): Unable to map sbus "
858 				    "config registers", QL_NAME, instance);
859 				goto attach_failed;
860 			}
861 		} else {
862 			if (pci_config_setup(ha->dip, &ha->pci_handle) !=
863 			    DDI_SUCCESS) {
864 				cmn_err(CE_WARN, "%s(%d): can't setup PCI "
865 				    "config space", QL_NAME, instance);
866 				goto attach_failed;
867 			}
868 		}
869 		progress |= QL_CONFIG_SPACE_SETUP;
870 
871 		ha->subsys_id = (uint16_t)ql_pci_config_get16(ha,
872 		    PCI_CONF_SUBSYSID);
873 		ha->subven_id = (uint16_t)ql_pci_config_get16(ha,
874 		    PCI_CONF_SUBVENID);
875 		ha->ven_id = (uint16_t)ql_pci_config_get16(ha,
876 		    PCI_CONF_VENID);
877 		ha->device_id = (uint16_t)ql_pci_config_get16(ha,
878 		    PCI_CONF_DEVID);
879 		ha->rev_id = (uint8_t)ql_pci_config_get8(ha,
880 		    PCI_CONF_REVID);
881 
882 		EL(ha, "ISP%x chip detected (RevID=%x, VenID=%x, SVenID=%x, "
883 		    "SSysID=%x)\n", ha->device_id, ha->rev_id, ha->ven_id,
884 		    ha->subven_id, ha->subsys_id);
885 
886 		switch (ha->device_id) {
887 		case 0x2300:
888 		case 0x2312:
889 #if !defined(__sparc) || defined(QL_DEBUG_ROUTINES)
890 		case 0x6312:
891 #endif	/* !defined(__sparc) || defined(QL_DEBUG_ROUTINES) */
892 			ha->cfg_flags |= CFG_CTRL_2300;
893 			ha->fw_class = 0x2300;
894 			ha->reg_off = &reg_off_2300;
895 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
896 				goto attach_failed;
897 			}
898 			ha->risc_dump_size = QL_2300_FW_DUMP_SIZE;
899 			ha->fcp_cmd = ql_command_iocb;
900 			ha->ip_cmd = ql_ip_iocb;
901 			ha->ms_cmd = ql_ms_iocb;
902 			ha->ctio_cmd = ql_continue_target_io_iocb;
903 			break;
904 
905 #if !defined(__sparc) || defined(QL_DEBUG_ROUTINES)
906 		case 0x6322:
907 			/*
908 			 * per marketing, fibre-lite HBA's are not supported
909 			 * on sparc platforms
910 			 */
911 			ha->cfg_flags |= CFG_CTRL_6322;
912 			ha->fw_class = 0x6322;
913 			ha->reg_off = &reg_off_2300;
914 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
915 				goto attach_failed;
916 			}
917 			ha->risc_dump_size = QL_6322_FW_DUMP_SIZE;
918 			ha->fcp_cmd = ql_command_iocb;
919 			ha->ip_cmd = ql_ip_iocb;
920 			ha->ms_cmd = ql_ms_iocb;
921 			ha->ctio_cmd = ql_continue_target_io_iocb;
922 			break;
923 
924 #endif	/* !defined(__sparc) || defined(QL_DEBUG_ROUTINES) */
925 		case 0x2200:
926 			ha->cfg_flags |= CFG_CTRL_2200;
927 			ha->reg_off = &reg_off_2200;
928 			ha->fw_class = 0x2200;
929 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
930 				goto attach_failed;
931 			}
932 			ha->risc_dump_size = QL_2200_FW_DUMP_SIZE;
933 			ha->fcp_cmd = ql_command_iocb;
934 			ha->ip_cmd = ql_ip_iocb;
935 			ha->ms_cmd = ql_ms_iocb;
936 			ha->ctio_cmd = ql_continue_target_io_iocb;
937 			break;
938 
939 		case 0x2422:
940 		case 0x2432:
941 		case 0x5422:
942 		case 0x5432:
943 		case 0x8432:
944 #ifdef __sparc
945 			/*
946 			 * Per marketing, the QLA/QLE-2440's (which
947 			 * also use the 2422 & 2432) are only for the
948 			 * x86 platform (SMB market).
949 			 */
950 			if (ha->subsys_id == 0x145 || ha->subsys_id == 0x147 ||
951 			    ha->subsys_id == 0x13e) {
952 				cmn_err(CE_WARN,
953 				    "%s(%d): Unsupported HBA ssid: %x",
954 				    QL_NAME, instance, ha->subsys_id);
955 				goto attach_failed;
956 			}
957 #endif	/* __sparc */
958 			ha->cfg_flags |= CFG_CTRL_2422;
959 			if (ha->device_id == 0x8432) {
960 				ha->cfg_flags |= CFG_CTRL_MENLO;
961 			} else {
962 				ha->flags |= VP_ENABLED;
963 			}
964 
965 			ha->reg_off = &reg_off_2400_2500;
966 			ha->fw_class = 0x2400;
967 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
968 				goto attach_failed;
969 			}
970 			ha->risc_dump_size = QL_24XX_FW_DUMP_SIZE;
971 			ha->fcp_cmd = ql_command_24xx_iocb;
972 			ha->ip_cmd = ql_ip_24xx_iocb;
973 			ha->ms_cmd = ql_ms_24xx_iocb;
974 			ha->ctio_cmd = ql_continue_target_io_2400_iocb;
975 			ha->flash_errlog_start = RD32_IO_REG(ha, ctrl_status) &
976 			    FUNCTION_NUMBER ? FLASH_2400_ERRLOG_START_ADDR_1 :
977 			    FLASH_2400_ERRLOG_START_ADDR_0;
978 			break;
979 
980 		case 0x2522:
981 		case 0x2532:
982 			ha->cfg_flags |= CFG_CTRL_25XX;
983 			ha->flags |= VP_ENABLED;
984 			ha->fw_class = 0x2500;
985 			ha->reg_off = &reg_off_2400_2500;
986 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
987 				goto attach_failed;
988 			}
989 			ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
990 			ha->fcp_cmd = ql_command_24xx_iocb;
991 			ha->ip_cmd = ql_ip_24xx_iocb;
992 			ha->ms_cmd = ql_ms_24xx_iocb;
993 			ha->ctio_cmd = ql_continue_target_io_2400_iocb;
994 			ha->flash_errlog_start = RD32_IO_REG(ha, ctrl_status) &
995 			    FUNCTION_NUMBER ? FLASH_2500_ERRLOG_START_ADDR_1 :
996 			    FLASH_2500_ERRLOG_START_ADDR_0;
997 			break;
998 
999 		default:
1000 			cmn_err(CE_WARN, "%s(%d): Unsupported device id: %x",
1001 			    QL_NAME, instance, ha->device_id);
1002 			goto attach_failed;
1003 		}
1004 
1005 		/* Setup hba buffer. */
1006 
1007 		size = CFG_IST(ha, CFG_CTRL_2425) ?
1008 		    (REQUEST_QUEUE_SIZE + RESPONSE_QUEUE_SIZE) :
1009 		    (REQUEST_QUEUE_SIZE + RESPONSE_QUEUE_SIZE +
1010 		    RCVBUF_QUEUE_SIZE);
1011 
1012 		if (ql_get_dma_mem(ha, &ha->hba_buf, size, LITTLE_ENDIAN_DMA,
1013 		    QL_DMA_RING_ALIGN) != QL_SUCCESS) {
1014 			cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
1015 			    "alloc failed", QL_NAME, instance);
1016 			goto attach_failed;
1017 		}
1018 		progress |= QL_HBA_BUFFER_SETUP;
1019 
1020 		/* Setup buffer pointers. */
1021 		ha->request_dvma = ha->hba_buf.cookie.dmac_laddress +
1022 		    REQUEST_Q_BUFFER_OFFSET;
1023 		ha->request_ring_bp = (struct cmd_entry *)
1024 		    ((caddr_t)ha->hba_buf.bp + REQUEST_Q_BUFFER_OFFSET);
1025 
1026 		ha->response_dvma = ha->hba_buf.cookie.dmac_laddress +
1027 		    RESPONSE_Q_BUFFER_OFFSET;
1028 		ha->response_ring_bp = (struct sts_entry *)
1029 		    ((caddr_t)ha->hba_buf.bp + RESPONSE_Q_BUFFER_OFFSET);
1030 
1031 		ha->rcvbuf_dvma = ha->hba_buf.cookie.dmac_laddress +
1032 		    RCVBUF_Q_BUFFER_OFFSET;
1033 		ha->rcvbuf_ring_bp = (struct rcvbuf *)
1034 		    ((caddr_t)ha->hba_buf.bp + RCVBUF_Q_BUFFER_OFFSET);
1035 
1036 		/* Allocate resource for QLogic IOCTL */
1037 		(void) ql_alloc_xioctl_resource(ha);
1038 
1039 		/* Setup interrupts */
1040 		if ((rval = ql_setup_interrupts(ha)) != DDI_SUCCESS) {
1041 			cmn_err(CE_WARN, "%s(%d): Failed to add interrupt, "
1042 			    "rval=%xh", QL_NAME, instance, rval);
1043 			goto attach_failed;
1044 		}
1045 
1046 		progress |= (QL_INTR_ADDED | QL_MUTEX_CV_INITED);
1047 
1048 		/*
1049 		 * Determine support for Power Management
1050 		 */
1051 		caps_ptr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR);
1052 
1053 		while (caps_ptr != PCI_CAP_NEXT_PTR_NULL) {
1054 			cap = (uint8_t)ql_pci_config_get8(ha, caps_ptr);
1055 			if (cap == PCI_CAP_ID_PM) {
1056 				ha->pm_capable = 1;
1057 				break;
1058 			}
1059 			caps_ptr = (uint8_t)ql_pci_config_get8(ha, caps_ptr +
1060 			    PCI_CAP_NEXT_PTR);
1061 		}
1062 
1063 		if (ha->pm_capable) {
1064 			/*
1065 			 * Enable PM for 2200 based HBAs only.
1066 			 */
1067 			if (ha->device_id != 0x2200) {
1068 				ha->pm_capable = 0;
1069 			}
1070 		}
1071 
1072 		if (ha->pm_capable) {
1073 			ha->pm_capable = ql_enable_pm;
1074 		}
1075 
1076 		if (ha->pm_capable) {
1077 			/*
1078 			 * Initialize power management bookkeeping;
1079 			 * components are created idle.
1080 			 */
1081 			(void) sprintf(buf, "NAME=%s(%d)", QL_NAME, instance);
1082 			pmcomps[0] = buf;
1083 
1084 			/*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
1085 			if (ddi_prop_update_string_array(DDI_DEV_T_NONE,
1086 			    dip, "pm-components", pmcomps,
1087 			    sizeof (pmcomps) / sizeof (pmcomps[0])) !=
1088 			    DDI_PROP_SUCCESS) {
1089 				cmn_err(CE_WARN, "%s(%d): failed to create"
1090 				    " pm-components property", QL_NAME,
1091 				    instance);
1092 
1093 				/* Initialize adapter. */
1094 				ha->power_level = PM_LEVEL_D0;
1095 				if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1096 					cmn_err(CE_WARN, "%s(%d): failed to"
1097 					    " initialize adapter", QL_NAME,
1098 					    instance);
1099 					goto attach_failed;
1100 				}
1101 			} else {
1102 				ha->power_level = PM_LEVEL_D3;
1103 				if (pm_raise_power(dip, QL_POWER_COMPONENT,
1104 				    PM_LEVEL_D0) != DDI_SUCCESS) {
1105 					cmn_err(CE_WARN, "%s(%d): failed to"
1106 					    " raise power or initialize"
1107 					    " adapter", QL_NAME, instance);
1108 				}
1109 				ASSERT(ha->power_level == PM_LEVEL_D0);
1110 			}
1111 		} else {
1112 			/* Initialize adapter. */
1113 			ha->power_level = PM_LEVEL_D0;
1114 			if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1115 				cmn_err(CE_WARN, "%s(%d): failed to initialize"
1116 				    " adapter", QL_NAME, instance);
1117 			}
1118 		}
1119 
1120 		if (ha->fw_major_version == 0 && ha->fw_minor_version == 0 &&
1121 		    ha->fw_subminor_version == 0) {
1122 			cmn_err(CE_NOTE, "!%s(%d): Firmware not loaded",
1123 			    QL_NAME, ha->instance);
1124 		} else {
1125 			cmn_err(CE_NOTE, "!%s(%d): Firmware version %d.%d.%d",
1126 			    QL_NAME, ha->instance, ha->fw_major_version,
1127 			    ha->fw_minor_version, ha->fw_subminor_version);
1128 		}
1129 
1130 		ha->k_stats = kstat_create(QL_NAME, instance, "statistics",
1131 		    "controller", KSTAT_TYPE_RAW,
1132 		    (uint32_t)sizeof (ql_adapter_stat_t), KSTAT_FLAG_VIRTUAL);
1133 		if (ha->k_stats == NULL) {
1134 			cmn_err(CE_WARN, "%s(%d): Failed to create kstat",
1135 			    QL_NAME, instance);
1136 			goto attach_failed;
1137 		}
1138 		progress |= QL_KSTAT_CREATED;
1139 
1140 		ha->adapter_stats->version = 1;
1141 		ha->k_stats->ks_data = (void *)ha->adapter_stats;
1142 		ha->k_stats->ks_private = ha;
1143 		ha->k_stats->ks_update = ql_kstat_update;
1144 		ha->k_stats->ks_ndata = 1;
1145 		ha->k_stats->ks_data_size = sizeof (ql_adapter_stat_t);
1146 		kstat_install(ha->k_stats);
1147 
1148 		if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
1149 		    instance, DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
1150 			cmn_err(CE_WARN, "%s(%d): failed to create minor node",
1151 			    QL_NAME, instance);
1152 			goto attach_failed;
1153 		}
1154 		progress |= QL_MINOR_NODE_CREATED;
1155 
1156 		/* Allocate a transport structure for this instance */
1157 		tran = kmem_zalloc(sizeof (fc_fca_tran_t), KM_SLEEP);
1158 		ASSERT(tran != NULL);
1159 
1160 		progress |= QL_FCA_TRAN_ALLOCED;
1161 
1162 		/* fill in the structure */
1163 		tran->fca_numports = 1;
1164 		tran->fca_version = FCTL_FCA_MODREV_5;
1165 		if (CFG_IST(ha, CFG_CTRL_2422)) {
1166 			tran->fca_num_npivports = MAX_24_VIRTUAL_PORTS;
1167 		} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
1168 			tran->fca_num_npivports = MAX_25_VIRTUAL_PORTS;
1169 		}
1170 		bcopy(ha->loginparams.node_ww_name.raw_wwn,
1171 		    tran->fca_perm_pwwn.raw_wwn, 8);
1172 
1173 		EL(ha, "FCA version %d\n", tran->fca_version);
1174 
1175 		/* Specify the amount of space needed in each packet */
1176 		tran->fca_pkt_size = sizeof (ql_srb_t);
1177 
1178 		/* command limits are usually dictated by hardware */
1179 		tran->fca_cmd_max = MAX_OUTSTANDING_COMMANDS;
1180 
1181 		/* dmaattr are static, set elsewhere. */
1182 		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
1183 			tran->fca_dma_attr = &ql_64bit_io_dma_attr;
1184 			tran->fca_dma_fcp_cmd_attr = &ql_64fcp_cmd_dma_attr;
1185 			tran->fca_dma_fcp_rsp_attr = &ql_64fcp_rsp_dma_attr;
1186 			tran->fca_dma_fcp_data_attr = &ql_64fcp_data_dma_attr;
1187 			tran->fca_dma_fcsm_cmd_attr = &ql_64fcsm_cmd_dma_attr;
1188 			tran->fca_dma_fcsm_rsp_attr = &ql_64fcsm_rsp_dma_attr;
1189 			tran->fca_dma_fcip_cmd_attr = &ql_64fcip_cmd_dma_attr;
1190 			tran->fca_dma_fcip_rsp_attr = &ql_64fcip_rsp_dma_attr;
1191 		} else {
1192 			tran->fca_dma_attr = &ql_32bit_io_dma_attr;
1193 			tran->fca_dma_fcp_cmd_attr = &ql_32fcp_cmd_dma_attr;
1194 			tran->fca_dma_fcp_rsp_attr = &ql_32fcp_rsp_dma_attr;
1195 			tran->fca_dma_fcp_data_attr = &ql_32fcp_data_dma_attr;
1196 			tran->fca_dma_fcsm_cmd_attr = &ql_32fcsm_cmd_dma_attr;
1197 			tran->fca_dma_fcsm_rsp_attr = &ql_32fcsm_rsp_dma_attr;
1198 			tran->fca_dma_fcip_cmd_attr = &ql_32fcip_cmd_dma_attr;
1199 			tran->fca_dma_fcip_rsp_attr = &ql_32fcip_rsp_dma_attr;
1200 		}
1201 
1202 		tran->fca_acc_attr = &ql_dev_acc_attr;
1203 		tran->fca_iblock = &(ha->iblock_cookie);
1204 
1205 		/* the remaining values are simply function vectors */
1206 		tran->fca_bind_port = ql_bind_port;
1207 		tran->fca_unbind_port = ql_unbind_port;
1208 		tran->fca_init_pkt = ql_init_pkt;
1209 		tran->fca_un_init_pkt = ql_un_init_pkt;
1210 		tran->fca_els_send = ql_els_send;
1211 		tran->fca_get_cap = ql_get_cap;
1212 		tran->fca_set_cap = ql_set_cap;
1213 		tran->fca_getmap = ql_getmap;
1214 		tran->fca_transport = ql_transport;
1215 		tran->fca_ub_alloc = ql_ub_alloc;
1216 		tran->fca_ub_free = ql_ub_free;
1217 		tran->fca_ub_release = ql_ub_release;
1218 		tran->fca_abort = ql_abort;
1219 		tran->fca_reset = ql_reset;
1220 		tran->fca_port_manage = ql_port_manage;
1221 		tran->fca_get_device = ql_get_device;
1222 		tran->fca_notify = ql_notify;
1223 
1224 		/* give it to the FC transport */
1225 		if (fc_fca_attach(dip, tran) != DDI_SUCCESS) {
1226 			cmn_err(CE_WARN, "%s(%d): FCA attach failed", QL_NAME,
1227 			    instance);
1228 			goto attach_failed;
1229 		}
1230 		progress |= QL_FCA_ATTACH_DONE;
1231 
1232 		/* Stash the structure so it can be freed at detach */
1233 		ha->tran = tran;
1234 
1235 		/* Acquire global state lock. */
1236 		GLOBAL_STATE_LOCK();
1237 
1238 		/* Add adapter structure to link list. */
1239 		ql_add_link_b(&ql_hba, &ha->hba);
1240 
1241 		/* Start one second driver timer. */
1242 		if (ql_timer_timeout_id == NULL) {
1243 			ql_timer_ticks = drv_usectohz(1000000);
1244 			ql_timer_timeout_id = timeout(ql_timer, (void *)0,
1245 			    ql_timer_ticks);
1246 		}
1247 
1248 		/* Release global state lock. */
1249 		GLOBAL_STATE_UNLOCK();
1250 
1251 		/* Determine and populate HBA fru info */
1252 		ql_setup_fruinfo(ha);
1253 
1254 		/* Setup task_daemon thread. */
1255 		(void) thread_create(NULL, 0, (void (*)())ql_task_daemon, ha,
1256 		    0, &p0, TS_RUN, minclsyspri);
1257 
1258 		progress |= QL_TASK_DAEMON_STARTED;
1259 
1260 		ddi_report_dev(dip);
1261 
1262 		/* Disable link reset in panic path */
1263 		ha->lip_on_panic = 1;
1264 
1265 		rval = DDI_SUCCESS;
1266 		break;
1267 
1268 attach_failed:
1269 		if (progress & QL_FCA_ATTACH_DONE) {
1270 			(void) fc_fca_detach(dip);
1271 			progress &= ~QL_FCA_ATTACH_DONE;
1272 		}
1273 
1274 		if (progress & QL_FCA_TRAN_ALLOCED) {
1275 			kmem_free(tran, sizeof (fc_fca_tran_t));
1276 			progress &= ~QL_FCA_TRAN_ALLOCED;
1277 		}
1278 
1279 		if (progress & QL_MINOR_NODE_CREATED) {
1280 			ddi_remove_minor_node(dip, "devctl");
1281 			progress &= ~QL_MINOR_NODE_CREATED;
1282 		}
1283 
1284 		if (progress & QL_KSTAT_CREATED) {
1285 			kstat_delete(ha->k_stats);
1286 			progress &= ~QL_KSTAT_CREATED;
1287 		}
1288 
1289 		if (progress & QL_TASK_DAEMON_STARTED) {
1290 			TASK_DAEMON_LOCK(ha);
1291 
1292 			ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
1293 
1294 			cv_signal(&ha->cv_task_daemon);
1295 
1296 			/* Release task daemon lock. */
1297 			TASK_DAEMON_UNLOCK(ha);
1298 
			/* Wait for task daemon to stop running. */
1300 			while (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
1301 				ql_delay(ha, 10000);
1302 			}
1303 			progress &= ~QL_TASK_DAEMON_STARTED;
1304 		}
1305 
1306 		if (progress & QL_IOMAP_IOBASE_MAPPED) {
1307 			ddi_regs_map_free(&ha->iomap_dev_handle);
1308 			progress &= ~QL_IOMAP_IOBASE_MAPPED;
1309 		}
1310 
1311 		if (progress & QL_CONFIG_SPACE_SETUP) {
1312 			if (CFG_IST(ha, CFG_SBUS_CARD)) {
1313 				ddi_regs_map_free(&ha->sbus_config_handle);
1314 			} else {
1315 				pci_config_teardown(&ha->pci_handle);
1316 			}
1317 			progress &= ~QL_CONFIG_SPACE_SETUP;
1318 		}
1319 
1320 		if (progress & QL_INTR_ADDED) {
1321 			ql_disable_intr(ha);
1322 			ql_release_intr(ha);
1323 			progress &= ~QL_INTR_ADDED;
1324 		}
1325 
1326 		if (progress & QL_MUTEX_CV_INITED) {
1327 			ql_destroy_mutex(ha);
1328 			progress &= ~QL_MUTEX_CV_INITED;
1329 		}
1330 
1331 		if (progress & QL_HBA_BUFFER_SETUP) {
1332 			ql_free_phys(ha, &ha->hba_buf);
1333 			progress &= ~QL_HBA_BUFFER_SETUP;
1334 		}
1335 
1336 		if (progress & QL_REGS_MAPPED) {
1337 			ddi_regs_map_free(&ha->dev_handle);
1338 			if (ha->sbus_fpga_iobase != NULL) {
1339 				ddi_regs_map_free(&ha->sbus_fpga_dev_handle);
1340 			}
1341 			progress &= ~QL_REGS_MAPPED;
1342 		}
1343 
1344 		if (progress & QL_SOFT_STATE_ALLOCED) {
1345 
1346 			ql_fcache_rel(ha->fcache);
1347 
1348 			ASSERT(ha->dev && ha->outstanding_cmds &&
1349 			    ha->ub_array && ha->adapter_stats);
1350 
1351 			kmem_free(ha->adapter_stats,
1352 			    sizeof (*ha->adapter_stats));
1353 
1354 			kmem_free(ha->ub_array, sizeof (*ha->ub_array) *
1355 			    QL_UB_LIMIT);
1356 
1357 			kmem_free(ha->outstanding_cmds,
1358 			    sizeof (*ha->outstanding_cmds) *
1359 			    MAX_OUTSTANDING_COMMANDS);
1360 
1361 			if (ha->devpath != NULL) {
1362 				kmem_free(ha->devpath,
1363 				    strlen(ha->devpath) + 1);
1364 			}
1365 
1366 			kmem_free(ha->dev, sizeof (*ha->dev) *
1367 			    DEVICE_HEAD_LIST_SIZE);
1368 
1369 			if (ha->xioctl != NULL) {
1370 				ql_free_xioctl_resource(ha);
1371 			}
1372 
1373 			if (ha->fw_module != NULL) {
1374 				(void) ddi_modclose(ha->fw_module);
1375 			}
1376 
1377 			ddi_soft_state_free(ql_state, instance);
1378 			progress &= ~QL_SOFT_STATE_ALLOCED;
1379 		}
1380 		ASSERT(progress == 0);
1381 
1382 		ddi_prop_remove_all(dip);
1383 		rval = DDI_FAILURE;
1384 		break;
1385 
1386 	case DDI_RESUME:
1387 		rval = DDI_FAILURE;
1388 
1389 		ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
1390 		if (ha == NULL) {
1391 			cmn_err(CE_WARN, "%s(%d): can't get soft state",
1392 			    QL_NAME, instance);
1393 			break;
1394 		}
1395 
1396 		if (ha->flags & TARGET_MODE_INITIALIZED) {
1397 			/* Enable Target Mode */
1398 			ha->init_ctrl_blk.cb.lun_enables[0] = (uint8_t)
1399 			    (ha->init_ctrl_blk.cb.lun_enables[0] | 0x01);
1400 			ha->init_ctrl_blk.cb.immediate_notify_resouce_count =
1401 			    ha->ub_notify_count;
1402 			ha->init_ctrl_blk.cb.command_resouce_count =
1403 			    ha->ub_command_count;
1404 		} else {
1405 			ha->init_ctrl_blk.cb.lun_enables[0] = 0;
1406 			ha->init_ctrl_blk.cb.lun_enables[1] = 0;
1407 			ha->init_ctrl_blk.cb.immediate_notify_resouce_count =
1408 			    0;
1409 			ha->init_ctrl_blk.cb.command_resouce_count = 0;
1410 		}
1411 
1412 		ha->power_level = PM_LEVEL_D3;
1413 		if (ha->pm_capable) {
1414 			/*
1415 			 * Get ql_power to do power on initialization
1416 			 */
1417 			if (pm_raise_power(dip, QL_POWER_COMPONENT,
1418 			    PM_LEVEL_D0) != DDI_SUCCESS) {
1419 				cmn_err(CE_WARN, "%s(%d): can't raise adapter"
1420 				    " power", QL_NAME, instance);
1421 			}
1422 		}
1423 
1424 		/*
1425 		 * There is a bug in DR that prevents PM framework
1426 		 * from calling ql_power.
1427 		 */
1428 		if (ha->power_level == PM_LEVEL_D3) {
1429 			ha->power_level = PM_LEVEL_D0;
1430 
1431 			if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1432 				cmn_err(CE_WARN, "%s(%d): can't initialize the"
1433 				    " adapter", QL_NAME, instance);
1434 			}
1435 
1436 			/* Wake up task_daemon. */
1437 			ql_awaken_task_daemon(ha, NULL, TASK_DAEMON_ALIVE_FLG,
1438 			    0);
1439 		}
1440 
1441 		/* Acquire global state lock. */
1442 		GLOBAL_STATE_LOCK();
1443 
1444 		/* Restart driver timer. */
1445 		if (ql_timer_timeout_id == NULL) {
1446 			ql_timer_timeout_id = timeout(ql_timer, (void *)0,
1447 			    ql_timer_ticks);
1448 		}
1449 
1450 		/* Release global state lock. */
1451 		GLOBAL_STATE_UNLOCK();
1452 
1453 		/* Wake up command start routine. */
1454 		ADAPTER_STATE_LOCK(ha);
1455 		ha->flags &= ~ADAPTER_SUSPENDED;
1456 		ADAPTER_STATE_UNLOCK(ha);
1457 
1458 		/*
1459 		 * Transport doesn't make FC discovery in polled
1460 		 * mode; So we need the daemon thread's services
1461 		 * right here.
1462 		 */
1463 		(void) callb_generic_cpr(&ha->cprinfo, CB_CODE_CPR_RESUME);
1464 
1465 		rval = DDI_SUCCESS;
1466 
1467 		/* Restart IP if it was running. */
1468 		if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
1469 			(void) ql_initialize_ip(ha);
1470 			ql_isp_rcvbuf(ha);
1471 		}
1472 		break;
1473 
1474 	default:
1475 		cmn_err(CE_WARN, "%s(%d): attach, unknown code:"
1476 		    " %x", QL_NAME, ddi_get_instance(dip), cmd);
1477 		rval = DDI_FAILURE;
1478 		break;
1479 	}
1480 
1481 	kmem_free(buf, MAXPATHLEN);
1482 
1483 	if (rval != DDI_SUCCESS) {
1484 		/*EMPTY*/
1485 		QL_PRINT_2(CE_CONT, "(%d): failed, rval = %xh\n",
1486 		    ddi_get_instance(dip), rval);
1487 	} else {
1488 		/*EMPTY*/
1489 		QL_PRINT_3(CE_CONT, "(%d): done\n", ddi_get_instance(dip));
1490 	}
1491 
1492 	return (rval);
1493 }
1494 
1495 /*
1496  * ql_detach
 *	Used to remove all the state associated with a given
 *	instance of a device node prior to the removal of that
 *	instance from the system.
1500  *
1501  * Input:
1502  *	dip = pointer to device information structure.
1503  *	cmd = type of detach.
1504  *
1505  * Returns:
1506  *	DDI_SUCCESS or DDI_FAILURE.
1507  *
1508  * Context:
1509  *	Kernel context.
1510  */
1511 static int
1512 ql_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1513 {
1514 	ql_adapter_state_t	*ha, *vha;
1515 	ql_tgt_t		*tq;
1516 	int			try;
1517 	uint16_t		index;
1518 	ql_link_t		*link;
1519 	char			*buf;
1520 	timeout_id_t		timer_id = NULL;
1521 	int			rval = DDI_SUCCESS;
1522 
1523 	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
1524 	if (ha == NULL) {
1525 		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
1526 		    ddi_get_instance(dip));
1527 		return (DDI_FAILURE);
1528 	}
1529 
1530 	QL_PRINT_3(CE_CONT, "(%d): started, cmd=%xh\n", ha->instance, cmd);
1531 
1532 	buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
1533 
1534 	switch (cmd) {
1535 	case DDI_DETACH:
1536 		ADAPTER_STATE_LOCK(ha);
1537 		ha->flags |= (ADAPTER_SUSPENDED | ABORT_CMDS_LOOP_DOWN_TMO);
1538 		ADAPTER_STATE_UNLOCK(ha);
1539 
1540 		/* Acquire task daemon lock. */
1541 		TASK_DAEMON_LOCK(ha);
1542 
1543 		ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
1544 		cv_signal(&ha->cv_task_daemon);
1545 
1546 		/* Release task daemon lock. */
1547 		TASK_DAEMON_UNLOCK(ha);
1548 
1549 		/*
1550 		 * Wait for task daemon to stop running.
1551 		 * Internal command timeout is approximately
1552 		 * 30 seconds, so it would help in some corner
1553 		 * cases to wait that long
1554 		 */
1555 		try = 0;
1556 		while ((ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) &&
1557 		    try < 3000) {
1558 			ql_delay(ha, 10000);
1559 			try++;
1560 		}
1561 
1562 		TASK_DAEMON_LOCK(ha);
1563 		if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
1564 			ha->task_daemon_flags &= ~TASK_DAEMON_STOP_FLG;
1565 			TASK_DAEMON_UNLOCK(ha);
1566 			EL(ha, "failed, could not stop task daemon\n");
1567 			return (DDI_FAILURE);
1568 		}
1569 		TASK_DAEMON_UNLOCK(ha);
1570 
1571 		/* Acquire global state lock. */
1572 		GLOBAL_STATE_LOCK();
1573 
1574 		/* Disable driver timer if no adapters. */
1575 		if (ql_timer_timeout_id && ql_hba.first == &ha->hba &&
1576 		    ql_hba.last == &ha->hba) {
1577 			timer_id = ql_timer_timeout_id;
1578 			ql_timer_timeout_id = NULL;
1579 		}
1580 		ql_remove_link(&ql_hba, &ha->hba);
1581 
1582 		GLOBAL_STATE_UNLOCK();
1583 
1584 		if (timer_id) {
1585 			(void) untimeout(timer_id);
1586 		}
1587 
1588 		if (ha->pm_capable) {
1589 			if (pm_lower_power(dip, QL_POWER_COMPONENT,
1590 			    PM_LEVEL_D3) != DDI_SUCCESS) {
1591 				cmn_err(CE_WARN, "%s(%d): failed to lower the"
1592 				    " power", QL_NAME, ha->instance);
1593 			}
1594 		}
1595 
1596 		/*
1597 		 * If pm_lower_power shutdown the adapter, there
1598 		 * isn't much else to do
1599 		 */
1600 		if (ha->power_level != PM_LEVEL_D3) {
1601 			ql_halt(ha, PM_LEVEL_D3);
1602 		}
1603 
1604 		/* Remove virtual ports. */
1605 		while ((vha = ha->vp_next) != NULL) {
1606 			ql_vport_destroy(vha);
1607 		}
1608 
1609 		/* Free target queues. */
1610 		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
1611 			link = ha->dev[index].first;
1612 			while (link != NULL) {
1613 				tq = link->base_address;
1614 				link = link->next;
1615 				ql_dev_free(ha, tq);
1616 			}
1617 		}
1618 
1619 		/*
1620 		 * Free unsolicited buffers.
1621 		 * If we are here then there are no ULPs still
1622 		 * alive that wish to talk to ql so free up
1623 		 * any SRB_IP_UB_UNUSED buffers that are
1624 		 * lingering around
1625 		 */
1626 		QL_UB_LOCK(ha);
1627 		for (index = 0; index < QL_UB_LIMIT; index++) {
1628 			fc_unsol_buf_t *ubp = ha->ub_array[index];
1629 
1630 			if (ubp != NULL) {
1631 				ql_srb_t *sp = ubp->ub_fca_private;
1632 
1633 				sp->flags |= SRB_UB_FREE_REQUESTED;
1634 
1635 				while (!(sp->flags & SRB_UB_IN_FCA) ||
1636 				    (sp->flags & (SRB_UB_CALLBACK |
1637 				    SRB_UB_ACQUIRED))) {
1638 					QL_UB_UNLOCK(ha);
1639 					delay(drv_usectohz(100000));
1640 					QL_UB_LOCK(ha);
1641 				}
1642 				ha->ub_array[index] = NULL;
1643 
1644 				QL_UB_UNLOCK(ha);
1645 				ql_free_unsolicited_buffer(ha, ubp);
1646 				QL_UB_LOCK(ha);
1647 			}
1648 		}
1649 		QL_UB_UNLOCK(ha);
1650 
1651 		/* Free any saved RISC code. */
1652 		if (ha->risc_code != NULL) {
1653 			kmem_free(ha->risc_code, ha->risc_code_size);
1654 			ha->risc_code = NULL;
1655 			ha->risc_code_size = 0;
1656 		}
1657 
1658 		if (ha->fw_module != NULL) {
1659 			(void) ddi_modclose(ha->fw_module);
1660 			ha->fw_module = NULL;
1661 		}
1662 
1663 		/* Free resources. */
1664 		ddi_prop_remove_all(dip);
1665 		(void) fc_fca_detach(dip);
1666 		kmem_free(ha->tran, sizeof (fc_fca_tran_t));
1667 		ddi_remove_minor_node(dip, "devctl");
1668 		if (ha->k_stats != NULL) {
1669 			kstat_delete(ha->k_stats);
1670 		}
1671 
1672 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
1673 			ddi_regs_map_free(&ha->sbus_config_handle);
1674 		} else {
1675 			ddi_regs_map_free(&ha->iomap_dev_handle);
1676 			pci_config_teardown(&ha->pci_handle);
1677 		}
1678 
1679 		ql_disable_intr(ha);
1680 		ql_release_intr(ha);
1681 
1682 		ql_free_xioctl_resource(ha);
1683 
1684 		ql_destroy_mutex(ha);
1685 
1686 		ql_free_phys(ha, &ha->hba_buf);
1687 		ql_free_phys(ha, &ha->fwexttracebuf);
1688 		ql_free_phys(ha, &ha->fwfcetracebuf);
1689 
1690 		ddi_regs_map_free(&ha->dev_handle);
1691 		if (ha->sbus_fpga_iobase != NULL) {
1692 			ddi_regs_map_free(&ha->sbus_fpga_dev_handle);
1693 		}
1694 
1695 		ql_fcache_rel(ha->fcache);
1696 		if (ha->vcache != NULL) {
1697 			kmem_free(ha->vcache, QL_24XX_VPD_SIZE);
1698 		}
1699 
1700 		if (ha->pi_attrs != NULL) {
1701 			kmem_free(ha->pi_attrs, sizeof (fca_port_attrs_t));
1702 		}
1703 
1704 		ASSERT(ha->dev && ha->outstanding_cmds && ha->ub_array &&
1705 		    ha->adapter_stats);
1706 
1707 		kmem_free(ha->adapter_stats, sizeof (*ha->adapter_stats));
1708 
1709 		kmem_free(ha->ub_array, sizeof (*ha->ub_array) * QL_UB_LIMIT);
1710 
1711 		kmem_free(ha->outstanding_cmds,
1712 		    sizeof (*ha->outstanding_cmds) * MAX_OUTSTANDING_COMMANDS);
1713 
1714 		if (ha->devpath != NULL) {
1715 			kmem_free(ha->devpath, strlen(ha->devpath) + 1);
1716 		}
1717 
1718 		kmem_free(ha->dev, sizeof (*ha->dev) * DEVICE_HEAD_LIST_SIZE);
1719 
1720 		EL(ha, "detached\n");
1721 
1722 		ddi_soft_state_free(ql_state, (int)ha->instance);
1723 
1724 		break;
1725 
1726 	case DDI_SUSPEND:
1727 		ADAPTER_STATE_LOCK(ha);
1728 
1729 		try = 0;
1730 		ha->flags |= ADAPTER_SUSPENDED;
1731 		while (ha->flags & ADAPTER_TIMER_BUSY && try++ < 10) {
1732 			ADAPTER_STATE_UNLOCK(ha);
1733 			delay(drv_usectohz(1000000));
1734 			ADAPTER_STATE_LOCK(ha);
1735 		}
1736 		if (ha->busy || ha->flags & ADAPTER_TIMER_BUSY) {
1737 			ha->flags &= ~ADAPTER_SUSPENDED;
1738 			ADAPTER_STATE_UNLOCK(ha);
1739 			rval = DDI_FAILURE;
1740 			cmn_err(CE_WARN, "!%s(%d): Fail suspend"
1741 			    " busy %xh flags %xh", QL_NAME, ha->instance,
1742 			    ha->busy, ha->flags);
1743 			break;
1744 		}
1745 
1746 		ADAPTER_STATE_UNLOCK(ha);
1747 
1748 		if (ha->flags & IP_INITIALIZED) {
1749 			(void) ql_shutdown_ip(ha);
1750 		}
1751 
1752 		try = ql_suspend_adapter(ha);
1753 		if (try != QL_SUCCESS) {
1754 			ADAPTER_STATE_LOCK(ha);
1755 			ha->flags &= ~ADAPTER_SUSPENDED;
1756 			ADAPTER_STATE_UNLOCK(ha);
1757 			cmn_err(CE_WARN, "%s(%d): Fail suspend rval %xh",
1758 			    QL_NAME, ha->instance, try);
1759 
1760 			/* Restart IP if it was running. */
1761 			if (ha->flags & IP_ENABLED &&
1762 			    !(ha->flags & IP_INITIALIZED)) {
1763 				(void) ql_initialize_ip(ha);
1764 				ql_isp_rcvbuf(ha);
1765 			}
1766 			rval = DDI_FAILURE;
1767 			break;
1768 		}
1769 
1770 		/* Acquire global state lock. */
1771 		GLOBAL_STATE_LOCK();
1772 
1773 		/* Disable driver timer if last adapter. */
1774 		if (ql_timer_timeout_id && ql_hba.first == &ha->hba &&
1775 		    ql_hba.last == &ha->hba) {
1776 			timer_id = ql_timer_timeout_id;
1777 			ql_timer_timeout_id = NULL;
1778 		}
1779 		GLOBAL_STATE_UNLOCK();
1780 
1781 		if (timer_id) {
1782 			(void) untimeout(timer_id);
1783 		}
1784 
1785 		break;
1786 
1787 	default:
1788 		rval = DDI_FAILURE;
1789 		break;
1790 	}
1791 
1792 	kmem_free(buf, MAXPATHLEN);
1793 
1794 	if (rval != DDI_SUCCESS) {
1795 		if (ha != NULL) {
1796 			EL(ha, "failed, rval = %xh\n", rval);
1797 		} else {
1798 			/*EMPTY*/
1799 			QL_PRINT_2(CE_CONT, "(%d): failed, rval = %xh\n",
1800 			    ddi_get_instance(dip), rval);
1801 		}
1802 	} else {
1803 		/*EMPTY*/
1804 		QL_PRINT_3(CE_CONT, "(%d): done\n", ddi_get_instance(dip));
1805 	}
1806 
1807 	return (rval);
1808 }
1809 
1810 /*
1811  * ql_power
1812  *	Power a device attached to the system.
1813  *
1814  * Input:
1815  *	dip = pointer to device information structure.
1816  *	component = device.
1817  *	level = power level.
1818  *
1819  * Returns:
1820  *	DDI_SUCCESS or DDI_FAILURE.
1821  *
1822  * Context:
1823  *	Kernel context.
1824  */
1825 /* ARGSUSED */
1826 static int
1827 ql_power(dev_info_t *dip, int component, int level)
1828 {
1829 	int			rval = DDI_FAILURE;
1830 	off_t			csr;
1831 	uint8_t			saved_pm_val;
1832 	ql_adapter_state_t	*ha;
1833 	char			*buf;
1834 	char			*path;
1835 
1836 	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
1837 	if (ha == NULL || ha->pm_capable == 0) {
1838 		QL_PRINT_2(CE_CONT, "(%d): no hba or PM not supported\n",
1839 		    ddi_get_instance(dip));
1840 		return (rval);
1841 	}
1842 
1843 	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
1844 
1845 	buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
1846 	path = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
1847 
1848 	if (component != QL_POWER_COMPONENT || (level != PM_LEVEL_D0 &&
1849 	    level != PM_LEVEL_D3)) {
1850 		EL(ha, "invalid, component=%xh or level=%xh\n",
1851 		    component, level);
1852 		return (rval);
1853 	}
1854 
1855 	GLOBAL_HW_LOCK();
1856 	csr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR) + PCI_PMCSR;
1857 	GLOBAL_HW_UNLOCK();
1858 
1859 	ASSERT(csr == QL_PM_CS_REG);
1860 
1861 	(void) snprintf(buf, sizeof (buf),
1862 	    "Qlogic %s(%d): %s\n\t", QL_NAME, ddi_get_instance(dip),
1863 	    ddi_pathname(dip, path));
1864 
1865 	switch (level) {
1866 	case PM_LEVEL_D0:	/* power up to D0 state - fully on */
1867 
1868 		QL_PM_LOCK(ha);
1869 		if (ha->power_level == PM_LEVEL_D0) {
1870 			QL_PM_UNLOCK(ha);
1871 			rval = DDI_SUCCESS;
1872 			break;
1873 		}
1874 
1875 		/*
1876 		 * Enable interrupts now
1877 		 */
1878 		saved_pm_val = ha->power_level;
1879 		ha->power_level = PM_LEVEL_D0;
1880 		QL_PM_UNLOCK(ha);
1881 
1882 		GLOBAL_HW_LOCK();
1883 
1884 		ql_pci_config_put16(ha, csr, PCI_PMCSR_D0);
1885 
1886 		/*
1887 		 * Delay after reset, for chip to recover.
1888 		 * Otherwise causes system PANIC
1889 		 */
1890 		drv_usecwait(200000);
1891 
1892 		GLOBAL_HW_UNLOCK();
1893 
1894 		if (ha->config_saved) {
1895 			ha->config_saved = 0;
1896 			if (QL_RESTORE_CONFIG_REGS(dip) != DDI_SUCCESS) {
1897 				QL_PM_LOCK(ha);
1898 				ha->power_level = saved_pm_val;
1899 				QL_PM_UNLOCK(ha);
1900 				cmn_err(CE_WARN, "%s failed to restore "
1901 				    "config regs", buf);
1902 				break;
1903 			}
1904 		}
1905 
1906 		if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1907 			cmn_err(CE_WARN, "%s adapter initialization failed",
1908 			    buf);
1909 		}
1910 
1911 		/* Wake up task_daemon. */
1912 		ql_awaken_task_daemon(ha, NULL, TASK_DAEMON_ALIVE_FLG |
1913 		    TASK_DAEMON_SLEEPING_FLG, 0);
1914 
1915 		/* Restart IP if it was running. */
1916 		if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
1917 			(void) ql_initialize_ip(ha);
1918 			ql_isp_rcvbuf(ha);
1919 		}
1920 
1921 		cmn_err(CE_NOTE, QL_BANG "ql_power(%d): %s is powered ON\n",
1922 		    ha->instance, QL_NAME);
1923 
1924 		rval = DDI_SUCCESS;
1925 		break;
1926 
1927 	case PM_LEVEL_D3:	/* power down to D3 state - off */
1928 
1929 		QL_PM_LOCK(ha);
1930 
1931 		if (ha->busy || ((ha->task_daemon_flags &
1932 		    TASK_DAEMON_SLEEPING_FLG) == 0)) {
1933 			QL_PM_UNLOCK(ha);
1934 			break;
1935 		}
1936 
1937 		if (ha->power_level == PM_LEVEL_D3) {
1938 			rval = DDI_SUCCESS;
1939 			QL_PM_UNLOCK(ha);
1940 			break;
1941 		}
1942 		QL_PM_UNLOCK(ha);
1943 
1944 		if (QL_SAVE_CONFIG_REGS(dip) != DDI_SUCCESS) {
1945 			cmn_err(CE_WARN, "!Qlogic %s(%d): %s failed to save"
1946 			    " config regs", QL_NAME, ha->instance, buf);
1947 			break;
1948 		}
1949 		ha->config_saved = 1;
1950 
1951 		/*
1952 		 * Don't enable interrupts. Running mailbox commands with
1953 		 * interrupts enabled could cause hangs since pm_run_scan()
1954 		 * runs out of a callout thread and on single cpu systems
1955 		 * cv_timedwait(), called from ql_mailbox_command(), would
1956 		 * not get to run.
1957 		 */
1958 		TASK_DAEMON_LOCK(ha);
1959 		ha->task_daemon_flags |= TASK_DAEMON_POWERING_DOWN;
1960 		TASK_DAEMON_UNLOCK(ha);
1961 
1962 		ql_halt(ha, PM_LEVEL_D3);
1963 
1964 		/*
1965 		 * Setup ql_intr to ignore interrupts from here on.
1966 		 */
1967 		QL_PM_LOCK(ha);
1968 		ha->power_level = PM_LEVEL_D3;
1969 		QL_PM_UNLOCK(ha);
1970 
1971 		/*
1972 		 * Wait for ISR to complete.
1973 		 */
1974 		INTR_LOCK(ha);
1975 		ql_pci_config_put16(ha, csr, PCI_PMCSR_D3HOT);
1976 		INTR_UNLOCK(ha);
1977 
1978 		cmn_err(CE_NOTE, QL_BANG "ql_power(%d): %s is powered OFF\n",
1979 		    ha->instance, QL_NAME);
1980 
1981 		rval = DDI_SUCCESS;
1982 		break;
1983 	}
1984 
1985 	kmem_free(buf, MAXPATHLEN);
1986 	kmem_free(path, MAXPATHLEN);
1987 
1988 	QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);
1989 
1990 	return (rval);
1991 }
1992 
1993 /*
1994  * ql_quiesce
1995  *	quiesce a device attached to the system.
1996  *
1997  * Input:
1998  *	dip = pointer to device information structure.
1999  *
2000  * Returns:
2001  *	DDI_SUCCESS
2002  *
2003  * Context:
2004  *	Kernel context.
2005  */
2006 /* ARGSUSED */
static int
ql_quiesce(dev_info_t *dip)
{
	ql_adapter_state_t	*ha;
	uint32_t		timer;	/* poll-loop iteration count */
	uint32_t		stat;	/* RISC-to-host interrupt status */

	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
	if (ha == NULL) {
		/* Oh well.... */
		return (DDI_SUCCESS);
	}

	if (CFG_IST(ha, CFG_CTRL_2425)) {
		/*
		 * 24xx/25xx: issue MBC_STOP_FIRMWARE through mailbox 0,
		 * then hard-reset the chip below.
		 */
		WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
		WRT16_IO_REG(ha, mailbox[0], MBC_STOP_FIRMWARE);
		WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
		/* Poll up to ~3s (30000 * 100us) for mailbox completion. */
		for (timer = 0; timer < 30000; timer++) {
			stat = RD32_IO_REG(ha, intr_info_lo);
			/* BIT_15 set means a RISC interrupt is pending. */
			if (stat & BIT_15) {
				/*
				 * Low-byte values below 0x12 appear to be
				 * mailbox-completion event codes — exit
				 * the poll loop once one is seen.
				 */
				if ((stat & 0xff) < 0x12) {
					WRT32_IO_REG(ha, hccr,
					    HC24_CLR_RISC_INT);
					break;
				}
				WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
			}
			drv_usecwait(100);
		}
		/* Reset the chip. */
		WRT32_IO_REG(ha, ctrl_status, ISP_RESET | DMA_SHUTDOWN |
		    MWB_4096_BYTES);
		drv_usecwait(100);

	} else {
		/* Legacy ISPs: quiesce with direct register writes. */
		/* Disable ISP interrupts. */
		WRT16_IO_REG(ha, ictrl, 0);
		/* Select RISC module registers. */
		WRT16_IO_REG(ha, ctrl_status, 0);
		/* Reset ISP semaphore. */
		WRT16_IO_REG(ha, semaphore, 0);
		/* Reset RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RESET_RISC);
		/* Release RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
	}

	return (DDI_SUCCESS);
}
2056 
2057 
2058 /* ************************************************************************ */
2059 /*		Fibre Channel Adapter (FCA) Transport Functions.	    */
2060 /* ************************************************************************ */
2061 
2062 /*
2063  * ql_bind_port
2064  *	Handling port binding. The FC Transport attempts to bind an FCA port
2065  *	when it is ready to start transactions on the port. The FC Transport
2066  *	will call the fca_bind_port() function specified in the fca_transport
2067  *	structure it receives. The FCA must fill in the port_info structure
2068  *	passed in the call and also stash the information for future calls.
2069  *
2070  * Input:
2071  *	dip = pointer to FCA information structure.
2072  *	port_info = pointer to port information structure.
2073  *	bind_info = pointer to bind information structure.
2074  *
2075  * Returns:
2076  *	NULL = failure
2077  *
2078  * Context:
2079  *	Kernel context.
2080  */
2081 static opaque_t
2082 ql_bind_port(dev_info_t *dip, fc_fca_port_info_t *port_info,
2083     fc_fca_bind_info_t *bind_info)
2084 {
2085 	ql_adapter_state_t	*ha, *vha;
2086 	opaque_t		fca_handle = NULL;
2087 	port_id_t		d_id;
2088 	int			port_npiv = bind_info->port_npiv;
2089 	uchar_t			*port_nwwn = bind_info->port_nwwn.raw_wwn;
2090 	uchar_t			*port_pwwn = bind_info->port_pwwn.raw_wwn;
2091 
2092 	/* get state info based on the dip */
2093 	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
2094 	if (ha == NULL) {
2095 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2096 		    (void *)fca_handle);
2097 		return (NULL);
2098 	}
2099 	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
2100 
2101 	/* Verify port number is supported. */
2102 	if (port_npiv != 0) {
2103 		if (!(ha->flags & VP_ENABLED)) {
2104 			QL_PRINT_2(CE_CONT, "(%d): FC_NPIV_NOT_SUPPORTED\n",
2105 			    ha->instance);
2106 			port_info->pi_error = FC_NPIV_NOT_SUPPORTED;
2107 			return (NULL);
2108 		}
2109 		if (!(ha->flags & POINT_TO_POINT)) {
2110 			QL_PRINT_2(CE_CONT, "(%d): FC_NPIV_WRONG_TOPOLOGY\n",
2111 			    ha->instance);
2112 			port_info->pi_error = FC_NPIV_WRONG_TOPOLOGY;
2113 			return (NULL);
2114 		}
2115 		if (!(ha->flags & FDISC_ENABLED)) {
2116 			QL_PRINT_2(CE_CONT, "(%d): switch does not support "
2117 			    "FDISC\n", ha->instance);
2118 			port_info->pi_error = FC_NPIV_FDISC_FAILED;
2119 			return (NULL);
2120 		}
2121 		if (bind_info->port_num > (CFG_IST(ha, CFG_CTRL_2422) ?
2122 		    MAX_24_VIRTUAL_PORTS : MAX_25_VIRTUAL_PORTS)) {
2123 			QL_PRINT_2(CE_CONT, "(%d): port number=%d "
2124 			    "FC_OUTOFBOUNDS\n", ha->instance);
2125 			port_info->pi_error = FC_OUTOFBOUNDS;
2126 			return (NULL);
2127 		}
2128 	} else if (bind_info->port_num != 0) {
2129 		QL_PRINT_2(CE_CONT, "(%d): failed, port number=%d is not "
2130 		    "supported\n", ha->instance, bind_info->port_num);
2131 		port_info->pi_error = FC_OUTOFBOUNDS;
2132 		return (NULL);
2133 	}
2134 
2135 	/* Locate port context. */
2136 	for (vha = ha; vha != NULL; vha = vha->vp_next) {
2137 		if (vha->vp_index == bind_info->port_num) {
2138 			break;
2139 		}
2140 	}
2141 
2142 	/* If virtual port does not exist. */
2143 	if (vha == NULL) {
2144 		vha = ql_vport_create(ha, (uint8_t)bind_info->port_num);
2145 	}
2146 
2147 	/* make sure this port isn't already bound */
2148 	if (vha->flags & FCA_BOUND) {
2149 		port_info->pi_error = FC_ALREADY;
2150 	} else {
2151 		if (vha->vp_index != 0) {
2152 			bcopy(port_nwwn,
2153 			    vha->loginparams.node_ww_name.raw_wwn, 8);
2154 			bcopy(port_pwwn,
2155 			    vha->loginparams.nport_ww_name.raw_wwn, 8);
2156 		}
2157 		if (vha->vp_index != 0 && !(vha->flags & VP_ENABLED)) {
2158 			if (ql_vport_enable(vha) != QL_SUCCESS) {
2159 				QL_PRINT_2(CE_CONT, "(%d): failed to enable "
2160 				    "virtual port=%d\n", ha->instance,
2161 				    vha->vp_index);
2162 				port_info->pi_error = FC_NPIV_FDISC_FAILED;
2163 				return (NULL);
2164 			}
2165 			cmn_err(CE_CONT, "!Qlogic %s(%d) NPIV(%d) "
2166 			    "WWPN=%02x%02x%02x%02x%02x%02x%02x%02x : "
2167 			    "WWNN=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2168 			    QL_NAME, ha->instance, vha->vp_index,
2169 			    port_pwwn[0], port_pwwn[1], port_pwwn[2],
2170 			    port_pwwn[3], port_pwwn[4], port_pwwn[5],
2171 			    port_pwwn[6], port_pwwn[7],
2172 			    port_nwwn[0], port_nwwn[1], port_nwwn[2],
2173 			    port_nwwn[3], port_nwwn[4], port_nwwn[5],
2174 			    port_nwwn[6], port_nwwn[7]);
2175 		}
2176 
2177 		/* stash the bind_info supplied by the FC Transport */
2178 		vha->bind_info.port_handle = bind_info->port_handle;
2179 		vha->bind_info.port_statec_cb =
2180 		    bind_info->port_statec_cb;
2181 		vha->bind_info.port_unsol_cb = bind_info->port_unsol_cb;
2182 
2183 		/* Set port's source ID. */
2184 		port_info->pi_s_id.port_id = vha->d_id.b24;
2185 
2186 		/* copy out the default login parameters */
2187 		bcopy((void *)&vha->loginparams,
2188 		    (void *)&port_info->pi_login_params,
2189 		    sizeof (la_els_logi_t));
2190 
2191 		/* Set port's hard address if enabled. */
2192 		port_info->pi_hard_addr.hard_addr = 0;
2193 		if (bind_info->port_num == 0) {
2194 			d_id.b24 = ha->d_id.b24;
2195 			if (CFG_IST(ha, CFG_CTRL_2425)) {
2196 				if (ha->init_ctrl_blk.cb24.
2197 				    firmware_options_1[0] & BIT_0) {
2198 					d_id.b.al_pa = ql_index_to_alpa[ha->
2199 					    init_ctrl_blk.cb24.
2200 					    hard_address[0]];
2201 					port_info->pi_hard_addr.hard_addr =
2202 					    d_id.b24;
2203 				}
2204 			} else if (ha->init_ctrl_blk.cb.firmware_options[0] &
2205 			    BIT_0) {
2206 				d_id.b.al_pa = ql_index_to_alpa[ha->
2207 				    init_ctrl_blk.cb.hard_address[0]];
2208 				port_info->pi_hard_addr.hard_addr = d_id.b24;
2209 			}
2210 
2211 			/* Set the node id data */
2212 			if (ql_get_rnid_params(ha,
2213 			    sizeof (port_info->pi_rnid_params.params),
2214 			    (caddr_t)&port_info->pi_rnid_params.params) ==
2215 			    QL_SUCCESS) {
2216 				port_info->pi_rnid_params.status = FC_SUCCESS;
2217 			} else {
2218 				port_info->pi_rnid_params.status = FC_FAILURE;
2219 			}
2220 
2221 			/* Populate T11 FC-HBA details */
2222 			ql_populate_hba_fru_details(ha, port_info);
2223 			ha->pi_attrs = kmem_zalloc(sizeof (fca_port_attrs_t),
2224 			    KM_SLEEP);
2225 			if (ha->pi_attrs != NULL) {
2226 				bcopy(&port_info->pi_attrs, ha->pi_attrs,
2227 				    sizeof (fca_port_attrs_t));
2228 			}
2229 		} else {
2230 			port_info->pi_rnid_params.status = FC_FAILURE;
2231 			if (ha->pi_attrs != NULL) {
2232 				bcopy(ha->pi_attrs, &port_info->pi_attrs,
2233 				    sizeof (fca_port_attrs_t));
2234 			}
2235 		}
2236 
2237 		/* Generate handle for this FCA. */
2238 		fca_handle = (opaque_t)vha;
2239 
2240 		ADAPTER_STATE_LOCK(ha);
2241 		vha->flags |= FCA_BOUND;
2242 		ADAPTER_STATE_UNLOCK(ha);
2243 		/* Set port's current state. */
2244 		port_info->pi_port_state = vha->state;
2245 	}
2246 
2247 	QL_PRINT_10(CE_CONT, "(%d,%d): done, pi_port_state=%xh, "
2248 	    "pi_s_id.port_id=%xh\n", ha->instance, ha->vp_index,
2249 	    port_info->pi_port_state, port_info->pi_s_id.port_id);
2250 
2251 	return (fca_handle);
2252 }
2253 
2254 /*
2255  * ql_unbind_port
2256  *	To unbind a Fibre Channel Adapter from an FC Port driver.
2257  *
2258  * Input:
2259  *	fca_handle = handle setup by ql_bind_port().
2260  *
2261  * Context:
2262  *	Kernel context.
2263  */
2264 static void
2265 ql_unbind_port(opaque_t fca_handle)
2266 {
2267 	ql_adapter_state_t	*ha;
2268 	ql_tgt_t		*tq;
2269 	uint32_t		flgs;
2270 
2271 	ha = ql_fca_handle_to_state(fca_handle);
2272 	if (ha == NULL) {
2273 		/*EMPTY*/
2274 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2275 		    (void *)fca_handle);
2276 	} else {
2277 		QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance,
2278 		    ha->vp_index);
2279 
2280 		if (!(ha->flags & FCA_BOUND)) {
2281 			/*EMPTY*/
2282 			QL_PRINT_2(CE_CONT, "(%d): port=%d already unbound\n",
2283 			    ha->instance, ha->vp_index);
2284 		} else {
2285 			if (ha->vp_index != 0 && ha->flags & VP_ENABLED) {
2286 				if ((tq = ql_loop_id_to_queue(ha,
2287 				    FL_PORT_24XX_HDL)) != NULL) {
2288 					(void) ql_log_iocb(ha, tq, tq->loop_id,
2289 					    CFO_FREE_N_PORT_HANDLE |
2290 					    CFO_EXPLICIT_LOGO | CF_CMD_LOGO,
2291 					    NULL);
2292 				}
2293 				(void) ql_vport_control(ha,
2294 				    VPC_DISABLE_INIT);
2295 				flgs = FCA_BOUND | VP_ENABLED;
2296 			} else {
2297 				flgs = FCA_BOUND;
2298 			}
2299 			ADAPTER_STATE_LOCK(ha);
2300 			ha->flags &= ~flgs;
2301 			ADAPTER_STATE_UNLOCK(ha);
2302 		}
2303 
2304 		QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance,
2305 		    ha->vp_index);
2306 	}
2307 }
2308 
2309 /*
2310  * ql_init_pkt
2311  *	Initialize FCA portion of packet.
2312  *
2313  * Input:
2314  *	fca_handle = handle setup by ql_bind_port().
2315  *	pkt = pointer to fc_packet.
2316  *
2317  * Returns:
2318  *	FC_SUCCESS - the packet has successfully been initialized.
2319  *	FC_UNBOUND - the fca_handle specified is not bound.
2320  *	FC_NOMEM - the FCA failed initialization due to an allocation error.
2321  *	FC_FAILURE - the FCA failed initialization for undisclosed reasons
2322  *
2323  * Context:
2324  *	Kernel context.
2325  */
2326 /* ARGSUSED */
2327 static int
2328 ql_init_pkt(opaque_t fca_handle, fc_packet_t *pkt, int sleep)
2329 {
2330 	ql_adapter_state_t	*ha;
2331 	ql_srb_t		*sp;
2332 
2333 	ha = ql_fca_handle_to_state(fca_handle);
2334 	if (ha == NULL) {
2335 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2336 		    (void *)fca_handle);
2337 		return (FC_UNBOUND);
2338 	}
2339 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2340 
2341 	ASSERT(ha->power_level == PM_LEVEL_D0);
2342 
2343 	sp = (ql_srb_t *)pkt->pkt_fca_private;
2344 	sp->flags = 0;
2345 
2346 	/* init cmd links */
2347 	sp->cmd.base_address = sp;
2348 	sp->cmd.prev = NULL;
2349 	sp->cmd.next = NULL;
2350 	sp->cmd.head = NULL;
2351 
2352 	/* init watchdog links */
2353 	sp->wdg.base_address = sp;
2354 	sp->wdg.prev = NULL;
2355 	sp->wdg.next = NULL;
2356 	sp->wdg.head = NULL;
2357 	sp->pkt = pkt;
2358 	sp->ha = ha;
2359 	sp->magic_number = QL_FCA_BRAND;
2360 
2361 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2362 
2363 	return (FC_SUCCESS);
2364 }
2365 
2366 /*
2367  * ql_un_init_pkt
2368  *	Release all local resources bound to packet.
2369  *
2370  * Input:
2371  *	fca_handle = handle setup by ql_bind_port().
2372  *	pkt = pointer to fc_packet.
2373  *
2374  * Returns:
2375  *	FC_SUCCESS - the packet has successfully been invalidated.
2376  *	FC_UNBOUND - the fca_handle specified is not bound.
2377  *	FC_BADPACKET - the packet has not been initialized or has
2378  *			already been freed by this FCA.
2379  *
2380  * Context:
2381  *	Kernel context.
2382  */
2383 static int
2384 ql_un_init_pkt(opaque_t fca_handle, fc_packet_t *pkt)
2385 {
2386 	ql_adapter_state_t *ha;
2387 	int rval;
2388 	ql_srb_t *sp;
2389 
2390 	ha = ql_fca_handle_to_state(fca_handle);
2391 	if (ha == NULL) {
2392 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2393 		    (void *)fca_handle);
2394 		return (FC_UNBOUND);
2395 	}
2396 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2397 
2398 	sp = (ql_srb_t *)pkt->pkt_fca_private;
2399 	ASSERT(sp->magic_number == QL_FCA_BRAND);
2400 
2401 	if (sp->magic_number != QL_FCA_BRAND) {
2402 		EL(ha, "failed, FC_BADPACKET\n");
2403 		rval = FC_BADPACKET;
2404 	} else {
2405 		sp->magic_number = NULL;
2406 
2407 		ASSERT((sp->flags & (SRB_IN_DEVICE_QUEUE |
2408 		    SRB_IN_TOKEN_ARRAY)) == 0);
2409 
2410 		rval = FC_SUCCESS;
2411 	}
2412 
2413 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2414 
2415 	return (rval);
2416 }
2417 
2418 /*
2419  * ql_els_send
2420  *	Issue a extended link service request.
2421  *
2422  * Input:
2423  *	fca_handle = handle setup by ql_bind_port().
2424  *	pkt = pointer to fc_packet.
2425  *
2426  * Returns:
2427  *	FC_SUCCESS - the command was successful.
2428  *	FC_ELS_FREJECT - the command was rejected by a Fabric.
2429  *	FC_ELS_PREJECT - the command was rejected by an N-port.
2430  *	FC_TRANSPORT_ERROR - a transport error occurred.
2431  *	FC_UNBOUND - the fca_handle specified is not bound.
2432  *	FC_ELS_BAD - the FCA can not issue the requested ELS.
2433  *
2434  * Context:
2435  *	Kernel context.
2436  */
static int
ql_els_send(opaque_t fca_handle, fc_packet_t *pkt)
{
	ql_adapter_state_t	*ha;
	int			rval;
	clock_t			timer;
	ls_code_t		els;	/* ELS command code from the payload */
	la_els_rjt_t		rjt;	/* reject built for unsupported ELS */
	ql_srb_t		*sp = (ql_srb_t *)pkt->pkt_fca_private;

	/* Verify proper command. */
	ha = ql_cmd_setup(fca_handle, pkt, &rval);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, ql_cmd_setup=%xh, fcah=%ph\n",
		    rval, fca_handle);
		return (FC_INVALID_REQUEST);
	}
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	ASSERT(ha->power_level == PM_LEVEL_D0);

	/*
	 * Wait for suspension to end.  Each pass re-arms a 30 second
	 * timeout; a timed-out wait returns the packet as busy rather
	 * than blocking the transport forever.
	 */
	TASK_DAEMON_LOCK(ha);
	while (ha->task_daemon_flags & QL_SUSPENDED) {
		ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;

		/* 30 seconds from now */
		timer = ddi_get_lbolt();
		timer += drv_usectohz(30000000);

		if (cv_timedwait(&ha->pha->cv_dr_suspended,
		    &ha->pha->task_daemon_mutex, timer) == -1) {
			/*
			 * The timeout time 'timer' was
			 * reached without the condition
			 * being signaled.
			 */
			pkt->pkt_state = FC_PKT_TRAN_BSY;
			pkt->pkt_reason = FC_REASON_XCHG_BSY;

			/* Release task daemon lock. */
			TASK_DAEMON_UNLOCK(ha);

			EL(ha, "QL_SUSPENDED failed=%xh\n",
			    QL_FUNCTION_TIMEOUT);
			return (FC_TRAN_BUSY);
		}
	}
	/* Release task daemon lock. */
	TASK_DAEMON_UNLOCK(ha);

	/*
	 * Setup response header.  Start from the command header, then
	 * swap direction: the response travels from our d_id back to
	 * the command's destination.
	 */
	bcopy((void *)&pkt->pkt_cmd_fhdr, (void *)&pkt->pkt_resp_fhdr,
	    sizeof (fc_frame_hdr_t));
	pkt->pkt_resp_fhdr.d_id = ha->d_id.b24;
	pkt->pkt_resp_fhdr.s_id = pkt->pkt_cmd_fhdr.d_id;
	pkt->pkt_resp_fhdr.r_ctl = R_CTL_EXTENDED_SVC |
	    R_CTL_SOLICITED_CONTROL;
	pkt->pkt_resp_fhdr.f_ctl = F_CTL_XCHG_CONTEXT | F_CTL_LAST_SEQ |
	    F_CTL_END_SEQ;

	/* Clear any stale classification flags left on the SRB. */
	sp->flags &= ~(SRB_UB_CALLBACK | SRB_UB_RSCN | SRB_UB_FCP |
	    SRB_FCP_CMD_PKT | SRB_FCP_DATA_PKT | SRB_FCP_RSP_PKT |
	    SRB_IP_PKT | SRB_COMMAND_TIMEOUT | SRB_UB_ACQUIRED | SRB_MS_PKT);

	/* map the type of ELS to a function */
	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

#if 0
	QL_PRINT_3(CE_CONT, "(%d): command fhdr:\n", ha->instance);
	QL_DUMP_3((uint8_t *)&pkt->pkt_cmd_fhdr, 32,
	    sizeof (fc_frame_hdr_t) / 4);
	QL_PRINT_3(CE_CONT, "(%d): command:\n", ha->instance);
	QL_DUMP_3((uint8_t *)&els, 32, sizeof (els) / 4);
#endif
	/* Dispatch on the ELS command code. */
	switch (els.ls_code) {
	case LA_ELS_RJT:
	case LA_ELS_ACC:
		/* Nothing to send; report immediate success. */
		pkt->pkt_state = FC_PKT_SUCCESS;
		rval = FC_SUCCESS;
		break;
	case LA_ELS_PLOGI:
	case LA_ELS_PDISC:
		rval = ql_els_plogi(ha, pkt);
		break;
	case LA_ELS_FLOGI:
	case LA_ELS_FDISC:
		rval = ql_els_flogi(ha, pkt);
		break;
	case LA_ELS_LOGO:
		rval = ql_els_logo(ha, pkt);
		break;
	case LA_ELS_PRLI:
		rval = ql_els_prli(ha, pkt);
		break;
	case LA_ELS_PRLO:
		rval = ql_els_prlo(ha, pkt);
		break;
	case LA_ELS_ADISC:
		rval = ql_els_adisc(ha, pkt);
		break;
	case LA_ELS_LINIT:
		rval = ql_els_linit(ha, pkt);
		break;
	case LA_ELS_LPC:
		rval = ql_els_lpc(ha, pkt);
		break;
	case LA_ELS_LSTS:
		rval = ql_els_lsts(ha, pkt);
		break;
	case LA_ELS_SCR:
		rval = ql_els_scr(ha, pkt);
		break;
	case LA_ELS_RSCN:
		rval = ql_els_rscn(ha, pkt);
		break;
	case LA_ELS_FARP_REQ:
		rval = ql_els_farp_req(ha, pkt);
		break;
	case LA_ELS_FARP_REPLY:
		rval = ql_els_farp_reply(ha, pkt);
		break;
	case LA_ELS_RLS:
		rval = ql_els_rls(ha, pkt);
		break;
	case LA_ELS_RNID:
		rval = ql_els_rnid(ha, pkt);
		break;
	default:
		EL(ha, "failed=%xh, UNSUPPORTED\n", els.ls_code);
		/* Build RJT. */
		bzero(&rjt, sizeof (rjt));
		rjt.ls_code.ls_code = LA_ELS_RJT;
		rjt.reason = FC_REASON_CMD_UNSUPPORTED;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		/* A reject is still a successful transport operation. */
		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_UNSUPPORTED;
		rval = FC_SUCCESS;
		break;
	}

#if 0
	QL_PRINT_3(CE_CONT, "(%d): response fhdr:\n", ha->instance);
	QL_DUMP_3((uint8_t *)&pkt->pkt_resp_fhdr, 32,
	    sizeof (fc_frame_hdr_t) / 4);
#endif
	/* Do command callback only on error */
	if (rval == FC_SUCCESS && !(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) &&
	    pkt->pkt_comp) {
		ql_awaken_task_daemon(ha, sp, 0, 0);
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
2601 
2602 /*
2603  * ql_get_cap
2604  *	Export FCA hardware and software capabilities.
2605  *
2606  * Input:
2607  *	fca_handle = handle setup by ql_bind_port().
2608  *	cap = pointer to the capabilities string.
2609  *	ptr = buffer pointer for return capability.
2610  *
2611  * Returns:
2612  *	FC_CAP_ERROR - no such capability
2613  *	FC_CAP_FOUND - the capability was returned and cannot be set
2614  *	FC_CAP_SETTABLE - the capability was returned and can be set
2615  *	FC_UNBOUND - the fca_handle specified is not bound.
2616  *
2617  * Context:
2618  *	Kernel context.
2619  */
static int
ql_get_cap(opaque_t fca_handle, char *cap, void *ptr)
{
	ql_adapter_state_t	*ha;
	int			rval;
	uint32_t		*rptr = (uint32_t *)ptr;

	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Match the capability name and copy the value out via ptr. */
	if (strcmp(cap, FC_NODE_WWN) == 0) {
		/* 8-byte raw node WWN. */
		bcopy((void *)&ha->loginparams.node_ww_name.raw_wwn[0],
		    ptr, 8);
		rval = FC_CAP_FOUND;
	} else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
		bcopy((void *)&ha->loginparams, ptr,
		    sizeof (la_els_logi_t));
		rval = FC_CAP_FOUND;
	} else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
		/* Maximum unsolicited buffers this driver will allocate. */
		*rptr = (uint32_t)QL_UB_LIMIT;
		rval = FC_CAP_FOUND;
	} else if (strcmp(cap, FC_CAP_NOSTREAM_ON_UNALIGN_BUF) == 0) {

		dev_info_t	*psydip = NULL;
#ifdef __sparc
		/*
		 * Disable streaming for certain 2 chip adapters
		 * below Psycho to handle Psycho byte hole issue.
		 */
		if ((CFG_IST(ha, CFG_MULTI_CHIP_ADAPTER)) &&
		    (!CFG_IST(ha, CFG_SBUS_CARD))) {
			/* Walk up the tree looking for a Psycho bridge. */
			for (psydip = ddi_get_parent(ha->dip); psydip;
			    psydip = ddi_get_parent(psydip)) {
				if (strcmp(ddi_driver_name(psydip),
				    "pcipsy") == 0) {
					break;
				}
			}
		}
#endif	/* __sparc */

		/* On non-sparc psydip stays NULL: streaming is allowed. */
		if (psydip) {
			*rptr = (uint32_t)FC_NO_STREAMING;
			EL(ha, "No Streaming\n");
		} else {
			*rptr = (uint32_t)FC_ALLOW_STREAMING;
			EL(ha, "Allow Streaming\n");
		}
		rval = FC_CAP_FOUND;
	} else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
		/* Max frame length lives in the chip-specific init block. */
		if (CFG_IST(ha, CFG_CTRL_2425)) {
			*rptr = (uint32_t)CHAR_TO_SHORT(
			    ha->init_ctrl_blk.cb24.max_frame_length[0],
			    ha->init_ctrl_blk.cb24.max_frame_length[1]);
		} else {
			*rptr = (uint32_t)CHAR_TO_SHORT(
			    ha->init_ctrl_blk.cb.max_frame_length[0],
			    ha->init_ctrl_blk.cb.max_frame_length[1]);
		}
		rval = FC_CAP_FOUND;
	} else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
		*rptr = FC_RESET_RETURN_ALL;
		rval = FC_CAP_FOUND;
	} else if (strcmp(cap, FC_CAP_FCP_DMA) == 0) {
		*rptr = FC_NO_DVMA_SPACE;
		rval = FC_CAP_FOUND;
	} else {
		EL(ha, "unknown=%s, FC_CAP_ERROR\n", cap);
		rval = FC_CAP_ERROR;
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
2700 
2701 /*
2702  * ql_set_cap
2703  *	Allow the FC Transport to set FCA capabilities if possible.
2704  *
2705  * Input:
2706  *	fca_handle = handle setup by ql_bind_port().
2707  *	cap = pointer to the capabilities string.
2708  *	ptr = buffer pointer for capability.
2709  *
2710  * Returns:
2711  *	FC_CAP_ERROR - no such capability
2712  *	FC_CAP_FOUND - the capability cannot be set by the FC Transport.
2713  *	FC_CAP_SETTABLE - the capability was successfully set.
2714  *	FC_UNBOUND - the fca_handle specified is not bound.
2715  *
2716  * Context:
2717  *	Kernel context.
2718  */
2719 /* ARGSUSED */
2720 static int
2721 ql_set_cap(opaque_t fca_handle, char *cap, void *ptr)
2722 {
2723 	ql_adapter_state_t	*ha;
2724 	int			rval;
2725 
2726 	ha = ql_fca_handle_to_state(fca_handle);
2727 	if (ha == NULL) {
2728 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2729 		    (void *)fca_handle);
2730 		return (FC_UNBOUND);
2731 	}
2732 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2733 
2734 	if (strcmp(cap, FC_NODE_WWN) == 0) {
2735 		rval = FC_CAP_FOUND;
2736 	} else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
2737 		rval = FC_CAP_FOUND;
2738 	} else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
2739 		rval = FC_CAP_FOUND;
2740 	} else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
2741 		rval = FC_CAP_FOUND;
2742 	} else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
2743 		rval = FC_CAP_FOUND;
2744 	} else {
2745 		EL(ha, "unknown=%s, FC_CAP_ERROR\n", cap);
2746 		rval = FC_CAP_ERROR;
2747 	}
2748 
2749 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2750 
2751 	return (rval);
2752 }
2753 
2754 /*
2755  * ql_getmap
2756  *	Request of Arbitrated Loop (AL-PA) map.
2757  *
2758  * Input:
2759  *	fca_handle = handle setup by ql_bind_port().
2760  *	mapbuf= buffer pointer for map.
2761  *
2762  * Returns:
2763  *	FC_OLDPORT - the specified port is not operating in loop mode.
2764  *	FC_OFFLINE - the specified port is not online.
2765  *	FC_NOMAP - there is no loop map available for this port.
2766  *	FC_UNBOUND - the fca_handle specified is not bound.
2767  *	FC_SUCCESS - a valid map has been placed in mapbuf.
2768  *
2769  * Context:
2770  *	Kernel context.
2771  */
static int
ql_getmap(opaque_t fca_handle, fc_lilpmap_t *mapbuf)
{
	ql_adapter_state_t	*ha;
	clock_t			timer;
	int			rval = FC_SUCCESS;

	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	ASSERT(ha->power_level == PM_LEVEL_D0);

	mapbuf->lilp_magic = (uint16_t)MAGIC_LIRP;
	mapbuf->lilp_myalpa = ha->d_id.b.al_pa;

	/*
	 * Wait for suspension to end.  Each pass re-arms a 30 second
	 * timeout; a timed-out wait reports busy instead of blocking
	 * the transport indefinitely.
	 */
	TASK_DAEMON_LOCK(ha);
	while (ha->task_daemon_flags & QL_SUSPENDED) {
		ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;

		/* 30 seconds from now */
		timer = ddi_get_lbolt();
		timer += drv_usectohz(30000000);

		if (cv_timedwait(&ha->pha->cv_dr_suspended,
		    &ha->pha->task_daemon_mutex, timer) == -1) {
			/*
			 * The timeout time 'timer' was
			 * reached without the condition
			 * being signaled.
			 */

			/* Release task daemon lock. */
			TASK_DAEMON_UNLOCK(ha);

			EL(ha, "QL_SUSPENDED failed, FC_TRAN_BUSY\n");
			return (FC_TRAN_BUSY);
		}
	}
	/* Release task daemon lock. */
	TASK_DAEMON_UNLOCK(ha);

	/* lilp_length is the first field of the firmware map layout. */
	if (ql_get_loop_position_map(ha, LOOP_POSITION_MAP_SIZE,
	    (caddr_t)&mapbuf->lilp_length) != QL_SUCCESS) {
		/*
		 * Transport drivers consider a map failure an offline
		 * condition, so wait a couple of seconds for any loop
		 * transitions to settle before the chip gets reset and
		 * everything restarts.
		 */
		ql_delay(ha, 2000000);
		EL(ha, "failed, FC_NOMAP\n");
		rval = FC_NOMAP;
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): my_alpa %xh len %xh "
		    "data %xh %xh %xh %xh\n", ha->instance,
		    mapbuf->lilp_myalpa, mapbuf->lilp_length,
		    mapbuf->lilp_alpalist[0], mapbuf->lilp_alpalist[1],
		    mapbuf->lilp_alpalist[2], mapbuf->lilp_alpalist[3]);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
#if 0
	QL_DUMP_3((uint8_t *)mapbuf, 8, sizeof (fc_lilpmap_t));
#endif
	return (rval);
}
2845 
2846 /*
2847  * ql_transport
2848  *	Issue an I/O request. Handles all regular requests.
2849  *
2850  * Input:
2851  *	fca_handle = handle setup by ql_bind_port().
2852  *	pkt = pointer to fc_packet.
2853  *
2854  * Returns:
2855  *	FC_SUCCESS - the packet was accepted for transport.
2856  *	FC_TRANSPORT_ERROR - a transport error occurred.
2857  *	FC_BADPACKET - the packet to be transported had not been
2858  *			initialized by this FCA.
2859  *	FC_UNBOUND - the fca_handle specified is not bound.
2860  *
2861  * Context:
2862  *	Kernel context.
2863  */
static int
ql_transport(opaque_t fca_handle, fc_packet_t *pkt)
{
	ql_adapter_state_t	*ha;
	int			rval = FC_TRANSPORT_ERROR;
	ql_srb_t		*sp = (ql_srb_t *)pkt->pkt_fca_private;

	/* Verify proper command. */
	ha = ql_cmd_setup(fca_handle, pkt, &rval);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, ql_cmd_setup=%xh, fcah=%ph\n",
		    rval, fca_handle);
		return (rval);
	}
	QL_PRINT_3(CE_CONT, "(%d): started command:\n", ha->instance);
#if 0
	QL_DUMP_3((uint8_t *)&pkt->pkt_cmd_fhdr, 32,
	    sizeof (fc_frame_hdr_t) / 4);
	QL_PRINT_3(CE_CONT, "(%d): command:\n", ha->instance);
	QL_DUMP_3((uint8_t *)pkt->pkt_cmd, 8, pkt->pkt_cmdlen);
#endif
	/* Only crash-dump traffic is expected while suspended. */
	if (ha->flags & ADAPTER_SUSPENDED) {
		ASSERT(pkt->pkt_tran_flags & FC_TRAN_DUMPING);
	}

	ASSERT(ha->power_level == PM_LEVEL_D0);

	/* Reset SRB flags. */
	sp->flags &= ~(SRB_ISP_STARTED | SRB_ISP_COMPLETED | SRB_RETRY |
	    SRB_POLL | SRB_WATCHDOG_ENABLED | SRB_ABORT | SRB_UB_CALLBACK |
	    SRB_UB_RSCN | SRB_UB_FCP | SRB_FCP_CMD_PKT | SRB_FCP_DATA_PKT |
	    SRB_FCP_RSP_PKT | SRB_IP_PKT | SRB_GENERIC_SERVICES_PKT |
	    SRB_COMMAND_TIMEOUT | SRB_ABORTING | SRB_IN_DEVICE_QUEUE |
	    SRB_IN_TOKEN_ARRAY | SRB_UB_FREE_REQUESTED | SRB_UB_ACQUIRED |
	    SRB_MS_PKT);

	/* Pre-fill the response header from our id and the command. */
	pkt->pkt_resp_fhdr.d_id = ha->d_id.b24;
	pkt->pkt_resp_fhdr.r_ctl = R_CTL_STATUS;
	pkt->pkt_resp_fhdr.s_id = pkt->pkt_cmd_fhdr.d_id;
	pkt->pkt_resp_fhdr.f_ctl = pkt->pkt_cmd_fhdr.f_ctl;
	pkt->pkt_resp_fhdr.type = pkt->pkt_cmd_fhdr.type;

	/*
	 * Classify the request by routing control and FC-4 type; an
	 * unrecognized combination falls out with FC_TRANSPORT_ERROR.
	 */
	switch (pkt->pkt_cmd_fhdr.r_ctl) {
	case R_CTL_COMMAND:
		if (pkt->pkt_cmd_fhdr.type == FC_TYPE_SCSI_FCP) {
			/* SCSI FCP command phase. */
			sp->flags |= SRB_FCP_CMD_PKT;
			rval = ql_fcp_scsi_cmd(ha, pkt, sp);
		}
		break;

	default:
		/* Setup response header and buffer. */
		if (pkt->pkt_rsplen) {
			bzero((void *)pkt->pkt_resp, pkt->pkt_rsplen);
		}

		switch (pkt->pkt_cmd_fhdr.r_ctl) {
		case R_CTL_SOLICITED_DATA:
			if (pkt->pkt_cmd_fhdr.type == FC_TYPE_SCSI_FCP) {
				/* SCSI FCP data phase. */
				sp->flags |= SRB_FCP_DATA_PKT;
				rval = ql_fcp_data_rsp(ha, pkt, sp);
			}
			break;

		case R_CTL_STATUS:
			if (pkt->pkt_cmd_fhdr.type == FC_TYPE_SCSI_FCP) {
				/* SCSI FCP response phase. */
				sp->flags |= SRB_FCP_RSP_PKT;
				rval = ql_fcp_data_rsp(ha, pkt, sp);
			}
			break;

		case R_CTL_UNSOL_DATA:
			if (pkt->pkt_cmd_fhdr.type == FC_TYPE_IS8802_SNAP) {
				/* IP over Fibre Channel frame. */
				sp->flags |= SRB_IP_PKT;
				rval = ql_fcp_ip_cmd(ha, pkt, sp);
			}
			break;

		case R_CTL_UNSOL_CONTROL:
			if (pkt->pkt_cmd_fhdr.type == FC_TYPE_FC_SERVICES) {
				/* Generic (name/management) services. */
				sp->flags |= SRB_GENERIC_SERVICES_PKT;
				rval = ql_fc_services(ha, pkt);
			}
			break;

		default:
			pkt->pkt_state = FC_PKT_LOCAL_RJT;
			pkt->pkt_reason = FC_REASON_UNSUPPORTED;
			rval = FC_TRANSPORT_ERROR;
			EL(ha, "unknown, r_ctl=%xh\n",
			    pkt->pkt_cmd_fhdr.r_ctl);
			break;
		}
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}

	return (rval);
}
2968 
2969 /*
2970  * ql_ub_alloc
2971  *	Allocate buffers for unsolicited exchanges.
2972  *
2973  * Input:
2974  *	fca_handle = handle setup by ql_bind_port().
2975  *	tokens = token array for each buffer.
2976  *	size = size of each buffer.
2977  *	count = pointer to number of buffers.
2978  *	type = the FC-4 type the buffers are reserved for.
2979  *		1 = Extended Link Services, 5 = LLC/SNAP
2980  *
2981  * Returns:
2982  *	FC_FAILURE - buffers could not be allocated.
2983  *	FC_TOOMANY - the FCA could not allocate the requested
2984  *			number of buffers.
2985  *	FC_SUCCESS - unsolicited buffers were allocated.
2986  *	FC_UNBOUND - the fca_handle specified is not bound.
2987  *
2988  * Context:
2989  *	Kernel context.
2990  */
2991 static int
2992 ql_ub_alloc(opaque_t fca_handle, uint64_t tokens[], uint32_t size,
2993     uint32_t *count, uint32_t type)
2994 {
2995 	ql_adapter_state_t	*ha;
2996 	caddr_t			bufp = NULL;
2997 	fc_unsol_buf_t		*ubp;
2998 	ql_srb_t		*sp;
2999 	uint32_t		index;
3000 	uint32_t		cnt;
3001 	uint32_t		ub_array_index = 0;
3002 	int			rval = FC_SUCCESS;
3003 	int			ub_updated = FALSE;
3004 
3005 	/* Check handle. */
3006 	ha = ql_fca_handle_to_state(fca_handle);
3007 	if (ha == NULL) {
3008 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
3009 		    (void *)fca_handle);
3010 		return (FC_UNBOUND);
3011 	}
3012 	QL_PRINT_10(CE_CONT, "(%d,%d): started, count = %xh\n",
3013 	    ha->instance, ha->vp_index, *count);
3014 
3015 	QL_PM_LOCK(ha);
3016 	if (ha->power_level != PM_LEVEL_D0) {
3017 		QL_PM_UNLOCK(ha);
3018 		QL_PRINT_3(CE_CONT, "(%d,%d): down done\n", ha->instance,
3019 		    ha->vp_index);
3020 		return (FC_FAILURE);
3021 	}
3022 	QL_PM_UNLOCK(ha);
3023 
3024 	/* Acquire adapter state lock. */
3025 	ADAPTER_STATE_LOCK(ha);
3026 
3027 	/* Check the count. */
3028 	if ((*count + ha->ub_allocated) > QL_UB_LIMIT) {
3029 		*count = 0;
3030 		EL(ha, "failed, FC_TOOMANY\n");
3031 		rval = FC_TOOMANY;
3032 	}
3033 
3034 	/*
3035 	 * reset ub_array_index
3036 	 */
3037 	ub_array_index = 0;
3038 
3039 	/*
3040 	 * Now proceed to allocate any buffers required
3041 	 */
3042 	for (index = 0; index < *count && rval == FC_SUCCESS; index++) {
3043 		/* Allocate all memory needed. */
3044 		ubp = (fc_unsol_buf_t *)kmem_zalloc(
3045 		    sizeof (fc_unsol_buf_t), KM_SLEEP);
3046 		if (ubp == NULL) {
3047 			EL(ha, "failed, FC_FAILURE\n");
3048 			rval = FC_FAILURE;
3049 		} else {
3050 			sp = kmem_zalloc(sizeof (ql_srb_t), KM_SLEEP);
3051 			if (sp == NULL) {
3052 				kmem_free(ubp, sizeof (fc_unsol_buf_t));
3053 				rval = FC_FAILURE;
3054 			} else {
3055 				if (type == FC_TYPE_IS8802_SNAP) {
3056 #ifdef	__sparc
3057 					if (ql_get_dma_mem(ha,
3058 					    &sp->ub_buffer, size,
3059 					    BIG_ENDIAN_DMA,
3060 					    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
3061 						rval = FC_FAILURE;
3062 						kmem_free(ubp,
3063 						    sizeof (fc_unsol_buf_t));
3064 						kmem_free(sp,
3065 						    sizeof (ql_srb_t));
3066 					} else {
3067 						bufp = sp->ub_buffer.bp;
3068 						sp->ub_size = size;
3069 					}
3070 #else
3071 					if (ql_get_dma_mem(ha,
3072 					    &sp->ub_buffer, size,
3073 					    LITTLE_ENDIAN_DMA,
3074 					    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
3075 						rval = FC_FAILURE;
3076 						kmem_free(ubp,
3077 						    sizeof (fc_unsol_buf_t));
3078 						kmem_free(sp,
3079 						    sizeof (ql_srb_t));
3080 					} else {
3081 						bufp = sp->ub_buffer.bp;
3082 						sp->ub_size = size;
3083 					}
3084 #endif
3085 				} else {
3086 					bufp = kmem_zalloc(size, KM_SLEEP);
3087 					if (bufp == NULL) {
3088 						rval = FC_FAILURE;
3089 						kmem_free(ubp,
3090 						    sizeof (fc_unsol_buf_t));
3091 						kmem_free(sp,
3092 						    sizeof (ql_srb_t));
3093 					} else {
3094 						sp->ub_size = size;
3095 					}
3096 				}
3097 			}
3098 		}
3099 
3100 		if (rval == FC_SUCCESS) {
3101 			/* Find next available slot. */
3102 			QL_UB_LOCK(ha);
3103 			while (ha->ub_array[ub_array_index] != NULL) {
3104 				ub_array_index++;
3105 			}
3106 
3107 			ubp->ub_fca_private = (void *)sp;
3108 
3109 			/* init cmd links */
3110 			sp->cmd.base_address = sp;
3111 			sp->cmd.prev = NULL;
3112 			sp->cmd.next = NULL;
3113 			sp->cmd.head = NULL;
3114 
3115 			/* init wdg links */
3116 			sp->wdg.base_address = sp;
3117 			sp->wdg.prev = NULL;
3118 			sp->wdg.next = NULL;
3119 			sp->wdg.head = NULL;
3120 			sp->ha = ha;
3121 
3122 			ubp->ub_buffer = bufp;
3123 			ubp->ub_bufsize = size;
3124 			ubp->ub_port_handle = fca_handle;
3125 			ubp->ub_token = ub_array_index;
3126 
3127 			/* Save the token. */
3128 			tokens[index] = ub_array_index;
3129 
3130 			/* Setup FCA private information. */
3131 			sp->ub_type = type;
3132 			sp->handle = ub_array_index;
3133 			sp->flags |= SRB_UB_IN_FCA;
3134 
3135 			ha->ub_array[ub_array_index] = ubp;
3136 			ha->ub_allocated++;
3137 			ub_updated = TRUE;
3138 			QL_UB_UNLOCK(ha);
3139 		}
3140 	}
3141 
3142 	/* Release adapter state lock. */
3143 	ADAPTER_STATE_UNLOCK(ha);
3144 
3145 	/* IP buffer. */
3146 	if (ub_updated) {
3147 		if ((type == FC_TYPE_IS8802_SNAP) &&
3148 		    (!(CFG_IST(ha, (CFG_CTRL_6322 | CFG_CTRL_25XX))))) {
3149 
3150 			ADAPTER_STATE_LOCK(ha);
3151 			ha->flags |= IP_ENABLED;
3152 			ADAPTER_STATE_UNLOCK(ha);
3153 
3154 			if (!(ha->flags & IP_INITIALIZED)) {
3155 				if (CFG_IST(ha, CFG_CTRL_2425)) {
3156 					ha->ip_init_ctrl_blk.cb24.mtu_size[0] =
3157 					    LSB(ql_ip_mtu);
3158 					ha->ip_init_ctrl_blk.cb24.mtu_size[1] =
3159 					    MSB(ql_ip_mtu);
3160 					ha->ip_init_ctrl_blk.cb24.buf_size[0] =
3161 					    LSB(size);
3162 					ha->ip_init_ctrl_blk.cb24.buf_size[1] =
3163 					    MSB(size);
3164 
3165 					cnt = CHAR_TO_SHORT(
3166 					    ha->ip_init_ctrl_blk.cb24.cc[0],
3167 					    ha->ip_init_ctrl_blk.cb24.cc[1]);
3168 
3169 					if (cnt < *count) {
3170 						ha->ip_init_ctrl_blk.cb24.cc[0]
3171 						    = LSB(*count);
3172 						ha->ip_init_ctrl_blk.cb24.cc[1]
3173 						    = MSB(*count);
3174 					}
3175 				} else {
3176 					ha->ip_init_ctrl_blk.cb.mtu_size[0] =
3177 					    LSB(ql_ip_mtu);
3178 					ha->ip_init_ctrl_blk.cb.mtu_size[1] =
3179 					    MSB(ql_ip_mtu);
3180 					ha->ip_init_ctrl_blk.cb.buf_size[0] =
3181 					    LSB(size);
3182 					ha->ip_init_ctrl_blk.cb.buf_size[1] =
3183 					    MSB(size);
3184 
3185 					cnt = CHAR_TO_SHORT(
3186 					    ha->ip_init_ctrl_blk.cb.cc[0],
3187 					    ha->ip_init_ctrl_blk.cb.cc[1]);
3188 
3189 					if (cnt < *count) {
3190 						ha->ip_init_ctrl_blk.cb.cc[0] =
3191 						    LSB(*count);
3192 						ha->ip_init_ctrl_blk.cb.cc[1] =
3193 						    MSB(*count);
3194 					}
3195 				}
3196 
3197 				(void) ql_initialize_ip(ha);
3198 			}
3199 			ql_isp_rcvbuf(ha);
3200 		}
3201 
3202 		if (CFG_IST(ha, CFG_TARGET_MODE_ENABLE) &&
3203 		    (type == FC_TYPE_SCSI_FCP)) {
3204 			(void) ql_modify_lun(ha);
3205 		}
3206 	}
3207 
3208 	if (rval != FC_SUCCESS) {
3209 		EL(ha, "failed=%xh\n", rval);
3210 	} else {
3211 		/*EMPTY*/
3212 		QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance,
3213 		    ha->vp_index);
3214 	}
3215 	return (rval);
3216 }
3217 
3218 /*
3219  * ql_ub_free
3220  *	Free unsolicited buffers.
3221  *
3222  * Input:
3223  *	fca_handle = handle setup by ql_bind_port().
3224  *	count = number of buffers.
3225  *	tokens = token array for each buffer.
3226  *
3227  * Returns:
3228  *	FC_SUCCESS - the requested buffers have been freed.
3229  *	FC_UNBOUND - the fca_handle specified is not bound.
3230  *	FC_UB_BADTOKEN - an invalid token was encountered.
3231  *			 No buffers have been released.
3232  *
3233  * Context:
3234  *	Kernel context.
3235  */
static int
ql_ub_free(opaque_t fca_handle, uint32_t count, uint64_t tokens[])
{
	ql_adapter_state_t	*ha;
	ql_srb_t		*sp;
	uint32_t		index;
	uint64_t		ub_array_index;
	int			rval = FC_SUCCESS;

	/* Check handle. */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Acquire adapter state lock. */
	ADAPTER_STATE_LOCK(ha);

	/*
	 * Check all returned tokens.  Note that unlike ql_ub_release(),
	 * validation and freeing happen in one pass, so buffers freed
	 * before a bad token is hit stay freed (rval still reports
	 * FC_UB_BADTOKEN).
	 */
	for (index = 0; index < count; index++) {
		fc_unsol_buf_t	*ubp;

		/* Check the token range. */
		if ((ub_array_index = tokens[index]) >=
		    QL_UB_LIMIT) {
			EL(ha, "failed, FC_UB_BADTOKEN\n");
			rval = FC_UB_BADTOKEN;
			break;
		}

		/* Check the unsolicited buffer array. */
		QL_UB_LOCK(ha);
		ubp = ha->ub_array[ub_array_index];

		if (ubp == NULL) {
			EL(ha, "failed, FC_UB_BADTOKEN-2\n");
			rval = FC_UB_BADTOKEN;
			QL_UB_UNLOCK(ha);
			break;
		}

		/* Check the state of the unsolicited buffer. */
		sp = ha->ub_array[ub_array_index]->ub_fca_private;
		sp->flags |= SRB_UB_FREE_REQUESTED;

		/*
		 * Poll (100ms period) until the buffer is back in the
		 * FCA and no callback/acquired activity is pending.
		 * Both locks must be dropped across the delay so the
		 * completion path can make progress.
		 */
		while (!(sp->flags & SRB_UB_IN_FCA) ||
		    (sp->flags & (SRB_UB_CALLBACK |
		    SRB_UB_ACQUIRED))) {
			QL_UB_UNLOCK(ha);
			ADAPTER_STATE_UNLOCK(ha);
			delay(drv_usectohz(100000));
			ADAPTER_STATE_LOCK(ha);
			QL_UB_LOCK(ha);
		}
		ha->ub_array[ub_array_index] = NULL;
		QL_UB_UNLOCK(ha);
		/*
		 * NOTE(review): presumably decrements ha->ub_allocated
		 * and releases the buffer memory -- confirm in
		 * ql_free_unsolicited_buffer().
		 */
		ql_free_unsolicited_buffer(ha, ubp);
	}

	if (rval == FC_SUCCESS) {
		/*
		 * Signal any pending hardware reset when there are
		 * no more unsolicited buffers in use.
		 */
		if (ha->ub_allocated == 0) {
			cv_broadcast(&ha->pha->cv_ub);
		}
	}

	/* Release adapter state lock. */
	ADAPTER_STATE_UNLOCK(ha);

	/*
	 * Inform the firmware about the change of scsi target
	 * mode buffers.
	 */
	if (ha->flags & TARGET_MODE_INITIALIZED) {
		(void) ql_modify_lun(ha);
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
3327 
3328 /*
3329  * ql_ub_release
3330  *	Release unsolicited buffers from FC Transport
3331  *	to FCA for future use.
3332  *
3333  * Input:
3334  *	fca_handle = handle setup by ql_bind_port().
3335  *	count = number of buffers.
3336  *	tokens = token array for each buffer.
3337  *
3338  * Returns:
3339  *	FC_SUCCESS - the requested buffers have been released.
3340  *	FC_UNBOUND - the fca_handle specified is not bound.
3341  *	FC_UB_BADTOKEN - an invalid token was encountered.
3342  *		No buffers have been released.
3343  *
3344  * Context:
3345  *	Kernel context.
3346  */
static int
ql_ub_release(opaque_t fca_handle, uint32_t count, uint64_t tokens[])
{
	ql_adapter_state_t	*ha;
	ql_srb_t		*sp;
	ql_tgt_t		*tq;
	port_id_t		d_id;
	uint32_t		index;
	uint64_t		ub_array_index;
	int			rval = FC_SUCCESS;
	int			ub_ip_updated = FALSE;

	/* Check handle. */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, ": failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Acquire adapter state lock. */
	ADAPTER_STATE_LOCK(ha);
	QL_UB_LOCK(ha);

	/*
	 * Pass 1: validate all returned tokens before releasing
	 * anything, so either every buffer is released or none is.
	 */
	for (index = 0; index < count; index++) {
		/* Check the token range. */
		if ((ub_array_index = tokens[index]) >=
		    QL_UB_LIMIT) {
			EL(ha, "failed, FC_UB_BADTOKEN\n");
			rval = FC_UB_BADTOKEN;
			break;
		}

		/* Check the unsolicited buffer array. */
		if (ha->ub_array[ub_array_index] == NULL) {
			EL(ha, "failed, FC_UB_BADTOKEN-2\n");
			rval = FC_UB_BADTOKEN;
			break;
		}

		/*
		 * Check the state of the unsolicited buffer: one that
		 * is already in the FCA cannot be released again.
		 */
		sp = ha->ub_array[ub_array_index]->ub_fca_private;
		if (sp->flags & SRB_UB_IN_FCA) {
			EL(ha, "failed, FC_UB_BADTOKEN-3\n");
			rval = FC_UB_BADTOKEN;
			break;
		}
	}

	/* Pass 2: if all tokens checkout, release the buffers. */
	if (rval == FC_SUCCESS) {
		/* Check all returned tokens. */
		for (index = 0; index < count; index++) {
			fc_unsol_buf_t	*ubp;

			ub_array_index = tokens[index];
			ubp = ha->ub_array[ub_array_index];
			sp = ubp->ub_fca_private;
			d_id.b24 = ubp->ub_frame.s_id;
			tq = ql_d_id_to_queue(ha, d_id);

			/*
			 * Target mode: if the transport flagged this FCP
			 * exchange as needing a (re)login, respond to the
			 * initiator with a BUSY status CTIO instead of
			 * servicing the exchange.
			 */
			if (sp->ub_type == FC_TYPE_SCSI_FCP &&
			    ubp->ub_resp_flags & FC_UB_RESP_LOGIN_REQUIRED &&
			    tq != NULL) {
				ctio_entry_t	*ctio;

				if (ql_req_pkt(ha, (request_t **)
				    &ctio) == QL_SUCCESS) {
					ctio->entry_type = CTIO_TYPE_2;

					/*
					 * Extended firmware interface uses
					 * 16-bit loop ids; legacy uses 8.
					 */
					if (CFG_IST(ha,
					    CFG_EXT_FW_INTERFACE)) {
						ctio->initiator_id_l =
						    LSB(tq->loop_id);
						ctio->initiator_id_h =
						    MSB(tq->loop_id);
					} else {
						ctio->initiator_id_h =
						    LSB(tq->loop_id);
					}
					ctio->rx_id = ubp->ub_frame.rx_id;
					ctio->flags_l = BIT_7 | BIT_6;
					ctio->flags_h = BIT_7 | BIT_1 | BIT_0;
					ctio->timeout = 0xffff;
					ctio->type.s0_32bit.scsi_status_l =
					    STATUS_BUSY;
					/* Issue command to ISP */
					ql_isp_cmd(ha);
				}
			}

			/* Return the buffer to the FCA's ownership. */
			ubp->ub_resp_flags = 0;
			sp->flags &= ~(SRB_UB_ACQUIRED | SRB_UB_CALLBACK);
			sp->flags |= SRB_UB_IN_FCA;

			/* IP buffer. */
			if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
				ub_ip_updated = TRUE;
			}
		}
	}

	QL_UB_UNLOCK(ha);
	/* Release adapter state lock. */
	ADAPTER_STATE_UNLOCK(ha);

	/*
	 * XXX: We should call ql_isp_rcvbuf() to return a
	 * buffer to ISP only if the number of buffers fall below
	 * the low water mark.
	 */
	if (ub_ip_updated) {
		ql_isp_rcvbuf(ha);
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
3472 
3473 /*
3474  * ql_abort
3475  *	Abort a packet.
3476  *
3477  * Input:
3478  *	fca_handle = handle setup by ql_bind_port().
3479  *	pkt = pointer to fc_packet.
3480  *	flags = KM_SLEEP flag.
3481  *
3482  * Returns:
3483  *	FC_SUCCESS - the packet has successfully aborted.
3484  *	FC_ABORTED - the packet has successfully aborted.
3485  *	FC_ABORTING - the packet is being aborted.
3486  *	FC_ABORT_FAILED - the packet could not be aborted.
3487  *	FC_TRANSPORT_ERROR - a transport error occurred while attempting
3488  *		to abort the packet.
3489  *	FC_BADEXCHANGE - no packet found.
3490  *	FC_UNBOUND - the fca_handle specified is not bound.
3491  *
3492  * Context:
3493  *	Kernel context.
3494  */
static int
ql_abort(opaque_t fca_handle, fc_packet_t *pkt, int flags)
{
	port_id_t		d_id;
	ql_link_t		*link;
	ql_adapter_state_t	*ha, *pha;
	ql_srb_t		*sp;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;
	int			rval = FC_ABORTED;

	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}

	pha = ha->pha;

	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);

	ASSERT(pha->power_level == PM_LEVEL_D0);

	/* Get target queue pointer. */
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	tq = ql_d_id_to_queue(ha, d_id);

	/* No target or loop down -- nothing to abort against. */
	if ((tq == NULL) || (pha->task_daemon_flags & LOOP_DOWN)) {
		if (tq == NULL) {
			EL(ha, "failed, FC_TRANSPORT_ERROR\n");
			rval = FC_TRANSPORT_ERROR;
		} else {
			EL(ha, "failed, FC_OFFLINE\n");
			rval = FC_OFFLINE;
		}
		return (rval);
	}

	sp = (ql_srb_t *)pkt->pkt_fca_private;
	lq = sp->lun_queue;

	/* Set poll flag if sleep wanted. */
	if (flags == KM_SLEEP) {
		sp->flags |= SRB_POLL;
	}

	/* Acquire target queue lock. */
	DEVICE_QUEUE_LOCK(tq);
	REQUEST_RING_LOCK(ha);

	/* If command not already started. */
	if (!(sp->flags & SRB_ISP_STARTED)) {
		/*
		 * Check pending queue for command.  sp is reused as the
		 * scan cursor here: non-NULL afterwards means the SRB
		 * was found and unlinked from a queue.
		 */
		sp = NULL;
		for (link = pha->pending_cmds.first; link != NULL;
		    link = link->next) {
			sp = link->base_address;
			if (sp == (ql_srb_t *)pkt->pkt_fca_private) {
				/* Remove srb from q. */
				ql_remove_link(&pha->pending_cmds, &sp->cmd);
				break;
			} else {
				sp = NULL;
			}
		}
		REQUEST_RING_UNLOCK(ha);

		if (sp == NULL) {
			/* Check for cmd on device queue. */
			for (link = lq->cmd.first; link != NULL;
			    link = link->next) {
				sp = link->base_address;
				if (sp == (ql_srb_t *)pkt->pkt_fca_private) {
					/* Remove srb from q. */
					ql_remove_link(&lq->cmd, &sp->cmd);
					break;
				} else {
					sp = NULL;
				}
			}
		}
		/* Release device lock */
		DEVICE_QUEUE_UNLOCK(tq);

		/* If command on target queue. */
		if (sp != NULL) {
			sp->flags &= ~SRB_IN_DEVICE_QUEUE;

			/* Set return status */
			pkt->pkt_reason = CS_ABORTED;

			/* Complete it here; it never reached the ISP. */
			sp->cmd.next = NULL;
			ql_done(&sp->cmd);
			rval = FC_ABORTED;
		} else {
			EL(ha, "failed, FC_BADEXCHANGE\n");
			rval = FC_BADEXCHANGE;
		}
	} else if (sp->flags & SRB_ISP_COMPLETED) {
		/* Release device queue lock. */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);
		EL(ha, "failed, already done, FC_FAILURE\n");
		rval = FC_FAILURE;
	} else if ((sp->pkt->pkt_cmd_fhdr.r_ctl == R_CTL_SOLICITED_DATA) ||
	    (sp->pkt->pkt_cmd_fhdr.r_ctl == R_CTL_STATUS)) {
		/*
		 * If here, target data/resp ctio is with Fw.
		 * Since firmware is supposed to terminate such I/Os
		 * with an error, we need not do any thing. If FW
		 * decides not to terminate those IOs and simply keep
		 * quite then we need to initiate cleanup here by
		 * calling ql_done.
		 */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);
		rval = FC_ABORTED;
	} else {
		request_t	*ep = pha->request_ring_bp;
		uint16_t	cnt;

		/*
		 * Command is with the ISP.  If its request entry is
		 * still on the request ring, invalidate it so the
		 * firmware skips it, then ask the firmware to abort
		 * the outstanding exchange.
		 */
		if (sp->handle != 0) {
			for (cnt = 0; cnt < REQUEST_ENTRY_CNT; cnt++) {
				if (sp->handle == ddi_get32(
				    pha->hba_buf.acc_handle, &ep->handle)) {
					ep->entry_type = INVALID_ENTRY_TYPE;
					break;
				}
				ep++;
			}
		}

		/* Release device queue lock. */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);

		sp->flags |= SRB_ABORTING;
		(void) ql_abort_command(ha, sp);
		pkt->pkt_reason = CS_ABORTED;
		rval = FC_ABORTED;
	}

	QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);

	return (rval);
}
3642 
3643 /*
3644  * ql_reset
3645  *	Reset link or hardware.
3646  *
3647  * Input:
3648  *	fca_handle = handle setup by ql_bind_port().
3649  *	cmd = reset type command.
3650  *
3651  * Returns:
3652  *	FC_SUCCESS - reset has successfully finished.
3653  *	FC_UNBOUND - the fca_handle specified is not bound.
3654  *	FC_FAILURE - reset failed.
3655  *
3656  * Context:
3657  *	Kernel context.
3658  */
static int
ql_reset(opaque_t fca_handle, uint32_t cmd)
{
	ql_adapter_state_t	*ha;
	int			rval = FC_SUCCESS, rval2;

	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}

	QL_PRINT_10(CE_CONT, "(%d,%d): started, cmd=%d\n", ha->instance,
	    ha->vp_index, cmd);

	ASSERT(ha->power_level == PM_LEVEL_D0);

	switch (cmd) {
	case FC_FCA_CORE:
		/* dump firmware core if specified. */
		if (ha->vp_index == 0) {
			if (ql_dump_firmware(ha) != QL_SUCCESS) {
				EL(ha, "failed, FC_FAILURE\n");
				rval = FC_FAILURE;
			}
		}
		break;
	case FC_FCA_LINK_RESET:
		/* Only reset the loop if it is currently up. */
		if (!(ha->pha->task_daemon_flags & LOOP_DOWN)) {
			if (ql_loop_reset(ha) != QL_SUCCESS) {
				EL(ha, "failed, FC_FAILURE-2\n");
				rval = FC_FAILURE;
			}
		}
		break;
	case FC_FCA_RESET_CORE:
	case FC_FCA_RESET:
		/*
		 * If requested, dump the firmware core first (physical
		 * port only; virtual ports just do a loop reset).
		 */
		if (cmd == FC_FCA_RESET_CORE) {
			if (ha->vp_index != 0) {
				rval2 = ha->pha->task_daemon_flags & LOOP_DOWN
				    ? QL_SUCCESS : ql_loop_reset(ha);
			} else {
				rval2 = ql_dump_firmware(ha);
			}
			if (rval2 != QL_SUCCESS) {
				EL(ha, "failed, FC_FAILURE-3\n");
				rval = FC_FAILURE;
			}
		}

		/*
		 * Free up all unsolicited buffers: notify the bound
		 * port of the pending reset so it releases them.
		 */
		if (ha->ub_allocated != 0) {
			/* Inform to release buffers. */
			ha->state = FC_PORT_SPEED_MASK(ha->state);
			ha->state |= FC_STATE_RESET_REQUESTED;
			if (ha->flags & FCA_BOUND) {
				(ha->bind_info.port_statec_cb)
				    (ha->bind_info.port_handle,
				    ha->state);
			}
		}

		/* Keep only the speed bits of the port state. */
		ha->state = FC_PORT_SPEED_MASK(ha->state);

		/* All buffers freed */
		if (ha->ub_allocated == 0) {
			/* Hardware reset. */
			if (cmd == FC_FCA_RESET) {
				if (ha->vp_index == 0) {
					(void) ql_abort_isp(ha);
				} else if (!(ha->pha->task_daemon_flags &
				    LOOP_DOWN)) {
					(void) ql_loop_reset(ha);
				}
			}

			/* Inform that the hardware has been reset */
			ha->state |= FC_STATE_RESET;
		} else {
			/*
			 * the port driver expects an online if
			 * buffers are not freed.
			 */
			if (ha->topology & QL_LOOP_CONNECTION) {
				ha->state |= FC_STATE_LOOP;
			} else {
				ha->state |= FC_STATE_ONLINE;
			}
		}

		/* Have the task daemon propagate the state change. */
		TASK_DAEMON_LOCK(ha);
		ha->task_daemon_flags |= FC_STATE_CHANGE;
		TASK_DAEMON_UNLOCK(ha);

		ql_awaken_task_daemon(ha, NULL, FC_STATE_CHANGE, 0);

		break;
	default:
		EL(ha, "unknown cmd=%xh\n", cmd);
		break;
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "cmd=%xh, failed=%xh\n", cmd, rval);
	} else {
		/*EMPTY*/
		QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance,
		    ha->vp_index);
	}

	return (rval);
}
3773 
3774 /*
3775  * ql_port_manage
3776  *	Perform port management or diagnostics.
3777  *
3778  * Input:
3779  *	fca_handle = handle setup by ql_bind_port().
3780  *	cmd = pointer to command structure.
3781  *
3782  * Returns:
3783  *	FC_SUCCESS - the request completed successfully.
3784  *	FC_FAILURE - the request did not complete successfully.
3785  *	FC_UNBOUND - the fca_handle specified is not bound.
3786  *
3787  * Context:
3788  *	Kernel context.
3789  */
3790 static int
3791 ql_port_manage(opaque_t fca_handle, fc_fca_pm_t *cmd)
3792 {
3793 	clock_t			timer;
3794 	uint16_t		index;
3795 	uint32_t		*bp;
3796 	port_id_t		d_id;
3797 	ql_link_t		*link;
3798 	ql_adapter_state_t	*ha, *pha;
3799 	ql_tgt_t		*tq;
3800 	dma_mem_t		buffer_xmt, buffer_rcv;
3801 	size_t			length;
3802 	uint32_t		cnt;
3803 	char			buf[80];
3804 	lbp_t			*lb;
3805 	ql_mbx_data_t		mr;
3806 	app_mbx_cmd_t		*mcp;
3807 	int			i0;
3808 	uint8_t			*bptr;
3809 	int			rval2, rval = FC_SUCCESS;
3810 	uint32_t		opcode;
3811 
3812 	ha = ql_fca_handle_to_state(fca_handle);
3813 	if (ha == NULL) {
3814 		QL_PRINT_2(CE_CONT, ": failed, no adapter=%ph\n",
3815 		    (void *)fca_handle);
3816 		return (FC_UNBOUND);
3817 	}
3818 	pha = ha->pha;
3819 
3820 	QL_PRINT_3(CE_CONT, "(%d): started=%xh\n", ha->instance,
3821 	    cmd->pm_cmd_code);
3822 
3823 	ASSERT(pha->power_level == PM_LEVEL_D0);
3824 
3825 	ql_awaken_task_daemon(ha, NULL, DRIVER_STALL, 0);
3826 
3827 	/*
3828 	 * Wait for all outstanding commands to complete
3829 	 */
3830 	index = (uint16_t)ql_wait_outstanding(ha);
3831 
3832 	if (index != MAX_OUTSTANDING_COMMANDS) {
3833 		ql_awaken_task_daemon(ha, NULL, 0, DRIVER_STALL);
3834 		ql_restart_queues(ha);
3835 		EL(ha, "failed, FC_TRAN_BUSY\n");
3836 		return (FC_TRAN_BUSY);
3837 	}
3838 
3839 	switch (cmd->pm_cmd_code) {
3840 	case FC_PORT_BYPASS:
3841 		d_id.b24 = *cmd->pm_cmd_buf;
3842 		tq = ql_d_id_to_queue(ha, d_id);
3843 		if (tq == NULL || ql_loop_port_bypass(ha, tq) != QL_SUCCESS) {
3844 			EL(ha, "failed, FC_PORT_BYPASS FC_FAILURE\n");
3845 			rval = FC_FAILURE;
3846 		}
3847 		break;
3848 	case FC_PORT_UNBYPASS:
3849 		d_id.b24 = *cmd->pm_cmd_buf;
3850 		tq = ql_d_id_to_queue(ha, d_id);
3851 		if (tq == NULL || ql_loop_port_enable(ha, tq) != QL_SUCCESS) {
3852 			EL(ha, "failed, FC_PORT_UNBYPASS FC_FAILURE\n");
3853 			rval = FC_FAILURE;
3854 		}
3855 		break;
3856 	case FC_PORT_GET_FW_REV:
3857 		(void) sprintf(buf, "%d.%d.%d", pha->fw_major_version,
3858 		    pha->fw_minor_version, pha->fw_subminor_version);
3859 		length = strlen(buf) + 1;
3860 		if (cmd->pm_data_len < length) {
3861 			cmd->pm_data_len = length;
3862 			EL(ha, "failed, FC_PORT_GET_FW_REV FC_FAILURE\n");
3863 			rval = FC_FAILURE;
3864 		} else {
3865 			(void) strcpy(cmd->pm_data_buf, buf);
3866 		}
3867 		break;
3868 
3869 	case FC_PORT_GET_FCODE_REV: {
3870 		caddr_t		fcode_ver_buf = NULL;
3871 
3872 		i0 = 0;
3873 		/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
3874 		rval2 = ddi_getlongprop(DDI_DEV_T_ANY, ha->dip,
3875 		    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version",
3876 		    (caddr_t)&fcode_ver_buf, &i0);
3877 		length = (uint_t)i0;
3878 
3879 		if (rval2 != DDI_PROP_SUCCESS) {
3880 			EL(ha, "failed, getting version = %xh\n", rval2);
3881 			length = 20;
3882 			fcode_ver_buf = kmem_alloc(length, KM_SLEEP);
3883 			if (fcode_ver_buf != NULL) {
3884 				(void) sprintf(fcode_ver_buf,
3885 				    "NO FCODE FOUND");
3886 			}
3887 		}
3888 
3889 		if (cmd->pm_data_len < length) {
3890 			EL(ha, "length error, FC_PORT_GET_FCODE_REV "
3891 			    "dst=%ld, src=%ld\n", cmd->pm_data_len, length);
3892 			cmd->pm_data_len = length;
3893 			rval = FC_FAILURE;
3894 		} else if (fcode_ver_buf != NULL) {
3895 			bcopy((void *)fcode_ver_buf, (void *)cmd->pm_data_buf,
3896 			    length);
3897 		}
3898 
3899 		if (fcode_ver_buf != NULL) {
3900 			kmem_free(fcode_ver_buf, length);
3901 		}
3902 		break;
3903 	}
3904 
3905 	case FC_PORT_GET_DUMP:
3906 		QL_DUMP_LOCK(pha);
3907 		if (cmd->pm_data_len < (size_t)pha->risc_dump_size) {
3908 			EL(ha, "failed, FC_PORT_GET_DUMP incorrect "
3909 			    "length=%lxh\n", cmd->pm_data_len);
3910 			cmd->pm_data_len = pha->risc_dump_size;
3911 			rval = FC_FAILURE;
3912 		} else if (pha->ql_dump_state & QL_DUMPING) {
3913 			EL(ha, "failed, FC_PORT_GET_DUMP FC_TRAN_BUSY\n");
3914 			rval = FC_TRAN_BUSY;
3915 		} else if (pha->ql_dump_state & QL_DUMP_VALID) {
3916 			(void) ql_ascii_fw_dump(ha, cmd->pm_data_buf);
3917 			pha->ql_dump_state |= QL_DUMP_UPLOADED;
3918 		} else {
3919 			EL(ha, "failed, FC_PORT_GET_DUMP no dump file\n");
3920 			rval = FC_FAILURE;
3921 		}
3922 		QL_DUMP_UNLOCK(pha);
3923 		break;
3924 	case FC_PORT_FORCE_DUMP:
3925 		PORTMANAGE_LOCK(ha);
3926 		if (ql_dump_firmware(ha) != QL_SUCCESS) {
3927 			EL(ha, "failed, FC_PORT_FORCE_DUMP FC_FAILURE\n");
3928 			rval = FC_FAILURE;
3929 		}
3930 		PORTMANAGE_UNLOCK(ha);
3931 		break;
3932 	case FC_PORT_DOWNLOAD_FW:
3933 		PORTMANAGE_LOCK(ha);
3934 		if (CFG_IST(ha, CFG_CTRL_2425)) {
3935 			if (ql_24xx_load_flash(ha, (uint8_t *)cmd->pm_data_buf,
3936 			    (uint32_t)cmd->pm_data_len,
3937 			    FLASH_24XX_FIRMWARE_ADDR) != QL_SUCCESS) {
3938 				EL(ha, "failed, FC_PORT_DOWNLOAD_FW\n");
3939 				rval = FC_FAILURE;
3940 			}
3941 			ql_reset_chip(ha);
3942 			(void) ql_abort_isp(ha);
3943 		} else {
3944 			/* Save copy of the firmware. */
3945 			if (pha->risc_code != NULL) {
3946 				kmem_free(pha->risc_code, pha->risc_code_size);
3947 				pha->risc_code = NULL;
3948 				pha->risc_code_size = 0;
3949 			}
3950 
3951 			pha->risc_code = kmem_alloc(cmd->pm_data_len,
3952 			    KM_SLEEP);
3953 			if (pha->risc_code != NULL) {
3954 				pha->risc_code_size =
3955 				    (uint32_t)cmd->pm_data_len;
3956 				bcopy(cmd->pm_data_buf, pha->risc_code,
3957 				    cmd->pm_data_len);
3958 
3959 				/* Do abort to force reload. */
3960 				ql_reset_chip(ha);
3961 				if (ql_abort_isp(ha) != QL_SUCCESS) {
3962 					kmem_free(pha->risc_code,
3963 					    pha->risc_code_size);
3964 					pha->risc_code = NULL;
3965 					pha->risc_code_size = 0;
3966 					ql_reset_chip(ha);
3967 					(void) ql_abort_isp(ha);
3968 					EL(ha, "failed, FC_PORT_DOWNLOAD_FW"
3969 					    " FC_FAILURE\n");
3970 					rval = FC_FAILURE;
3971 				}
3972 			}
3973 		}
3974 		PORTMANAGE_UNLOCK(ha);
3975 		break;
3976 	case FC_PORT_GET_DUMP_SIZE:
3977 		bp = (uint32_t *)cmd->pm_data_buf;
3978 		*bp = pha->risc_dump_size;
3979 		break;
3980 	case FC_PORT_DIAG:
3981 		/*
3982 		 * Prevents concurrent diags
3983 		 */
3984 		PORTMANAGE_LOCK(ha);
3985 
3986 		/* Wait for suspension to end. */
3987 		for (timer = 0; timer < 3000 &&
3988 		    pha->task_daemon_flags & QL_LOOP_TRANSITION; timer++) {
3989 			ql_delay(ha, 10000);
3990 		}
3991 
3992 		if (pha->task_daemon_flags & QL_LOOP_TRANSITION) {
3993 			EL(ha, "failed, FC_TRAN_BUSY-2\n");
3994 			rval = FC_TRAN_BUSY;
3995 			PORTMANAGE_UNLOCK(ha);
3996 			break;
3997 		}
3998 
3999 		switch (cmd->pm_cmd_flags) {
4000 		case QL_DIAG_EXEFMW:
4001 			if (ql_start_firmware(ha) != QL_SUCCESS) {
4002 				EL(ha, "failed, QL_DIAG_EXEFMW FC_FAILURE\n");
4003 				rval = FC_FAILURE;
4004 			}
4005 			break;
4006 		case QL_DIAG_CHKCMDQUE:
4007 			for (i0 = 1, cnt = 0; i0 < MAX_OUTSTANDING_COMMANDS;
4008 			    i0++) {
4009 				cnt += (pha->outstanding_cmds[i0] != NULL);
4010 			}
4011 			if (cnt != 0) {
4012 				EL(ha, "failed, QL_DIAG_CHKCMDQUE "
4013 				    "FC_FAILURE\n");
4014 				rval = FC_FAILURE;
4015 			}
4016 			break;
4017 		case QL_DIAG_FMWCHKSUM:
4018 			if (ql_verify_checksum(ha) != QL_SUCCESS) {
4019 				EL(ha, "failed, QL_DIAG_FMWCHKSUM "
4020 				    "FC_FAILURE\n");
4021 				rval = FC_FAILURE;
4022 			}
4023 			break;
4024 		case QL_DIAG_SLFTST:
4025 			if (ql_online_selftest(ha) != QL_SUCCESS) {
4026 				EL(ha, "failed, QL_DIAG_SLFTST FC_FAILURE\n");
4027 				rval = FC_FAILURE;
4028 			}
4029 			ql_reset_chip(ha);
4030 			(void) ql_abort_isp(ha);
4031 			break;
4032 		case QL_DIAG_REVLVL:
4033 			if (cmd->pm_stat_len <
4034 			    sizeof (ql_adapter_revlvl_t)) {
4035 				EL(ha, "failed, QL_DIAG_REVLVL FC_NOMEM, "
4036 				    "slen=%lxh, rlvllen=%lxh\n",
4037 				    cmd->pm_stat_len,
4038 				    sizeof (ql_adapter_revlvl_t));
4039 				rval = FC_NOMEM;
4040 			} else {
4041 				bcopy((void *)&(pha->adapter_stats->revlvl),
4042 				    cmd->pm_stat_buf,
4043 				    (size_t)cmd->pm_stat_len);
4044 				cmd->pm_stat_len =
4045 				    sizeof (ql_adapter_revlvl_t);
4046 			}
4047 			break;
4048 		case QL_DIAG_LPBMBX:
4049 
4050 			if (cmd->pm_data_len != sizeof (struct app_mbx_cmd)) {
4051 				EL(ha, "failed, QL_DIAG_LPBMBX "
4052 				    "FC_INVALID_REQUEST, pmlen=%lxh, "
4053 				    "reqd=%lxh\n", cmd->pm_data_len,
4054 				    sizeof (struct app_mbx_cmd));
4055 				rval = FC_INVALID_REQUEST;
4056 				break;
4057 			}
4058 			/*
4059 			 * Don't do the wrap test on a 2200 when the
4060 			 * firmware is running.
4061 			 */
4062 			if (!CFG_IST(ha, CFG_CTRL_2200)) {
4063 				mcp = (app_mbx_cmd_t *)cmd->pm_data_buf;
4064 				mr.mb[1] = mcp->mb[1];
4065 				mr.mb[2] = mcp->mb[2];
4066 				mr.mb[3] = mcp->mb[3];
4067 				mr.mb[4] = mcp->mb[4];
4068 				mr.mb[5] = mcp->mb[5];
4069 				mr.mb[6] = mcp->mb[6];
4070 				mr.mb[7] = mcp->mb[7];
4071 
4072 				bcopy(&mr.mb[0], &mr.mb[10],
4073 				    sizeof (uint16_t) * 8);
4074 				if (ql_mbx_wrap_test(ha, &mr) != QL_SUCCESS) {
4075 					EL(ha, "failed, QL_DIAG_LPBMBX "
4076 					    "FC_FAILURE\n");
4077 					rval = FC_FAILURE;
4078 					break;
4079 				}
4080 				if (mr.mb[i0] != mr.mb[i0 + 10]) {
4081 					EL(ha, "failed, QL_DIAG_LPBMBX "
4082 					    "FC_FAILURE-2\n");
4083 
4084 					(void) ql_flash_errlog(ha,
4085 					    FLASH_ERRLOG_ISP_ERR, 0,
4086 					    RD16_IO_REG(ha, hccr),
4087 					    RD16_IO_REG(ha, istatus));
4088 
4089 					rval = FC_FAILURE;
4090 					break;
4091 				}
4092 			}
4093 			(void) ql_abort_isp(ha);
4094 			break;
4095 		case QL_DIAG_LPBDTA:
4096 			/*
4097 			 * For loopback data, we receive the
4098 			 * data back in pm_stat_buf. This provides
4099 			 * the user an opportunity to compare the
4100 			 * transmitted and received data.
4101 			 *
4102 			 * NB: lb->options are:
4103 			 *	0 --> Ten bit loopback
4104 			 *	1 --> One bit loopback
4105 			 *	2 --> External loopback
4106 			 */
4107 			if (cmd->pm_data_len > 65536) {
4108 				rval = FC_TOOMANY;
4109 				EL(ha, "failed, QL_DIAG_LPBDTA "
4110 				    "FC_TOOMANY=%lxh\n", cmd->pm_data_len);
4111 				break;
4112 			}
4113 			if (ql_get_dma_mem(ha, &buffer_xmt,
4114 			    (uint32_t)cmd->pm_data_len, LITTLE_ENDIAN_DMA,
4115 			    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4116 				EL(ha, "failed, QL_DIAG_LPBDTA FC_NOMEM\n");
4117 				rval = FC_NOMEM;
4118 				break;
4119 			}
4120 			if (ql_get_dma_mem(ha, &buffer_rcv,
4121 			    (uint32_t)cmd->pm_data_len, LITTLE_ENDIAN_DMA,
4122 			    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4123 				EL(ha, "failed, QL_DIAG_LPBDTA FC_NOMEM-2\n");
4124 				rval = FC_NOMEM;
4125 				break;
4126 			}
4127 			ddi_rep_put8(buffer_xmt.acc_handle,
4128 			    (uint8_t *)cmd->pm_data_buf,
4129 			    (uint8_t *)buffer_xmt.bp,
4130 			    cmd->pm_data_len, DDI_DEV_AUTOINCR);
4131 
4132 			/* 22xx's adapter must be in loop mode for test. */
4133 			if (CFG_IST(ha, CFG_CTRL_2200)) {
4134 				bptr = &ha->init_ctrl_blk.cb.add_fw_opt[0];
4135 				if (ha->flags & POINT_TO_POINT ||
4136 				    (ha->task_daemon_flags & LOOP_DOWN &&
4137 				    *bptr & (BIT_6 | BIT_5 | BIT_4))) {
4138 					cnt = *bptr;
4139 					*bptr = (uint8_t)
4140 					    (*bptr & ~(BIT_6|BIT_5|BIT_4));
4141 					(void) ql_abort_isp(ha);
4142 					*bptr = (uint8_t)cnt;
4143 				}
4144 			}
4145 
4146 			/* Shutdown IP. */
4147 			if (pha->flags & IP_INITIALIZED) {
4148 				(void) ql_shutdown_ip(pha);
4149 			}
4150 
4151 			lb = (lbp_t *)cmd->pm_cmd_buf;
4152 			lb->transfer_count =
4153 			    (uint32_t)cmd->pm_data_len;
4154 			lb->transfer_segment_count = 0;
4155 			lb->receive_segment_count = 0;
4156 			lb->transfer_data_address =
4157 			    buffer_xmt.cookie.dmac_address;
4158 			lb->receive_data_address =
4159 			    buffer_rcv.cookie.dmac_address;
4160 
4161 			if ((lb->options & 7) == 2 &&
4162 			    pha->task_daemon_flags &
4163 			    (QL_LOOP_TRANSITION | LOOP_DOWN)) {
4164 				/* Loop must be up for external */
4165 				EL(ha, "failed, QL_DIAG_LPBDTA FC_TRAN_BUSY\n");
4166 				rval = FC_TRAN_BUSY;
4167 			} else if (ql_loop_back(ha, lb,
4168 			    buffer_xmt.cookie.dmac_notused,
4169 			    buffer_rcv.cookie.dmac_notused) == QL_SUCCESS) {
4170 				bzero((void *)cmd->pm_stat_buf,
4171 				    cmd->pm_stat_len);
4172 				ddi_rep_get8(buffer_rcv.acc_handle,
4173 				    (uint8_t *)cmd->pm_stat_buf,
4174 				    (uint8_t *)buffer_rcv.bp,
4175 				    cmd->pm_stat_len, DDI_DEV_AUTOINCR);
4176 			} else {
4177 				EL(ha, "failed, QL_DIAG_LPBDTA FC_FAILURE\n");
4178 				rval = FC_FAILURE;
4179 			}
4180 
4181 			ql_free_phys(ha, &buffer_xmt);
4182 			ql_free_phys(ha, &buffer_rcv);
4183 
4184 			/* Needed to recover the f/w */
4185 			(void) ql_abort_isp(ha);
4186 
4187 			/* Restart IP if it was shutdown. */
4188 			if (pha->flags & IP_ENABLED &&
4189 			    !(pha->flags & IP_INITIALIZED)) {
4190 				(void) ql_initialize_ip(pha);
4191 				ql_isp_rcvbuf(pha);
4192 			}
4193 
4194 			break;
4195 		case QL_DIAG_ECHO: {
4196 			/*
4197 			 * issue an echo command with a user supplied
4198 			 * data pattern and destination address
4199 			 */
4200 			echo_t		echo;		/* temp echo struct */
4201 
4202 			/* Setup echo cmd & adjust for platform */
4203 			opcode = QL_ECHO_CMD;
4204 			BIG_ENDIAN_32(&opcode);
4205 
4206 			/*
4207 			 * due to limitations in the ql
			 * firmware the echo data field is
4209 			 * limited to 220
4210 			 */
4211 			if ((cmd->pm_cmd_len > QL_ECHO_CMD_LENGTH) ||
4212 			    (cmd->pm_stat_len > QL_ECHO_CMD_LENGTH)) {
4213 				EL(ha, "failed, QL_DIAG_ECHO FC_TOOMANY, "
4214 				    "cmdl1=%lxh, statl2=%lxh\n",
4215 				    cmd->pm_cmd_len, cmd->pm_stat_len);
4216 				rval = FC_TOOMANY;
4217 				break;
4218 			}
4219 
4220 			/*
4221 			 * the input data buffer has the user
4222 			 * supplied data pattern.  The "echoed"
4223 			 * data will be DMAed into the output
4224 			 * data buffer.  Therefore the length
4225 			 * of the output buffer must be equal
4226 			 * to or greater then the input buffer
4227 			 * length
4228 			 */
4229 			if (cmd->pm_cmd_len > cmd->pm_stat_len) {
4230 				EL(ha, "failed, QL_DIAG_ECHO FC_TOOMANY-2,"
4231 				    " cmdl1=%lxh, statl2=%lxh\n",
4232 				    cmd->pm_cmd_len, cmd->pm_stat_len);
4233 				rval = FC_TOOMANY;
4234 				break;
4235 			}
4236 			/* add four bytes for the opcode */
4237 			echo.transfer_count = (uint32_t)(cmd->pm_cmd_len + 4);
4238 
4239 			/*
4240 			 * are we 32 or 64 bit addressed???
4241 			 * We need to get the appropriate
4242 			 * DMA and set the command options;
4243 			 * 64 bit (bit 6) or 32 bit
4244 			 * (no bit 6) addressing.
4245 			 * while we are at it lets ask for
4246 			 * real echo (bit 15)
4247 			 */
4248 			echo.options = BIT_15;
4249 			if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
4250 				echo.options = (uint16_t)
4251 				    (echo.options | BIT_6);
4252 			}
4253 
4254 			/*
4255 			 * Set up the DMA mappings for the
4256 			 * output and input data buffers.
4257 			 * First the output buffer
4258 			 */
4259 			if (ql_get_dma_mem(ha, &buffer_xmt,
4260 			    (uint32_t)(cmd->pm_data_len + 4),
4261 			    LITTLE_ENDIAN_DMA,
4262 			    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4263 				EL(ha, "failed, QL_DIAG_ECHO FC_NOMEM\n");
4264 				rval = FC_NOMEM;
4265 				break;
4266 			}
4267 			echo.transfer_data_address = buffer_xmt.cookie;
4268 
4269 			/* Next the input buffer */
4270 			if (ql_get_dma_mem(ha, &buffer_rcv,
4271 			    (uint32_t)(cmd->pm_data_len + 4),
4272 			    LITTLE_ENDIAN_DMA,
4273 			    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4274 				/*
4275 				 * since we could not allocate
4276 				 * DMA space for the input
4277 				 * buffer we need to clean up
4278 				 * by freeing the DMA space
4279 				 * we allocated for the output
4280 				 * buffer
4281 				 */
4282 				ql_free_phys(ha, &buffer_xmt);
4283 				EL(ha, "failed, QL_DIAG_ECHO FC_NOMEM-2\n");
4284 				rval = FC_NOMEM;
4285 				break;
4286 			}
4287 			echo.receive_data_address = buffer_rcv.cookie;
4288 
4289 			/*
4290 			 * copy the 4 byte ECHO op code to the
4291 			 * allocated DMA space
4292 			 */
4293 			ddi_rep_put8(buffer_xmt.acc_handle, (uint8_t *)&opcode,
4294 			    (uint8_t *)buffer_xmt.bp, 4, DDI_DEV_AUTOINCR);
4295 
4296 			/*
4297 			 * copy the user supplied data to the
4298 			 * allocated DMA space
4299 			 */
4300 			ddi_rep_put8(buffer_xmt.acc_handle,
4301 			    (uint8_t *)cmd->pm_cmd_buf,
4302 			    (uint8_t *)buffer_xmt.bp + 4, cmd->pm_cmd_len,
4303 			    DDI_DEV_AUTOINCR);
4304 
4305 			/* Shutdown IP. */
4306 			if (pha->flags & IP_INITIALIZED) {
4307 				(void) ql_shutdown_ip(pha);
4308 			}
4309 
4310 			/* send the echo */
4311 			if (ql_echo(ha, &echo) == QL_SUCCESS) {
4312 				ddi_rep_put8(buffer_rcv.acc_handle,
4313 				    (uint8_t *)buffer_rcv.bp + 4,
4314 				    (uint8_t *)cmd->pm_stat_buf,
4315 				    cmd->pm_stat_len, DDI_DEV_AUTOINCR);
4316 			} else {
4317 				EL(ha, "failed, QL_DIAG_ECHO FC_FAILURE\n");
4318 				rval = FC_FAILURE;
4319 			}
4320 
4321 			/* Restart IP if it was shutdown. */
4322 			if (pha->flags & IP_ENABLED &&
4323 			    !(pha->flags & IP_INITIALIZED)) {
4324 				(void) ql_initialize_ip(pha);
4325 				ql_isp_rcvbuf(pha);
4326 			}
4327 			/* free up our DMA buffers */
4328 			ql_free_phys(ha, &buffer_xmt);
4329 			ql_free_phys(ha, &buffer_rcv);
4330 			break;
4331 		}
4332 		default:
4333 			EL(ha, "unknown=%xh, FC_PORT_DIAG "
4334 			    "FC_INVALID_REQUEST\n", cmd->pm_cmd_flags);
4335 			rval = FC_INVALID_REQUEST;
4336 			break;
4337 		}
4338 		PORTMANAGE_UNLOCK(ha);
4339 		break;
4340 	case FC_PORT_LINK_STATE:
4341 		/* Check for name equal to null. */
4342 		for (index = 0; index < 8 && index < cmd->pm_cmd_len;
4343 		    index++) {
4344 			if (cmd->pm_cmd_buf[index] != 0) {
4345 				break;
4346 			}
4347 		}
4348 
4349 		/* If name not null. */
4350 		if (index < 8 && cmd->pm_cmd_len >= 8) {
4351 			/* Locate device queue. */
4352 			tq = NULL;
4353 			for (index = 0; index < DEVICE_HEAD_LIST_SIZE &&
4354 			    tq == NULL; index++) {
4355 				for (link = ha->dev[index].first; link != NULL;
4356 				    link = link->next) {
4357 					tq = link->base_address;
4358 
4359 					if (bcmp((void *)&tq->port_name[0],
4360 					    (void *)cmd->pm_cmd_buf, 8) == 0) {
4361 						break;
4362 					} else {
4363 						tq = NULL;
4364 					}
4365 				}
4366 			}
4367 
4368 			if (tq != NULL && VALID_DEVICE_ID(ha, tq->loop_id)) {
4369 				cmd->pm_stat_buf[0] = (int8_t)LSB(ha->state);
4370 				cmd->pm_stat_buf[1] = (int8_t)MSB(ha->state);
4371 			} else {
4372 				cnt = FC_PORT_SPEED_MASK(ha->state) |
4373 				    FC_STATE_OFFLINE;
4374 				cmd->pm_stat_buf[0] = (int8_t)LSB(cnt);
4375 				cmd->pm_stat_buf[1] = (int8_t)MSB(cnt);
4376 			}
4377 		} else {
4378 			cmd->pm_stat_buf[0] = (int8_t)LSB(ha->state);
4379 			cmd->pm_stat_buf[1] = (int8_t)MSB(ha->state);
4380 		}
4381 		break;
4382 	case FC_PORT_INITIALIZE:
4383 		if (cmd->pm_cmd_len >= 8) {
4384 			tq = NULL;
4385 			for (index = 0; index < DEVICE_HEAD_LIST_SIZE &&
4386 			    tq == NULL; index++) {
4387 				for (link = ha->dev[index].first; link != NULL;
4388 				    link = link->next) {
4389 					tq = link->base_address;
4390 
4391 					if (bcmp((void *)&tq->port_name[0],
4392 					    (void *)cmd->pm_cmd_buf, 8) == 0) {
4393 						if (!VALID_DEVICE_ID(ha,
4394 						    tq->loop_id)) {
4395 							tq = NULL;
4396 						}
4397 						break;
4398 					} else {
4399 						tq = NULL;
4400 					}
4401 				}
4402 			}
4403 
4404 			if (tq == NULL || ql_target_reset(ha, tq,
4405 			    ha->loop_reset_delay) != QL_SUCCESS) {
4406 				EL(ha, "failed, FC_PORT_INITIALIZE "
4407 				    "FC_FAILURE\n");
4408 				rval = FC_FAILURE;
4409 			}
4410 		} else {
4411 			EL(ha, "failed, FC_PORT_INITIALIZE FC_FAILURE-2, "
4412 			    "clen=%lxh\n", cmd->pm_cmd_len);
4413 
4414 			rval = FC_FAILURE;
4415 		}
4416 		break;
4417 	case FC_PORT_RLS:
4418 		if (cmd->pm_data_len < sizeof (fc_rls_acc_t)) {
4419 			EL(ha, "failed, buffer size passed: %lxh, "
4420 			    "req: %lxh\n", cmd->pm_data_len,
4421 			    (sizeof (fc_rls_acc_t)));
4422 			rval = FC_FAILURE;
4423 		} else if (LOOP_NOT_READY(pha)) {
4424 			EL(ha, "loop NOT ready\n");
4425 			bzero(cmd->pm_data_buf, cmd->pm_data_len);
4426 		} else if (ql_get_link_status(ha, ha->loop_id,
4427 		    cmd->pm_data_len, cmd->pm_data_buf, 0) != QL_SUCCESS) {
4428 			EL(ha, "failed, FC_PORT_RLS FC_FAILURE\n");
4429 			rval = FC_FAILURE;
4430 #ifdef _BIG_ENDIAN
4431 		} else {
4432 			fc_rls_acc_t		*rls;
4433 
4434 			rls = (fc_rls_acc_t *)cmd->pm_data_buf;
4435 			LITTLE_ENDIAN_32(&rls->rls_link_fail);
4436 			LITTLE_ENDIAN_32(&rls->rls_sync_loss);
4437 			LITTLE_ENDIAN_32(&rls->rls_sig_loss);
4438 			LITTLE_ENDIAN_32(&rls->rls_invalid_crc);
4439 #endif /* _BIG_ENDIAN */
4440 		}
4441 		break;
4442 	case FC_PORT_GET_NODE_ID:
4443 		if (ql_get_rnid_params(ha, cmd->pm_data_len,
4444 		    cmd->pm_data_buf) != QL_SUCCESS) {
4445 			EL(ha, "failed, FC_PORT_GET_NODE_ID FC_FAILURE\n");
4446 			rval = FC_FAILURE;
4447 		}
4448 		break;
4449 	case FC_PORT_SET_NODE_ID:
4450 		if (ql_set_rnid_params(ha, cmd->pm_data_len,
4451 		    cmd->pm_data_buf) != QL_SUCCESS) {
4452 			EL(ha, "failed, FC_PORT_SET_NODE_ID FC_FAILURE\n");
4453 			rval = FC_FAILURE;
4454 		}
4455 		break;
4456 	case FC_PORT_DOWNLOAD_FCODE:
4457 		PORTMANAGE_LOCK(ha);
4458 		if ((CFG_IST(ha, CFG_CTRL_2425)) == 0) {
4459 			rval = ql_load_flash(ha, (uint8_t *)cmd->pm_data_buf,
4460 			    (uint32_t)cmd->pm_data_len);
4461 		} else {
4462 			if (cmd->pm_data_buf[0] == 4 &&
4463 			    cmd->pm_data_buf[8] == 0 &&
4464 			    cmd->pm_data_buf[9] == 0x10 &&
4465 			    cmd->pm_data_buf[10] == 0 &&
4466 			    cmd->pm_data_buf[11] == 0) {
4467 				rval = ql_24xx_load_flash(ha,
4468 				    (uint8_t *)cmd->pm_data_buf,
4469 				    (uint32_t)cmd->pm_data_len,
4470 				    FLASH_24XX_FIRMWARE_ADDR);
4471 			} else {
4472 				rval = ql_24xx_load_flash(ha,
4473 				    (uint8_t *)cmd->pm_data_buf,
4474 				    (uint32_t)cmd->pm_data_len, 0);
4475 			}
4476 		}
4477 
4478 		if (rval != QL_SUCCESS) {
4479 			EL(ha, "failed, FC_PORT_DOWNLOAD_FCODE FC_FAILURE\n");
4480 			rval = FC_FAILURE;
4481 		} else {
4482 			rval = FC_SUCCESS;
4483 		}
4484 		ql_reset_chip(ha);
4485 		(void) ql_abort_isp(ha);
4486 		PORTMANAGE_UNLOCK(ha);
4487 		break;
4488 	default:
4489 		EL(ha, "unknown=%xh, FC_BADCMD\n", cmd->pm_cmd_code);
4490 		rval = FC_BADCMD;
4491 		break;
4492 	}
4493 
4494 	/* Wait for suspension to end. */
4495 	ql_awaken_task_daemon(ha, NULL, 0, DRIVER_STALL);
4496 	timer = 0;
4497 
4498 	while (timer++ < 3000 &&
4499 	    ha->task_daemon_flags & (QL_LOOP_TRANSITION | DRIVER_STALL)) {
4500 		ql_delay(ha, 10000);
4501 	}
4502 
4503 	ql_restart_queues(ha);
4504 
4505 	if (rval != FC_SUCCESS) {
4506 		EL(ha, "failed, rval = %xh\n", rval);
4507 	} else {
4508 		/*EMPTY*/
4509 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
4510 	}
4511 
4512 	return (rval);
4513 }
4514 
4515 static opaque_t
4516 ql_get_device(opaque_t fca_handle, fc_portid_t d_id)
4517 {
4518 	port_id_t		id;
4519 	ql_adapter_state_t	*ha;
4520 
4521 	id.r.rsvd_1 = 0;
4522 	id.b24 = d_id.port_id;
4523 
4524 	ha = ql_fca_handle_to_state(fca_handle);
4525 	if (ha == NULL) {
4526 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
4527 		    (void *)fca_handle);
4528 		return (NULL);
4529 	}
4530 
4531 	return (ql_d_id_to_queue(ha, id));
4532 }
4533 
4534 /*
4535  * ql_notify
4536  *	Receive notifications from ULPs regarding particular actions
4537  *
4538  * Input:
4539  *	fca_handle = handle set up by ql_bind_port().
4540  *	cmd = flag indicating the action to take
4541  *
4542  * Output:
4543  *	FC_SUCCESS - action was taken successfully or no action was needed.
4544  *	FC_FAILURE - action was attempted and failed.
4545  *	FC_UNBOUND - the specified handle is not bound to a port.
4546  */
static int
ql_notify(opaque_t fca_handle, uint32_t cmd)
{
	ql_adapter_state_t		*ha;
	int				rval = FC_SUCCESS;
	tgt_cmd_t			*tgtcmd;
	notify_acknowledge_entry_t	*nack;

	/* Validate that this handle was issued by this FCA. */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(CE_CONT, "(%d): started cmd = %xh\n", ha->instance, cmd);

	/* The low-order flag selects the action; the value field is data. */
	switch (FC_NOTIFY_GET_FLAG(cmd)) {
	case FC_NOTIFY_RECOVERY_DONE:

		QL_PRINT_3(CE_CONT, "(%d): got NOTIFY_RECOVERY cmd=%xh\n",
		    ha->instance, cmd);

		/*
		 * Atomically claim any pending target command so only
		 * one caller acknowledges it.
		 */
		mutex_enter(&ha->ql_nack_mtx);
		tgtcmd = ha->ql_nack;
		ha->ql_nack = NULL;
		mutex_exit(&ha->ql_nack_mtx);

		if (tgtcmd != NULL) {
			QL_PRINT_3(CE_CONT, "(%d): N_ACK pending\n",
			    ha->instance);

			/* Get a request queue entry for the notify-ack IOCB. */
			rval = ql_req_pkt(ha, (request_t **)&nack);
			if (rval == QL_SUCCESS) {
				ql_notify_acknowledge_iocb(ha, tgtcmd, nack);

				QL_PRINT_3(CE_CONT, "(%d): send notify_ack: "
				    "status=%xh flag=%xh\n", ha->instance,
				    tgtcmd->status, nack->flags_l);

				/* tgtcmd ownership ends here in both paths. */
				kmem_free(tgtcmd, sizeof (tgt_cmd_t));
				/* Issue command to ISP */
				ql_isp_cmd(ha);
			} else {
				kmem_free(tgtcmd, sizeof (tgt_cmd_t));
			}
		}
		break;

	case FC_NOTIFY_RECOVERY_CLEANUP:
		/* No action needed; report success. */
		break;

	case FC_NOTIFY_TARGET_MODE:
		/* Already in target mode; nothing to do. */
		if (CFG_IST(ha, CFG_TARGET_MODE_ENABLE)) {
			break;
		}

		/*
		 * Enable target mode with a hard address taken from the
		 * notification's value field, then reinitialize.
		 */
		ha->cfg_flags |= (CFG_ENABLE_TARGET_MODE |
		    CFG_ENABLE_HARD_ADDRESS);
		ha->port_hard_address.r.d_id[0] =
		    LSB(LSW(FC_NOTIFY_GET_VALUE(cmd)));
		ha->port_hard_address.r.d_id[1] =
		    MSB(LSW(FC_NOTIFY_GET_VALUE(cmd)));
		ha->port_hard_address.r.d_id[2] =
		    LSB(MSW(FC_NOTIFY_GET_VALUE(cmd)));
		QL_PRINT_3(CE_CONT, "(%d): Target mode set, hard address ="
		    " %xh\n", ha->instance, ha->port_hard_address.b24);
		rval = ql_initialize_adapter(ha);
		ql_awaken_task_daemon(ha, NULL, 0, 0);
		break;

	case FC_NOTIFY_NO_TARGET_MODE:
		/* Not in target mode; nothing to do. */
		if (!CFG_IST(ha, CFG_TARGET_MODE_ENABLE)) {
			break;
		}
		/* Disable target mode and reinitialize the adapter. */
		ha->cfg_flags &= ~(CFG_ENABLE_TARGET_MODE |
		    CFG_ENABLE_HARD_ADDRESS);
		QL_PRINT_3(CE_CONT, "(%d): Target mode cleared\n",
		    ha->instance);
		rval = ql_initialize_adapter(ha);
		ql_awaken_task_daemon(ha, NULL, 0, LOOP_DOWN);
		break;

	case FC_NOTIFY_THROTTLE:
		/* Informational only; log the requested per-target limit. */
		cmn_err(CE_NOTE, "!%s(%d) max cmds per target %xh", QL_NAME,
		    ha->instance, FC_NOTIFY_GET_VALUE(cmd));
		break;

	default:
		/* Unknown notifications are silently accepted. */
		break;
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
4646 
4647 /* ************************************************************************ */
4648 /*			FCA Driver Local Support Functions.		    */
4649 /* ************************************************************************ */
4650 
4651 /*
4652  * ql_cmd_setup
4653  *	Verifies proper command.
4654  *
4655  * Input:
4656  *	fca_handle = handle setup by ql_bind_port().
4657  *	pkt = pointer to fc_packet.
4658  *	rval = pointer for return value.
4659  *
4660  * Returns:
4661  *	Adapter state pointer, NULL = failure.
4662  *
4663  * Context:
4664  *	Kernel context.
4665  */
static ql_adapter_state_t *
ql_cmd_setup(opaque_t fca_handle, fc_packet_t *pkt, int *rval)
{
	ql_adapter_state_t	*ha, *pha;
	ql_srb_t		*sp = (ql_srb_t *)pkt->pkt_fca_private;
	ql_tgt_t		*tq;
	port_id_t		d_id;

	pkt->pkt_resp_resid = 0;
	pkt->pkt_data_resid = 0;

	/* check that the handle is assigned by this FCA */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		*rval = FC_UNBOUND;
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (NULL);
	}
	pha = ha->pha;	/* physical adapter holds the shared flags/timers */

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* During a panic/crash dump, skip all state checks below. */
	if (ddi_in_panic() || pkt->pkt_tran_flags & FC_TRAN_DUMPING) {
		return (ha);
	}

	/* Adapter must be online to accept commands. */
	if (!(pha->flags & ONLINE)) {
		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_HW_ERROR;
		*rval = FC_TRANSPORT_ERROR;
		EL(ha, "failed, not online hf=%xh\n", pha->flags);
		return (NULL);
	}

	/* Exit on loop down. */
	if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING) &&
	    pha->task_daemon_flags & LOOP_DOWN &&
	    pha->loop_down_timer <= pha->loop_down_abort_time) {
		pkt->pkt_state = FC_PKT_PORT_OFFLINE;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		*rval = FC_OFFLINE;
		EL(ha, "failed, loop down tdf=%xh\n", pha->task_daemon_flags);
		return (NULL);
	}

	/*
	 * For FCP commands, resolve (and cache) the target queue from
	 * the destination ID, then reject if the target is mid-RSCN or
	 * needs re-authentication.
	 */
	if (pkt->pkt_cmd_fhdr.r_ctl == R_CTL_COMMAND &&
	    pkt->pkt_cmd_fhdr.type == FC_TYPE_SCSI_FCP) {
		tq = (ql_tgt_t *)pkt->pkt_fca_device;
		if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id))) {
			d_id.r.rsvd_1 = 0;
			d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
			tq = ql_d_id_to_queue(ha, d_id);

			/* Cache the lookup for subsequent packets. */
			pkt->pkt_fca_device = (opaque_t)tq;
		}

		if (tq != NULL) {
			DEVICE_QUEUE_LOCK(tq);
			if (tq->flags & (TQF_RSCN_RCVD |
			    TQF_NEED_AUTHENTICATION)) {
				*rval = FC_DEVICE_BUSY;
				DEVICE_QUEUE_UNLOCK(tq);
				EL(ha, "failed, busy qf=%xh, d_id=%xh\n",
				    tq->flags, tq->d_id.b24);
				return (NULL);
			}
			DEVICE_QUEUE_UNLOCK(tq);
		}
	}

	/*
	 * Check DMA pointers.
	 */
	*rval = DDI_SUCCESS;
	if (pkt->pkt_cmd_acc != NULL && pkt->pkt_cmdlen) {
		QL_CLEAR_DMA_HANDLE(pkt->pkt_cmd_dma);
		*rval = ddi_check_dma_handle(pkt->pkt_cmd_dma);
		if (*rval == DDI_SUCCESS) {
			*rval = ddi_check_acc_handle(pkt->pkt_cmd_acc);
		}
	}

	if (pkt->pkt_resp_acc != NULL && *rval == DDI_SUCCESS &&
	    pkt->pkt_rsplen != 0) {
		QL_CLEAR_DMA_HANDLE(pkt->pkt_resp_dma);
		*rval = ddi_check_dma_handle(pkt->pkt_resp_dma);
		if (*rval == DDI_SUCCESS) {
			*rval = ddi_check_acc_handle(pkt->pkt_resp_acc);
		}
	}

	/*
	 * Minimum branch conditional; Change it with care.
	 * NOTE: the bitwise '&' of the three boolean subexpressions is
	 * deliberate (single branch instead of short-circuit chain).
	 */
	if (((pkt->pkt_data_acc != NULL) & (*rval == DDI_SUCCESS) &
	    (pkt->pkt_datalen != 0)) != 0) {
		QL_CLEAR_DMA_HANDLE(pkt->pkt_data_dma);
		*rval = ddi_check_dma_handle(pkt->pkt_data_dma);
		if (*rval == DDI_SUCCESS) {
			*rval = ddi_check_acc_handle(pkt->pkt_data_acc);
		}
	}

	/* Any handle-check failure marks the packet as a DMA error. */
	if (*rval != DDI_SUCCESS) {
		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_DMA_ERROR;

		/* Do command callback. */
		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
			ql_awaken_task_daemon(ha, sp, 0, 0);
		}
		*rval = FC_BADPACKET;
		EL(ha, "failed, bad DMA pointers\n");
		return (NULL);
	}

	/* The SRB must carry this FCA's brand; reject foreign packets. */
	if (sp->magic_number != QL_FCA_BRAND) {
		*rval = FC_BADPACKET;
		EL(ha, "failed, magic number=%xh\n", sp->magic_number);
		return (NULL);
	}
	*rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (ha);
}
4794 
4795 /*
4796  * ql_els_plogi
 *	Issue an extended link service port login request.
4798  *
4799  * Input:
4800  *	ha = adapter state pointer.
4801  *	pkt = pointer to fc_packet.
4802  *
4803  * Returns:
4804  *	FC_SUCCESS - the packet was accepted for transport.
4805  *	FC_TRANSPORT_ERROR - a transport error occurred.
4806  *
4807  * Context:
4808  *	Kernel context.
4809  */
static int
ql_els_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ql_tgt_t		*tq = NULL;
	port_id_t		d_id;
	la_els_logi_t		acc;
	class_svc_param_t	*class3_param;
	int			ret;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Reject immediately if the link state is not online. */
	TASK_DAEMON_LOCK(ha);
	if (!(ha->task_daemon_flags & STATE_ONLINE)) {
		TASK_DAEMON_UNLOCK(ha);
		QL_PRINT_3(CE_CONT, "(%d): offline done\n", ha->instance);
		return (FC_OFFLINE);
	}
	TASK_DAEMON_UNLOCK(ha);

	bzero(&acc, sizeof (acc));
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;

	/* Log in to the port; retry once if the loop ID was in use. */
	switch (ret = ql_login_port(ha, d_id)) {
	case QL_SUCCESS:
		tq = ql_d_id_to_queue(ha, d_id);
		break;

	case QL_LOOP_ID_USED:
		if ((ret = ql_login_port(ha, d_id)) == QL_SUCCESS) {
			tq = ql_d_id_to_queue(ha, d_id);
		}
		break;

	default:
		break;
	}

	if (ret != QL_SUCCESS) {
		/*
		 * Invalidate this entry so as to seek a fresh loop ID
		 * in case firmware reassigns it to something else
		 */
		tq = ql_d_id_to_queue(ha, d_id);
		if (tq && (ret != QL_MEMORY_ALLOC_FAILED)) {
			tq->loop_id = PORT_NO_LOOP_ID;
		}
	} else if (tq) {
		/* Refresh the port database via ADISC on success. */
		(void) ql_get_port_database(ha, tq, PDF_ADISC);
	}

	if (tq != NULL && VALID_DEVICE_ID(ha, tq->loop_id) &&
	    (ret != QL_MEMORY_ALLOC_FAILED) && PD_PORT_LOGIN(tq)) {

		/* Build ACC. */
		acc.ls_code.ls_code = LA_ELS_ACC;
		acc.common_service.fcph_version = 0x2006;
		acc.common_service.cmn_features = 0x8800;
		/* rx_bufsize comes from whichever init control block applies. */
		CFG_IST(ha, CFG_CTRL_2425) ?
		    (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
		    ha->init_ctrl_blk.cb24.max_frame_length[0],
		    ha->init_ctrl_blk.cb24.max_frame_length[1])) :
		    (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
		    ha->init_ctrl_blk.cb.max_frame_length[0],
		    ha->init_ctrl_blk.cb.max_frame_length[1]));
		acc.common_service.conc_sequences = 0xff;
		acc.common_service.relative_offset = 0x03;
		acc.common_service.e_d_tov = 0x7d0;

		bcopy((void *)&tq->port_name[0],
		    (void *)&acc.nport_ww_name.raw_wwn[0], 8);
		bcopy((void *)&tq->node_name[0],
		    (void *)&acc.node_ww_name.raw_wwn[0], 8);

		/* Fill in the class 3 service parameters from the target. */
		class3_param = (class_svc_param_t *)&acc.class_3;
		class3_param->class_valid_svc_opt = 0x8000;
		class3_param->recipient_ctl = tq->class3_recipient_ctl;
		class3_param->rcv_data_size = tq->class3_rcv_data_size;
		class3_param->conc_sequences = tq->class3_conc_sequences;
		class3_param->open_sequences_per_exch =
		    tq->class3_open_sequences_per_exch;

		if ((ql_busy_plogi(ha, pkt, tq) == FC_TRAN_BUSY)) {
			/* Target busy: convert the ACC into a reject. */
			acc.ls_code.ls_code = LA_ELS_RJT;
			pkt->pkt_state = FC_PKT_TRAN_BSY;
			pkt->pkt_reason = FC_REASON_XCHG_BSY;
			rval = FC_TRAN_BUSY;
		} else {
			DEVICE_QUEUE_LOCK(tq);
			tq->logout_sent = 0;
			tq->flags &= ~TQF_NEED_AUTHENTICATION;
			if (CFG_IST(ha, CFG_CTRL_2425)) {
				tq->flags |= TQF_IIDMA_NEEDED;
			}
			DEVICE_QUEUE_UNLOCK(tq);

			if (CFG_IST(ha, CFG_CTRL_2425)) {
				TASK_DAEMON_LOCK(ha);
				ha->task_daemon_flags |= TD_IIDMA_NEEDED;
				TASK_DAEMON_UNLOCK(ha);
			}

			pkt->pkt_state = FC_PKT_SUCCESS;
		}
	} else {
		/* Build RJT. */
		acc.ls_code.ls_code = LA_ELS_RJT;

		/* Map the firmware login result to FC packet state/reason. */
		switch (ret) {
		case QL_FUNCTION_TIMEOUT:
			pkt->pkt_state = FC_PKT_TIMEOUT;
			pkt->pkt_reason = FC_REASON_HW_ERROR;
			break;

		case QL_MEMORY_ALLOC_FAILED:
			pkt->pkt_state = FC_PKT_LOCAL_BSY;
			pkt->pkt_reason = FC_REASON_NOMEM;
			rval = FC_TRAN_BUSY;
			break;

		case QL_FABRIC_NOT_INITIALIZED:
			pkt->pkt_state = FC_PKT_FABRIC_BSY;
			pkt->pkt_reason = FC_REASON_NO_CONNECTION;
			rval = FC_TRAN_BUSY;
			break;

		default:
			pkt->pkt_state = FC_PKT_TRAN_ERROR;
			pkt->pkt_reason = FC_REASON_NO_CONNECTION;
			break;
		}

		EL(ha, "Plogi unsuccess for %xh state %xh reason %xh "
		    "ret %xh rval %xh\n", d_id.b24, pkt->pkt_state,
		    pkt->pkt_reason, ret, rval);
	}

	if (tq != NULL) {
		DEVICE_QUEUE_LOCK(tq);
		tq->flags &= ~(TQF_PLOGI_PROGRS | TQF_QUEUE_SUSPENDED);
		if (rval == FC_TRAN_BUSY) {
			/* Require re-authentication before new commands. */
			if (tq->d_id.b24 != BROADCAST_ADDR) {
				tq->flags |= TQF_NEED_AUTHENTICATION;
			}
		}
		DEVICE_QUEUE_UNLOCK(tq);
	}

	/* Copy the ACC/RJT payload into the caller's response buffer. */
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
4969 
4970 /*
4971  * ql_els_flogi
 *	Issue an extended link service fabric login request.
4973  *
4974  * Input:
4975  *	ha = adapter state pointer.
4976  *	pkt = pointer to fc_packet.
4977  *
4978  * Returns:
4979  *	FC_SUCCESS - the packet was accepted for transport.
4980  *	FC_TRANSPORT_ERROR - a transport error occurred.
4981  *
4982  * Context:
4983  *	Kernel context.
4984  */
4985 static int
4986 ql_els_flogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
4987 {
4988 	ql_tgt_t		*tq = NULL;
4989 	port_id_t		d_id;
4990 	la_els_logi_t		acc;
4991 	class_svc_param_t	*class3_param;
4992 	int			rval = FC_SUCCESS;
4993 
4994 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
4995 
4996 	bzero(&acc, sizeof (acc));
4997 	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
4998 
4999 	tq = ql_d_id_to_queue(ha, d_id);
5000 	if (tq != NULL) {
5001 		/* Build ACC. */
5002 		acc.ls_code.ls_code = LA_ELS_ACC;
5003 		acc.common_service.fcph_version = 0x2006;
5004 		acc.common_service.cmn_features = 0x1b00;
5005 		CFG_IST(ha, CFG_CTRL_2425) ?
5006 		    (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
5007 		    ha->init_ctrl_blk.cb24.max_frame_length[0],
5008 		    ha->init_ctrl_blk.cb24.max_frame_length[1])) :
5009 		    (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
5010 		    ha->init_ctrl_blk.cb.max_frame_length[0],
5011 		    ha->init_ctrl_blk.cb.max_frame_length[1]));
5012 		acc.common_service.conc_sequences = 0xff;
5013 		acc.common_service.relative_offset = 0x03;
5014 		acc.common_service.e_d_tov = 0x7d0;
5015 
5016 		bcopy((void *)&tq->port_name[0],
5017 		    (void *)&acc.nport_ww_name.raw_wwn[0], 8);
5018 		bcopy((void *)&tq->node_name[0],
5019 		    (void *)&acc.node_ww_name.raw_wwn[0], 8);
5020 
5021 		class3_param = (class_svc_param_t *)&acc.class_3;
5022 		class3_param->class_valid_svc_opt = 0x8800;
5023 		class3_param->recipient_ctl = tq->class3_recipient_ctl;
5024 		class3_param->rcv_data_size = tq->class3_rcv_data_size;
5025 		class3_param->conc_sequences = tq->class3_conc_sequences;
5026 		class3_param->open_sequences_per_exch =
5027 		    tq->class3_open_sequences_per_exch;
5028 
5029 		pkt->pkt_state = FC_PKT_SUCCESS;
5030 	} else {
5031 		/* Build RJT. */
5032 		acc.ls_code.ls_code = LA_ELS_RJT;
5033 
5034 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5035 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5036 	}
5037 
5038 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5039 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5040 
5041 	if (rval != FC_SUCCESS) {
5042 		EL(ha, "failed, rval = %xh\n", rval);
5043 	} else {
5044 		/*EMPTY*/
5045 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5046 	}
5047 	return (rval);
5048 }
5049 
5050 /*
5051  * ql_els_logo
5052  *	Issue a extended link service logout request.
5053  *
5054  * Input:
5055  *	ha = adapter state pointer.
5056  *	pkt = pointer to fc_packet.
5057  *
5058  * Returns:
5059  *	FC_SUCCESS - the packet was accepted for transport.
5060  *	FC_TRANSPORT_ERROR - a transport error occurred.
5061  *
5062  * Context:
5063  *	Kernel context.
5064  */
static int
ql_els_logo(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	port_id_t	d_id;
	ql_tgt_t	*tq;
	la_els_logo_t	acc;
	int		rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	bzero(&acc, sizeof (acc));
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;

	tq = ql_d_id_to_queue(ha, d_id);
	if (tq) {
		DEVICE_QUEUE_LOCK(tq);
		/* Broadcast address: nothing to log out; just succeed. */
		if (tq->d_id.b24 == BROADCAST_ADDR) {
			DEVICE_QUEUE_UNLOCK(tq);
			return (FC_SUCCESS);
		}

		/* Require re-authentication before further I/O. */
		tq->flags |= TQF_NEED_AUTHENTICATION;

		/*
		 * Abort and drain all outstanding commands for this
		 * target; the queue lock is dropped around the abort
		 * and delay, and rechecked under the lock each pass.
		 */
		do {
			DEVICE_QUEUE_UNLOCK(tq);
			(void) ql_abort_device(ha, tq, 1);

			/*
			 * Wait for commands to drain in F/W (doesn't
			 * take more than a few milliseconds)
			 */
			ql_delay(ha, 10000);

			DEVICE_QUEUE_LOCK(tq);
		} while (tq->outcnt);

		DEVICE_QUEUE_UNLOCK(tq);
	}

	/* Log the port out of the firmware; ACC on success, RJT otherwise. */
	if (ql_logout_port(ha, d_id) == QL_SUCCESS) {
		/* Build ACC. */
		acc.ls_code.ls_code = LA_ELS_ACC;

		pkt->pkt_state = FC_PKT_SUCCESS;
	} else {
		/* Build RJT. */
		acc.ls_code.ls_code = LA_ELS_RJT;

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
	}

	/* Copy the response into the ULP's mapped response buffer. */
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5128 
5129 /*
5130  * ql_els_prli
5131  *	Issue a extended link service process login request.
5132  *
5133  * Input:
5134  *	ha = adapter state pointer.
5135  *	pkt = pointer to fc_packet.
5136  *
5137  * Returns:
5138  *	FC_SUCCESS - the packet was accepted for transport.
5139  *	FC_TRANSPORT_ERROR - a transport error occurred.
5140  *
5141  * Context:
5142  *	Kernel context.
5143  */
5144 static int
5145 ql_els_prli(ql_adapter_state_t *ha, fc_packet_t *pkt)
5146 {
5147 	ql_tgt_t		*tq;
5148 	port_id_t		d_id;
5149 	la_els_prli_t		acc;
5150 	prli_svc_param_t	*param;
5151 	int			rval = FC_SUCCESS;
5152 
5153 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5154 
5155 	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5156 
5157 	tq = ql_d_id_to_queue(ha, d_id);
5158 	if (tq != NULL) {
5159 
5160 		/* Build ACC. */
5161 		bzero(&acc, sizeof (acc));
5162 		acc.ls_code = LA_ELS_ACC;
5163 		acc.page_length = 0x10;
5164 		acc.payload_length = tq->prli_payload_length;
5165 
5166 		param = (prli_svc_param_t *)&acc.service_params[0];
5167 		param->type = 0x08;
5168 		param->rsvd = 0x00;
5169 		param->process_assoc_flags = tq->prli_svc_param_word_0;
5170 		param->process_flags = tq->prli_svc_param_word_3;
5171 
5172 		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5173 		    (uint8_t *)pkt->pkt_resp, sizeof (acc),
5174 		    DDI_DEV_AUTOINCR);
5175 
5176 		pkt->pkt_state = FC_PKT_SUCCESS;
5177 	} else {
5178 		la_els_rjt_t rjt;
5179 
5180 		/* Build RJT. */
5181 		bzero(&rjt, sizeof (rjt));
5182 		rjt.ls_code.ls_code = LA_ELS_RJT;
5183 
5184 		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
5185 		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
5186 
5187 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5188 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5189 	}
5190 
5191 	if (rval != FC_SUCCESS) {
5192 		EL(ha, "failed, rval = %xh\n", rval);
5193 	} else {
5194 		/*EMPTY*/
5195 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5196 	}
5197 	return (rval);
5198 }
5199 
5200 /*
5201  * ql_els_prlo
5202  *	Issue a extended link service process logout request.
5203  *
5204  * Input:
5205  *	ha = adapter state pointer.
5206  *	pkt = pointer to fc_packet.
5207  *
5208  * Returns:
5209  *	FC_SUCCESS - the packet was accepted for transport.
5210  *	FC_TRANSPORT_ERROR - a transport error occurred.
5211  *
5212  * Context:
5213  *	Kernel context.
5214  */
5215 /* ARGSUSED */
5216 static int
5217 ql_els_prlo(ql_adapter_state_t *ha, fc_packet_t *pkt)
5218 {
5219 	la_els_prli_t	acc;
5220 	int		rval = FC_SUCCESS;
5221 
5222 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5223 
5224 	/* Build ACC. */
5225 	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&acc,
5226 	    (uint8_t *)pkt->pkt_cmd, sizeof (acc), DDI_DEV_AUTOINCR);
5227 
5228 	acc.ls_code = LA_ELS_ACC;
5229 	acc.service_params[2] = 1;
5230 
5231 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5232 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5233 
5234 	pkt->pkt_state = FC_PKT_SUCCESS;
5235 
5236 	if (rval != FC_SUCCESS) {
5237 		EL(ha, "failed, rval = %xh\n", rval);
5238 	} else {
5239 		/*EMPTY*/
5240 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5241 	}
5242 	return (rval);
5243 }
5244 
5245 /*
5246  * ql_els_adisc
5247  *	Issue a extended link service address discovery request.
5248  *
5249  * Input:
5250  *	ha = adapter state pointer.
5251  *	pkt = pointer to fc_packet.
5252  *
5253  * Returns:
5254  *	FC_SUCCESS - the packet was accepted for transport.
5255  *	FC_TRANSPORT_ERROR - a transport error occurred.
5256  *
5257  * Context:
5258  *	Kernel context.
5259  */
static int
ql_els_adisc(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ql_dev_id_list_t	*list;
	uint32_t		list_size;
	ql_link_t		*link;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;
	port_id_t		d_id;
	la_els_adisc_t		acc;
	uint16_t		index, loop_id;
	ql_mbx_data_t		mr;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	bzero(&acc, sizeof (acc));
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;

	/*
	 * MBC_GET_PORT_DATABASE causes ADISC to go out to
	 * the device from the firmware
	 */
	index = ql_alpa_to_index[d_id.b.al_pa];
	tq = NULL;
	/* Walk the device hash bucket for the queue matching this D_ID. */
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			break;
		} else {
			tq = NULL;
		}
	}

	/*
	 * Device is known but its loop ID is invalid; fetch the current
	 * ID list from the firmware and try to recover the loop ID.
	 */
	if ((tq != NULL) && (!VALID_DEVICE_ID(ha, tq->loop_id))) {
		list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
		list = (ql_dev_id_list_t *)kmem_zalloc(list_size, KM_SLEEP);

		if (list != NULL &&
		    ql_get_id_list(ha, (caddr_t)list, list_size, &mr) ==
		    QL_SUCCESS) {

			/* mr.mb[1] is used as the returned entry count. */
			for (index = 0; index < mr.mb[1]; index++) {
				ql_dev_list(ha, list, index, &d_id, &loop_id);

				if (tq->d_id.b24 == d_id.b24) {
					tq->loop_id = loop_id;
					break;
				}
			}
		} else {
			cmn_err(CE_WARN, "!%s(%d) didn't get list for %xh",
			    QL_NAME, ha->instance, d_id.b24);
			tq = NULL;
		}
		/* Still no valid loop ID after the scan; give up on it. */
		if ((tq != NULL) && (!VALID_DEVICE_ID(ha, tq->loop_id))) {
			cmn_err(CE_WARN, "!%s(%d) no loop_id for adisc %xh",
			    QL_NAME, ha->instance, tq->d_id.b24);
			tq = NULL;
		}

		if (list != NULL) {
			kmem_free(list, list_size);
		}
	}

	if ((tq != NULL) && (VALID_DEVICE_ID(ha, tq->loop_id)) &&
	    ql_get_port_database(ha, tq, PDF_ADISC) == QL_SUCCESS) {

		/* Build ACC. */

		DEVICE_QUEUE_LOCK(tq);
		tq->flags &= ~TQF_NEED_AUTHENTICATION;
		if (tq->prli_svc_param_word_3 & PRLI_W3_RETRY) {
			/* Restart any queued commands on each LUN. */
			for (link = tq->lun_queues.first; link != NULL;
			    link = link->next) {
				lq = link->base_address;

				if (lq->cmd.first != NULL) {
					/*
					 * NOTE(review): ql_next() appears
					 * to drop the device queue lock --
					 * it is reacquired here; confirm
					 * against ql_next()'s contract.
					 */
					ql_next(ha, lq);
					DEVICE_QUEUE_LOCK(tq);
				}
			}
		}
		DEVICE_QUEUE_UNLOCK(tq);

		acc.ls_code.ls_code = LA_ELS_ACC;
		acc.hard_addr.hard_addr = tq->hard_addr.b24;

		bcopy((void *)&tq->port_name[0],
		    (void *)&acc.port_wwn.raw_wwn[0], 8);
		bcopy((void *)&tq->node_name[0],
		    (void *)&acc.node_wwn.raw_wwn[0], 8);

		acc.nport_id.port_id = tq->d_id.b24;

		pkt->pkt_state = FC_PKT_SUCCESS;
	} else {
		/* Build RJT. */
		acc.ls_code.ls_code = LA_ELS_RJT;

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
	}

	/* Copy the response into the ULP's mapped response buffer. */
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5376 
5377 /*
5378  * ql_els_linit
5379  *	Issue a extended link service loop initialize request.
5380  *
5381  * Input:
5382  *	ha = adapter state pointer.
5383  *	pkt = pointer to fc_packet.
5384  *
5385  * Returns:
5386  *	FC_SUCCESS - the packet was accepted for transport.
5387  *	FC_TRANSPORT_ERROR - a transport error occurred.
5388  *
5389  * Context:
5390  *	Kernel context.
5391  */
static int
ql_els_linit(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		cnt;
	conv_num_t		n;
	port_id_t		d_id;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	if (ha->topology & QL_SNS_CONNECTION) {
		fc_linit_req_t els;
		lfa_cmd_t lfa;

		/* Pull the LINIT request out of the command buffer. */
		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

		/* Setup LFA mailbox command data. */
		bzero((void *)&lfa, sizeof (lfa_cmd_t));

		lfa.resp_buffer_length[0] = 4;

		/*
		 * Response buffer DMA address, converted to little-endian
		 * byte order; 32-bit DMA splits the address into two words.
		 */
		cp = pkt->pkt_resp_cookie;
		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
			n.size64 = (uint64_t)cp->dmac_laddress;
			LITTLE_ENDIAN_64(&n.size64);
		} else {
			n.size32[0] = LSD(cp->dmac_laddress);
			LITTLE_ENDIAN_32(&n.size32[0]);
			n.size32[1] = MSD(cp->dmac_laddress);
			LITTLE_ENDIAN_32(&n.size32[1]);
		}

		/* Set buffer address. */
		for (cnt = 0; cnt < 8; cnt++) {
			lfa.resp_buffer_address[cnt] = n.size8[cnt];
		}

		lfa.subcommand_length[0] = 4;
		/* Destination port ID, little-endian, low three bytes. */
		n.size32[0] = d_id.b24;
		LITTLE_ENDIAN_32(&n.size32[0]);
		lfa.addr[0] = n.size8[0];
		lfa.addr[1] = n.size8[1];
		lfa.addr[2] = n.size8[2];
		/* LFA subcommand selector for LINIT (cf. 0x71/0x72 below). */
		lfa.subcommand[1] = 0x70;
		lfa.payload[2] = els.func;
		lfa.payload[4] = els.lip_b3;
		lfa.payload[5] = els.lip_b4;

		if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
			pkt->pkt_state = FC_PKT_TRAN_ERROR;
		} else {
			pkt->pkt_state = FC_PKT_SUCCESS;
		}
	} else {
		fc_linit_resp_t rjt;

		/* Not on a fabric; build RJT. */
		bzero(&rjt, sizeof (rjt));
		rjt.ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5470 
5471 /*
5472  * ql_els_lpc
5473  *	Issue a extended link service loop control request.
5474  *
5475  * Input:
5476  *	ha = adapter state pointer.
5477  *	pkt = pointer to fc_packet.
5478  *
5479  * Returns:
5480  *	FC_SUCCESS - the packet was accepted for transport.
5481  *	FC_TRANSPORT_ERROR - a transport error occurred.
5482  *
5483  * Context:
5484  *	Kernel context.
5485  */
static int
ql_els_lpc(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		cnt;
	conv_num_t		n;
	port_id_t		d_id;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	if (ha->topology & QL_SNS_CONNECTION) {
		ql_lpc_t els;
		lfa_cmd_t lfa;

		/* Pull the LPC request out of the command buffer. */
		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

		/* Setup LFA mailbox command data. */
		bzero((void *)&lfa, sizeof (lfa_cmd_t));

		lfa.resp_buffer_length[0] = 4;

		/*
		 * Response buffer DMA address in little-endian byte order.
		 * NOTE(review): unlike ql_els_linit(), the 32-bit path here
		 * uses dmac_address and zeroes the high word -- confirm
		 * this difference is intentional.
		 */
		cp = pkt->pkt_resp_cookie;
		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
			n.size64 = (uint64_t)(cp->dmac_laddress);
			LITTLE_ENDIAN_64(&n.size64);
		} else {
			n.size32[0] = cp->dmac_address;
			LITTLE_ENDIAN_32(&n.size32[0]);
			n.size32[1] = 0;
		}

		/* Set buffer address. */
		for (cnt = 0; cnt < 8; cnt++) {
			lfa.resp_buffer_address[cnt] = n.size8[cnt];
		}

		lfa.subcommand_length[0] = 20;
		/* Destination port ID, little-endian, low three bytes. */
		n.size32[0] = d_id.b24;
		LITTLE_ENDIAN_32(&n.size32[0]);
		lfa.addr[0] = n.size8[0];
		lfa.addr[1] = n.size8[1];
		lfa.addr[2] = n.size8[2];
		/* LFA subcommand selector for LPC. */
		lfa.subcommand[1] = 0x71;
		lfa.payload[4] = els.port_control;
		bcopy((void *)&els.lpb[0], (void *)&lfa.payload[6], 32);

		if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
			pkt->pkt_state = FC_PKT_TRAN_ERROR;
		} else {
			pkt->pkt_state = FC_PKT_SUCCESS;
		}
	} else {
		ql_lpc_resp_t rjt;

		/* Not on a fabric; build RJT. */
		bzero(&rjt, sizeof (rjt));
		rjt.ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5562 
5563 /*
5564  * ql_els_lsts
5565  *	Issue a extended link service loop status request.
5566  *
5567  * Input:
5568  *	ha = adapter state pointer.
5569  *	pkt = pointer to fc_packet.
5570  *
5571  * Returns:
5572  *	FC_SUCCESS - the packet was accepted for transport.
5573  *	FC_TRANSPORT_ERROR - a transport error occurred.
5574  *
5575  * Context:
5576  *	Kernel context.
5577  */
static int
ql_els_lsts(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		cnt;
	conv_num_t		n;
	port_id_t		d_id;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	if (ha->topology & QL_SNS_CONNECTION) {
		fc_lsts_req_t els;
		lfa_cmd_t lfa;

		/* Pull the LSTS request out of the command buffer. */
		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

		/* Setup LFA mailbox command data. */
		bzero((void *)&lfa, sizeof (lfa_cmd_t));

		lfa.resp_buffer_length[0] = 84;

		/*
		 * Response buffer DMA address in little-endian byte order;
		 * the 32-bit path uses dmac_address with a zero high word.
		 */
		cp = pkt->pkt_resp_cookie;
		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
			n.size64 = cp->dmac_laddress;
			LITTLE_ENDIAN_64(&n.size64);
		} else {
			n.size32[0] = cp->dmac_address;
			LITTLE_ENDIAN_32(&n.size32[0]);
			n.size32[1] = 0;
		}

		/* Set buffer address. */
		for (cnt = 0; cnt < 8; cnt++) {
			lfa.resp_buffer_address[cnt] = n.size8[cnt];
		}

		lfa.subcommand_length[0] = 2;
		/* Destination port ID, little-endian, low three bytes. */
		n.size32[0] = d_id.b24;
		LITTLE_ENDIAN_32(&n.size32[0]);
		lfa.addr[0] = n.size8[0];
		lfa.addr[1] = n.size8[1];
		lfa.addr[2] = n.size8[2];
		/* LFA subcommand selector for LSTS. */
		lfa.subcommand[1] = 0x72;

		if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
			pkt->pkt_state = FC_PKT_TRAN_ERROR;
		} else {
			pkt->pkt_state = FC_PKT_SUCCESS;
		}
	} else {
		fc_lsts_resp_t rjt;

		/* Not on a fabric; build RJT. */
		bzero(&rjt, sizeof (rjt));
		rjt.lsts_ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5652 
5653 /*
5654  * ql_els_scr
5655  *	Issue a extended link service state change registration request.
5656  *
5657  * Input:
5658  *	ha = adapter state pointer.
5659  *	pkt = pointer to fc_packet.
5660  *
5661  * Returns:
5662  *	FC_SUCCESS - the packet was accepted for transport.
5663  *	FC_TRANSPORT_ERROR - a transport error occurred.
5664  *
5665  * Context:
5666  *	Kernel context.
5667  */
5668 static int
5669 ql_els_scr(ql_adapter_state_t *ha, fc_packet_t *pkt)
5670 {
5671 	fc_scr_resp_t	acc;
5672 	int		rval = FC_SUCCESS;
5673 
5674 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5675 
5676 	bzero(&acc, sizeof (acc));
5677 	if (ha->topology & QL_SNS_CONNECTION) {
5678 		fc_scr_req_t els;
5679 
5680 		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5681 		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5682 
5683 		if (ql_send_change_request(ha, els.scr_func) ==
5684 		    QL_SUCCESS) {
5685 			/* Build ACC. */
5686 			acc.scr_acc = LA_ELS_ACC;
5687 
5688 			pkt->pkt_state = FC_PKT_SUCCESS;
5689 		} else {
5690 			/* Build RJT. */
5691 			acc.scr_acc = LA_ELS_RJT;
5692 
5693 			pkt->pkt_state = FC_PKT_TRAN_ERROR;
5694 			pkt->pkt_reason = FC_REASON_HW_ERROR;
5695 		}
5696 	} else {
5697 		/* Build RJT. */
5698 		acc.scr_acc = LA_ELS_RJT;
5699 
5700 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5701 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5702 	}
5703 
5704 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5705 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5706 
5707 	if (rval != FC_SUCCESS) {
5708 		EL(ha, "failed, rval = %xh\n", rval);
5709 	} else {
5710 		/*EMPTY*/
5711 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5712 	}
5713 	return (rval);
5714 }
5715 
5716 /*
5717  * ql_els_rscn
5718  *	Issue a extended link service register state
5719  *	change notification request.
5720  *
5721  * Input:
5722  *	ha = adapter state pointer.
5723  *	pkt = pointer to fc_packet.
5724  *
5725  * Returns:
5726  *	FC_SUCCESS - the packet was accepted for transport.
5727  *	FC_TRANSPORT_ERROR - a transport error occurred.
5728  *
5729  * Context:
5730  *	Kernel context.
5731  */
5732 static int
5733 ql_els_rscn(ql_adapter_state_t *ha, fc_packet_t *pkt)
5734 {
5735 	ql_rscn_resp_t	acc;
5736 	int		rval = FC_SUCCESS;
5737 
5738 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5739 
5740 	bzero(&acc, sizeof (acc));
5741 	if (ha->topology & QL_SNS_CONNECTION) {
5742 		/* Build ACC. */
5743 		acc.scr_acc = LA_ELS_ACC;
5744 
5745 		pkt->pkt_state = FC_PKT_SUCCESS;
5746 	} else {
5747 		/* Build RJT. */
5748 		acc.scr_acc = LA_ELS_RJT;
5749 
5750 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5751 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5752 	}
5753 
5754 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5755 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5756 
5757 	if (rval != FC_SUCCESS) {
5758 		EL(ha, "failed, rval = %xh\n", rval);
5759 	} else {
5760 		/*EMPTY*/
5761 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5762 	}
5763 	return (rval);
5764 }
5765 
5766 /*
5767  * ql_els_farp_req
5768  *	Issue FC Address Resolution Protocol (FARP)
5769  *	extended link service request.
5770  *
5771  *	Note: not supported.
5772  *
5773  * Input:
5774  *	ha = adapter state pointer.
5775  *	pkt = pointer to fc_packet.
5776  *
5777  * Returns:
5778  *	FC_SUCCESS - the packet was accepted for transport.
5779  *	FC_TRANSPORT_ERROR - a transport error occurred.
5780  *
5781  * Context:
5782  *	Kernel context.
5783  */
5784 static int
5785 ql_els_farp_req(ql_adapter_state_t *ha, fc_packet_t *pkt)
5786 {
5787 	ql_acc_rjt_t	acc;
5788 	int		rval = FC_SUCCESS;
5789 
5790 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5791 
5792 	bzero(&acc, sizeof (acc));
5793 
5794 	/* Build ACC. */
5795 	acc.ls_code.ls_code = LA_ELS_ACC;
5796 
5797 	pkt->pkt_state = FC_PKT_SUCCESS;
5798 
5799 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5800 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5801 
5802 	if (rval != FC_SUCCESS) {
5803 		EL(ha, "failed, rval = %xh\n", rval);
5804 	} else {
5805 		/*EMPTY*/
5806 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5807 	}
5808 	return (rval);
5809 }
5810 
5811 /*
5812  * ql_els_farp_reply
5813  *	Issue FC Address Resolution Protocol (FARP)
5814  *	extended link service reply.
5815  *
5816  *	Note: not supported.
5817  *
5818  * Input:
5819  *	ha = adapter state pointer.
5820  *	pkt = pointer to fc_packet.
5821  *
5822  * Returns:
5823  *	FC_SUCCESS - the packet was accepted for transport.
5824  *	FC_TRANSPORT_ERROR - a transport error occurred.
5825  *
5826  * Context:
5827  *	Kernel context.
5828  */
5829 /* ARGSUSED */
5830 static int
5831 ql_els_farp_reply(ql_adapter_state_t *ha, fc_packet_t *pkt)
5832 {
5833 	ql_acc_rjt_t	acc;
5834 	int		rval = FC_SUCCESS;
5835 
5836 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5837 
5838 	bzero(&acc, sizeof (acc));
5839 
5840 	/* Build ACC. */
5841 	acc.ls_code.ls_code = LA_ELS_ACC;
5842 
5843 	pkt->pkt_state = FC_PKT_SUCCESS;
5844 
5845 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5846 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5847 
5848 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5849 
5850 	return (rval);
5851 }
5852 
/*
 * ql_els_rnid
 *	Handle a request node identification data (RNID) extended link
 *	service by querying the target through the firmware and copying
 *	the resulting data into the packet's response buffer.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *
 * Returns:
 *	FC_SUCCESS - RNID data returned in an ELS accept.
 *	FC_FAILURE - unknown device or firmware request failed;
 *		     an ELS reject was returned.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_els_rnid(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	uchar_t			*rnid_acc;
	port_id_t		d_id;
	ql_link_t		*link;
	ql_tgt_t		*tq;
	uint16_t		index;
	la_els_rnid_acc_t	acc;
	la_els_rnid_t		*req;
	size_t			req_len;

	req_len =  FCIO_RNID_MAX_DATA_LEN + sizeof (fc_rnid_hdr_t);
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	index = ql_alpa_to_index[d_id.b.al_pa];

	/* Walk the device hash bucket for the queue matching this D_ID. */
	tq = NULL;
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			break;
		} else {
			tq = NULL;
		}
	}

	/* Allocate memory for rnid status block */
	rnid_acc = kmem_zalloc(req_len, KM_SLEEP);
	ASSERT(rnid_acc != NULL);

	bzero(&acc, sizeof (acc));

	/*
	 * Short-circuit evaluation guarantees tq is non-NULL and has a
	 * valid loop ID before ql_send_rnid_els() dereferences it.
	 */
	req = (la_els_rnid_t *)pkt->pkt_cmd;
	if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id)) ||
	    (ql_send_rnid_els(ha, tq->loop_id, req->data_format, req_len,
	    (caddr_t)rnid_acc) != QL_SUCCESS)) {

		kmem_free(rnid_acc, req_len);
		acc.ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
		    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;

		return (FC_FAILURE);
	}

	/*
	 * NOTE(review): assumes acc.hdr can hold req_len bytes
	 * (FCIO_RNID_MAX_DATA_LEN + header) -- confirm against the
	 * la_els_rnid_acc_t definition.
	 */
	acc.ls_code.ls_code = LA_ELS_ACC;
	bcopy(rnid_acc, &acc.hdr, req_len);
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	kmem_free(rnid_acc, req_len);
	pkt->pkt_state = FC_PKT_SUCCESS;

	return (FC_SUCCESS);
}
5912 
/*
 * ql_els_rls
 *	Handle a read link error status block (RLS) extended link service
 *	by fetching the link status counters from the firmware and
 *	returning them in an ELS accept.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *
 * Returns:
 *	FC_SUCCESS - link error counters returned in an ELS accept.
 *	FC_FAILURE - unknown device or firmware request failed;
 *		     an ELS reject was returned.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_els_rls(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	fc_rls_acc_t		*rls_acc;
	port_id_t		d_id;
	ql_link_t		*link;
	ql_tgt_t		*tq;
	uint16_t		index;
	la_els_rls_acc_t	acc;

	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	index = ql_alpa_to_index[d_id.b.al_pa];

	/* Walk the device hash bucket for the queue matching this D_ID. */
	tq = NULL;
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			break;
		} else {
			tq = NULL;
		}
	}

	/* Allocate memory for link error status block */
	rls_acc = kmem_zalloc(sizeof (*rls_acc), KM_SLEEP);
	ASSERT(rls_acc != NULL);

	bzero(&acc, sizeof (la_els_rls_acc_t));

	/*
	 * Short-circuit evaluation guarantees tq is non-NULL and has a
	 * valid loop ID before ql_get_link_status() dereferences it.
	 */
	if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id)) ||
	    (ql_get_link_status(ha, tq->loop_id, sizeof (*rls_acc),
	    (caddr_t)rls_acc, 0) != QL_SUCCESS)) {

		kmem_free(rls_acc, sizeof (*rls_acc));
		acc.ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
		    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;

		return (FC_FAILURE);
	}

	/* Byte-swap the firmware counters before building the accept. */
	LITTLE_ENDIAN_32(&rls_acc->rls_link_fail);
	LITTLE_ENDIAN_32(&rls_acc->rls_sync_loss);
	LITTLE_ENDIAN_32(&rls_acc->rls_sig_loss);
	LITTLE_ENDIAN_32(&rls_acc->rls_invalid_word);
	LITTLE_ENDIAN_32(&rls_acc->rls_invalid_crc);

	acc.ls_code.ls_code = LA_ELS_ACC;
	acc.rls_link_params.rls_link_fail = rls_acc->rls_link_fail;
	acc.rls_link_params.rls_sync_loss = rls_acc->rls_sync_loss;
	acc.rls_link_params.rls_sig_loss  = rls_acc->rls_sig_loss;
	acc.rls_link_params.rls_invalid_word = rls_acc->rls_invalid_word;
	acc.rls_link_params.rls_invalid_crc = rls_acc->rls_invalid_crc;
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	kmem_free(rls_acc, sizeof (*rls_acc));
	pkt->pkt_state = FC_PKT_SUCCESS;

	return (FC_SUCCESS);
}
5978 
/*
 * ql_busy_plogi
 *	Ensure a target queue has no outstanding commands and no pending
 *	callbacks before a PLOGI proceeds, so ULPs do not see commands
 *	completed with PKT_PORT_OFFLINE after the login.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *	tq = target queue pointer.
 *
 * Returns:
 *	FC_SUCCESS - queue drained; PLOGI may proceed.
 *	FC_TRAN_BUSY - commands or callbacks still pending; the
 *		       transport is expected to retry the PLOGI.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_busy_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_tgt_t *tq)
{
	port_id_t	d_id;
	ql_srb_t	*sp;
	fc_unsol_buf_t  *ubp;
	ql_link_t	*link, *next_link;
	int		rval = FC_SUCCESS;
	int		cnt = 5;

	/*
	 * we need to ensure that q->outcnt == 0, otherwise
	 * any cmd completed with PKT_PORT_OFFLINE after PLOGI
	 * will confuse ulps.
	 */

	DEVICE_QUEUE_LOCK(tq);
	do {
		/*
		 * wait for the cmds to get drained. If they
		 * don't get drained then the transport will
		 * retry PLOGI after few secs.
		 */
		if (tq->outcnt != 0) {
			rval = FC_TRAN_BUSY;
			/* Drop the lock around the delay; retry up to 5x. */
			DEVICE_QUEUE_UNLOCK(tq);
			ql_delay(ha, 10000);
			DEVICE_QUEUE_LOCK(tq);
			cnt--;
			if (!cnt) {
				cmn_err(CE_NOTE, "!%s(%d) Plogi busy"
				    " for %xh outcount %xh", QL_NAME,
				    ha->instance, tq->d_id.b24, tq->outcnt);
			}
		} else {
			rval = FC_SUCCESS;
			break;
		}
	} while (cnt > 0);
	DEVICE_QUEUE_UNLOCK(tq);

	/*
	 * return, if busy or if the plogi was asynchronous.
	 */
	if ((rval != FC_SUCCESS) ||
	    (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) &&
	    pkt->pkt_comp)) {
		return (rval);
	}

	/*
	 * Let us give daemon sufficient time and hopefully
	 * when transport retries PLOGI, it would have flushed
	 * callback queue.
	 */
	TASK_DAEMON_LOCK(ha);
	/* Scan pending callbacks for any addressed to this target's D_ID. */
	for (link = ha->callback_queue.first; link != NULL;
	    link = next_link) {
		next_link = link->next;
		sp = link->base_address;
		if (sp->flags & SRB_UB_CALLBACK) {
			/* Unsolicited buffer callback; D_ID is its S_ID. */
			ubp = ha->ub_array[sp->handle];
			d_id.b24 = ubp->ub_frame.s_id;
		} else {
			d_id.b24 = sp->pkt->pkt_cmd_fhdr.d_id;
		}
		if (tq->d_id.b24 == d_id.b24) {
			cmn_err(CE_NOTE, "!%s(%d) Plogi busy for %xh", QL_NAME,
			    ha->instance, tq->d_id.b24);
			rval = FC_TRAN_BUSY;
			break;
		}
	}
	TASK_DAEMON_UNLOCK(ha);

	return (rval);
}
6056 
6057 /*
6058  * ql_login_port
6059  *	Logs in a device if not already logged in.
6060  *
6061  * Input:
6062  *	ha = adapter state pointer.
6063  *	d_id = 24 bit port ID.
6064  *	DEVICE_QUEUE_LOCK must be released.
6065  *
6066  * Returns:
6067  *	QL local function return status code.
6068  *
6069  * Context:
6070  *	Kernel context.
6071  */
6072 static int
6073 ql_login_port(ql_adapter_state_t *ha, port_id_t d_id)
6074 {
6075 	ql_adapter_state_t	*vha;
6076 	ql_link_t		*link;
6077 	uint16_t		index;
6078 	ql_tgt_t		*tq, *tq2;
6079 	uint16_t		loop_id, first_loop_id, last_loop_id;
6080 	int			rval = QL_SUCCESS;
6081 
6082 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6083 
6084 	/* Get head queue index. */
6085 	index = ql_alpa_to_index[d_id.b.al_pa];
6086 
6087 	/* Check for device already has a queue. */
6088 	tq = NULL;
6089 	for (link = ha->dev[index].first; link != NULL; link = link->next) {
6090 		tq = link->base_address;
6091 		if (tq->d_id.b24 == d_id.b24) {
6092 			loop_id = tq->loop_id;
6093 			break;
6094 		} else {
6095 			tq = NULL;
6096 		}
6097 	}
6098 
6099 	/* Let's stop issuing any IO and unsolicited logo */
6100 	if ((tq != NULL) && (!(ddi_in_panic()))) {
6101 		DEVICE_QUEUE_LOCK(tq);
6102 		tq->flags |= (TQF_QUEUE_SUSPENDED | TQF_PLOGI_PROGRS);
6103 		tq->flags &= ~TQF_RSCN_RCVD;
6104 		DEVICE_QUEUE_UNLOCK(tq);
6105 	}
6106 	if ((tq != NULL) && (tq->loop_id & PORT_LOST_ID) &&
6107 	    !(tq->flags & TQF_FABRIC_DEVICE)) {
6108 		loop_id = (uint16_t)(tq->loop_id & ~PORT_LOST_ID);
6109 	}
6110 
6111 	/* Special case for Nameserver */
6112 	if (d_id.b24 == 0xFFFFFC) {
6113 		loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_2425) ?
6114 		    SNS_24XX_HDL : SIMPLE_NAME_SERVER_LOOP_ID);
6115 		if (tq == NULL) {
6116 			ADAPTER_STATE_LOCK(ha);
6117 			tq = ql_dev_init(ha, d_id, loop_id);
6118 			ADAPTER_STATE_UNLOCK(ha);
6119 			if (tq == NULL) {
6120 				EL(ha, "failed=%xh, d_id=%xh\n",
6121 				    QL_FUNCTION_FAILED, d_id.b24);
6122 				return (QL_FUNCTION_FAILED);
6123 			}
6124 		}
6125 		rval = ql_login_fabric_port(ha, tq, loop_id);
6126 		if (rval == QL_SUCCESS) {
6127 			tq->loop_id = loop_id;
6128 			tq->flags |= TQF_FABRIC_DEVICE;
6129 			(void) ql_get_port_database(ha, tq, PDF_NONE);
6130 			ha->topology = (uint8_t)
6131 			    (ha->topology | QL_SNS_CONNECTION);
6132 		}
6133 	/* Check for device already logged in. */
6134 	} else if (tq != NULL && VALID_DEVICE_ID(ha, loop_id)) {
6135 		if (tq->flags & TQF_FABRIC_DEVICE) {
6136 			rval = ql_login_fabric_port(ha, tq, loop_id);
6137 			if (rval == QL_PORT_ID_USED) {
6138 				rval = QL_SUCCESS;
6139 			}
6140 		} else if (LOCAL_LOOP_ID(loop_id)) {
6141 			rval = ql_login_lport(ha, tq, loop_id, (uint16_t)
6142 			    (tq->flags & TQF_INITIATOR_DEVICE ?
6143 			    LLF_NONE : LLF_PLOGI));
6144 			if (rval == QL_SUCCESS) {
6145 				DEVICE_QUEUE_LOCK(tq);
6146 				tq->loop_id = loop_id;
6147 				DEVICE_QUEUE_UNLOCK(tq);
6148 			}
6149 		}
6150 	} else if (ha->topology & QL_SNS_CONNECTION) {
6151 		/* Locate unused loop ID. */
6152 		if (CFG_IST(ha, CFG_CTRL_2425)) {
6153 			first_loop_id = 0;
6154 			last_loop_id = LAST_N_PORT_HDL;
6155 		} else if (ha->topology & QL_F_PORT) {
6156 			first_loop_id = 0;
6157 			last_loop_id = SNS_LAST_LOOP_ID;
6158 		} else {
6159 			first_loop_id = SNS_FIRST_LOOP_ID;
6160 			last_loop_id = SNS_LAST_LOOP_ID;
6161 		}
6162 
6163 		/* Acquire adapter state lock. */
6164 		ADAPTER_STATE_LOCK(ha);
6165 
6166 		tq = ql_dev_init(ha, d_id, PORT_NO_LOOP_ID);
6167 		if (tq == NULL) {
6168 			EL(ha, "failed=%xh, d_id=%xh\n", QL_FUNCTION_FAILED,
6169 			    d_id.b24);
6170 
6171 			ADAPTER_STATE_UNLOCK(ha);
6172 
6173 			return (QL_FUNCTION_FAILED);
6174 		}
6175 
6176 		rval = QL_FUNCTION_FAILED;
6177 		loop_id = ha->pha->free_loop_id++;
6178 		for (index = (uint16_t)(last_loop_id - first_loop_id); index;
6179 		    index--) {
6180 			if (loop_id < first_loop_id ||
6181 			    loop_id > last_loop_id) {
6182 				loop_id = first_loop_id;
6183 				ha->pha->free_loop_id = (uint16_t)
6184 				    (loop_id + 1);
6185 			}
6186 
6187 			/* Bypass if loop ID used. */
6188 			for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
6189 				tq2 = ql_loop_id_to_queue(vha, loop_id);
6190 				if (tq2 != NULL && tq2 != tq) {
6191 					break;
6192 				}
6193 			}
6194 			if (vha != NULL || RESERVED_LOOP_ID(ha, loop_id) ||
6195 			    loop_id == ha->loop_id) {
6196 				loop_id = ha->pha->free_loop_id++;
6197 				continue;
6198 			}
6199 
6200 			ADAPTER_STATE_UNLOCK(ha);
6201 			rval = ql_login_fabric_port(ha, tq, loop_id);
6202 
6203 			/*
6204 			 * If PORT_ID_USED is returned
6205 			 * the login_fabric_port() updates
6206 			 * with the correct loop ID
6207 			 */
6208 			switch (rval) {
6209 			case QL_PORT_ID_USED:
6210 				/*
6211 				 * use f/w handle and try to
6212 				 * login again.
6213 				 */
6214 				ADAPTER_STATE_LOCK(ha);
6215 				ha->pha->free_loop_id--;
6216 				ADAPTER_STATE_UNLOCK(ha);
6217 				loop_id = tq->loop_id;
6218 				break;
6219 			case QL_SUCCESS:
6220 				tq->flags |= TQF_FABRIC_DEVICE;
6221 				(void) ql_get_port_database(ha,
6222 				    tq, PDF_NONE);
6223 				index = 1;
6224 				break;
6225 
6226 			case QL_LOOP_ID_USED:
6227 				tq->loop_id = PORT_NO_LOOP_ID;
6228 				loop_id = ha->pha->free_loop_id++;
6229 				break;
6230 
6231 			case QL_ALL_IDS_IN_USE:
6232 				tq->loop_id = PORT_NO_LOOP_ID;
6233 				index = 1;
6234 				break;
6235 
6236 			default:
6237 				tq->loop_id = PORT_NO_LOOP_ID;
6238 				index = 1;
6239 				break;
6240 			}
6241 
6242 			ADAPTER_STATE_LOCK(ha);
6243 		}
6244 
6245 		ADAPTER_STATE_UNLOCK(ha);
6246 	} else {
6247 		rval = QL_FUNCTION_FAILED;
6248 	}
6249 
6250 	if (rval != QL_SUCCESS) {
6251 		EL(ha, "failed=%xh, d_id=%xh\n", rval, d_id.b24);
6252 	} else {
6253 		EL(ha, "d_id=%xh, loop_id=%xh, "
6254 		    "wwpn=%02x%02x%02x%02x%02x%02x%02x%02xh\n", tq->d_id.b24,
6255 		    tq->loop_id, tq->port_name[0], tq->port_name[1],
6256 		    tq->port_name[2], tq->port_name[3], tq->port_name[4],
6257 		    tq->port_name[5], tq->port_name[6], tq->port_name[7]);
6258 	}
6259 	return (rval);
6260 }
6261 
6262 /*
6263  * ql_login_fabric_port
6264  *	Issue login fabric port mailbox command.
6265  *
6266  * Input:
6267  *	ha:		adapter state pointer.
6268  *	tq:		target queue pointer.
6269  *	loop_id:	FC Loop ID.
6270  *
6271  * Returns:
6272  *	ql local function return status code.
6273  *
6274  * Context:
6275  *	Kernel context.
6276  */
6277 static int
6278 ql_login_fabric_port(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t loop_id)
6279 {
6280 	int		rval;
6281 	int		index;
6282 	int		retry = 0;
6283 	port_id_t	d_id;
6284 	ql_tgt_t	*newq;
6285 	ql_mbx_data_t	mr;
6286 
6287 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6288 
6289 	/*
6290 	 * QL_PARAMETER_ERROR also means the firmware is
6291 	 * not able to allocate PCB entry due to resource
6292 	 * issues, or collision.
6293 	 */
6294 	do {
6295 		rval = ql_login_fport(ha, tq, loop_id, LFF_NONE, &mr);
6296 		if ((rval == QL_PARAMETER_ERROR) ||
6297 		    ((rval == QL_COMMAND_ERROR) && (mr.mb[1] == 2 ||
6298 		    mr.mb[1] == 3 || mr.mb[1] == 7 || mr.mb[1] == 0xd))) {
6299 			retry++;
6300 			drv_usecwait(10 * MILLISEC);
6301 		} else {
6302 			break;
6303 		}
6304 	} while (retry < 5);
6305 
6306 	switch (rval) {
6307 	case QL_SUCCESS:
6308 		tq->loop_id = loop_id;
6309 		break;
6310 
6311 	case QL_PORT_ID_USED:
6312 		/*
6313 		 * This Loop ID should NOT be in use in drivers
6314 		 */
6315 		newq = ql_loop_id_to_queue(ha, mr.mb[1]);
6316 
6317 		if (newq != NULL && newq != tq && tq->logout_sent == 0) {
6318 			cmn_err(CE_WARN, "ql_login_fabric_port(%d): logout of "
6319 			    "dup loop_id=%xh, d_id=%xh", ha->instance,
6320 			    newq->loop_id, newq->d_id.b24);
6321 			ql_send_logo(ha, newq, NULL);
6322 		}
6323 
6324 		tq->loop_id = mr.mb[1];
6325 		break;
6326 
6327 	case QL_LOOP_ID_USED:
6328 		d_id.b.al_pa = LSB(mr.mb[2]);
6329 		d_id.b.area = MSB(mr.mb[2]);
6330 		d_id.b.domain = LSB(mr.mb[1]);
6331 
6332 		newq = ql_d_id_to_queue(ha, d_id);
6333 		if (newq && (newq->loop_id != loop_id)) {
6334 			/*
6335 			 * This should NEVER ever happen; but this
6336 			 * code is needed to bail out when the worst
6337 			 * case happens - or as used to happen before
6338 			 */
6339 			ASSERT(newq->d_id.b24 == d_id.b24);
6340 
6341 			QL_PRINT_2(CE_CONT, "(%d,%d): Loop ID is now "
6342 			    "reassigned; old pairs: [%xh, %xh] and [%xh, %xh];"
6343 			    "new pairs: [%xh, unknown] and [%xh, %xh]\n",
6344 			    ha->instance, ha->vp_index, tq->d_id.b24, loop_id,
6345 			    newq->d_id.b24, newq->loop_id, tq->d_id.b24,
6346 			    newq->d_id.b24, loop_id);
6347 
6348 			if ((newq->d_id.b24 & 0xff) != (d_id.b24 & 0xff)) {
6349 				ADAPTER_STATE_LOCK(ha);
6350 
6351 				index = ql_alpa_to_index[newq->d_id.b.al_pa];
6352 				ql_add_link_b(&ha->dev[index], &newq->device);
6353 
6354 				newq->d_id.b24 = d_id.b24;
6355 
6356 				index = ql_alpa_to_index[d_id.b.al_pa];
6357 				ql_add_link_b(&ha->dev[index], &newq->device);
6358 
6359 				ADAPTER_STATE_UNLOCK(ha);
6360 			}
6361 
6362 			(void) ql_get_port_database(ha, newq, PDF_NONE);
6363 
6364 		}
6365 
6366 		/*
6367 		 * Invalidate the loop ID for the
6368 		 * us to obtain a new one.
6369 		 */
6370 		tq->loop_id = PORT_NO_LOOP_ID;
6371 		break;
6372 
6373 	case QL_ALL_IDS_IN_USE:
6374 		rval = QL_FUNCTION_FAILED;
6375 		EL(ha, "no loop id's available\n");
6376 		break;
6377 
6378 	default:
6379 		if (rval == QL_COMMAND_ERROR) {
6380 			switch (mr.mb[1]) {
6381 			case 2:
6382 			case 3:
6383 				rval = QL_MEMORY_ALLOC_FAILED;
6384 				break;
6385 
6386 			case 4:
6387 				rval = QL_FUNCTION_TIMEOUT;
6388 				break;
6389 			case 7:
6390 				rval = QL_FABRIC_NOT_INITIALIZED;
6391 				break;
6392 			default:
6393 				EL(ha, "cmd rtn; mb1=%xh\n", mr.mb[1]);
6394 				break;
6395 			}
6396 		} else {
6397 			cmn_err(CE_WARN, "%s(%d): login fabric port failed"
6398 			    " D_ID=%xh, rval=%xh, mb1=%xh", QL_NAME,
6399 			    ha->instance, tq->d_id.b24, rval, mr.mb[1]);
6400 		}
6401 		break;
6402 	}
6403 
6404 	if (rval != QL_SUCCESS && rval != QL_PORT_ID_USED &&
6405 	    rval != QL_LOOP_ID_USED) {
6406 		EL(ha, "failed=%xh\n", rval);
6407 	} else {
6408 		/*EMPTY*/
6409 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6410 	}
6411 	return (rval);
6412 }
6413 
6414 /*
6415  * ql_logout_port
6416  *	Logs out a device if possible.
6417  *
6418  * Input:
6419  *	ha:	adapter state pointer.
6420  *	d_id:	24 bit port ID.
6421  *
6422  * Returns:
6423  *	QL local function return status code.
6424  *
6425  * Context:
6426  *	Kernel context.
6427  */
6428 static int
6429 ql_logout_port(ql_adapter_state_t *ha, port_id_t d_id)
6430 {
6431 	ql_link_t	*link;
6432 	ql_tgt_t	*tq;
6433 	uint16_t	index;
6434 
6435 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6436 
6437 	/* Get head queue index. */
6438 	index = ql_alpa_to_index[d_id.b.al_pa];
6439 
6440 	/* Get device queue. */
6441 	tq = NULL;
6442 	for (link = ha->dev[index].first; link != NULL; link = link->next) {
6443 		tq = link->base_address;
6444 		if (tq->d_id.b24 == d_id.b24) {
6445 			break;
6446 		} else {
6447 			tq = NULL;
6448 		}
6449 	}
6450 
6451 	if (tq != NULL && tq->flags & TQF_FABRIC_DEVICE) {
6452 		(void) ql_logout_fabric_port(ha, tq);
6453 		tq->loop_id = PORT_NO_LOOP_ID;
6454 	}
6455 
6456 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6457 
6458 	return (QL_SUCCESS);
6459 }
6460 
6461 /*
6462  * ql_dev_init
6463  *	Initialize/allocate device queue.
6464  *
6465  * Input:
6466  *	ha:		adapter state pointer.
6467  *	d_id:		device destination ID
6468  *	loop_id:	device loop ID
6469  *	ADAPTER_STATE_LOCK must be already obtained.
6470  *
6471  * Returns:
6472  *	NULL = failure
6473  *
6474  * Context:
6475  *	Kernel context.
6476  */
6477 ql_tgt_t *
6478 ql_dev_init(ql_adapter_state_t *ha, port_id_t d_id, uint16_t loop_id)
6479 {
6480 	ql_link_t	*link;
6481 	uint16_t	index;
6482 	ql_tgt_t	*tq;
6483 
6484 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh, loop_id=%xh\n",
6485 	    ha->instance, d_id.b24, loop_id);
6486 
6487 	index = ql_alpa_to_index[d_id.b.al_pa];
6488 
6489 	/* If device queue exists, set proper loop ID. */
6490 	tq = NULL;
6491 	for (link = ha->dev[index].first; link != NULL; link = link->next) {
6492 		tq = link->base_address;
6493 		if (tq->d_id.b24 == d_id.b24) {
6494 			tq->loop_id = loop_id;
6495 
6496 			/* Reset port down retry count. */
6497 			tq->port_down_retry_count = ha->port_down_retry_count;
6498 			tq->qfull_retry_count = ha->qfull_retry_count;
6499 
6500 			break;
6501 		} else {
6502 			tq = NULL;
6503 		}
6504 	}
6505 
6506 	/* If device does not have queue. */
6507 	if (tq == NULL) {
6508 		tq = (ql_tgt_t *)kmem_zalloc(sizeof (ql_tgt_t), KM_SLEEP);
6509 		if (tq != NULL) {
6510 			/*
6511 			 * mutex to protect the device queue,
6512 			 * does not block interrupts.
6513 			 */
6514 			mutex_init(&tq->mutex, NULL, MUTEX_DRIVER,
6515 			    (ha->iflags & IFLG_INTR_AIF) ?
6516 			    (void *)(uintptr_t)ha->intr_pri :
6517 			    (void *)(uintptr_t)ha->iblock_cookie);
6518 
6519 			tq->d_id.b24 = d_id.b24;
6520 			tq->loop_id = loop_id;
6521 			tq->device.base_address = tq;
6522 			tq->iidma_rate = IIDMA_RATE_INIT;
6523 
6524 			/* Reset port down retry count. */
6525 			tq->port_down_retry_count = ha->port_down_retry_count;
6526 			tq->qfull_retry_count = ha->qfull_retry_count;
6527 
6528 			/* Add device to device queue. */
6529 			ql_add_link_b(&ha->dev[index], &tq->device);
6530 		}
6531 	}
6532 
6533 	if (tq == NULL) {
6534 		EL(ha, "failed, d_id=%xh, loop_id=%xh\n", d_id.b24, loop_id);
6535 	} else {
6536 		/*EMPTY*/
6537 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6538 	}
6539 	return (tq);
6540 }
6541 
6542 /*
6543  * ql_dev_free
6544  *	Remove queue from device list and frees resources used by queue.
6545  *
6546  * Input:
6547  *	ha:	adapter state pointer.
6548  *	tq:	target queue pointer.
6549  *	ADAPTER_STATE_LOCK must be already obtained.
6550  *
6551  * Context:
6552  *	Kernel context.
6553  */
6554 static void
6555 ql_dev_free(ql_adapter_state_t *ha, ql_tgt_t *tq)
6556 {
6557 	ql_link_t	*link;
6558 	uint16_t	index;
6559 	ql_lun_t	*lq;
6560 
6561 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6562 
6563 	for (link = tq->lun_queues.first; link != NULL; link = link->next) {
6564 		lq = link->base_address;
6565 		if (lq->cmd.first != NULL) {
6566 			return;
6567 		}
6568 	}
6569 
6570 	if (tq->outcnt == 0) {
6571 		/* Get head queue index. */
6572 		index = ql_alpa_to_index[tq->d_id.b.al_pa];
6573 		for (link = ha->dev[index].first; link != NULL;
6574 		    link = link->next) {
6575 			if (link->base_address == tq) {
6576 				ql_remove_link(&ha->dev[index], link);
6577 
6578 				for (link = tq->lun_queues.first;
6579 				    link != NULL; /* CSTYLE */) {
6580 					lq = link->base_address;
6581 					link = link->next;
6582 
6583 					ql_remove_link(&tq->lun_queues,
6584 					    &lq->link);
6585 					kmem_free(lq, sizeof (ql_lun_t));
6586 				}
6587 
6588 				mutex_destroy(&tq->mutex);
6589 				kmem_free(tq, sizeof (ql_tgt_t));
6590 				break;
6591 			}
6592 		}
6593 	}
6594 
6595 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6596 }
6597 
6598 /*
6599  * ql_lun_queue
 *	Allocate a LUN queue if one does not already exist.
6601  *
6602  * Input:
6603  *	ha:	adapter state pointer.
6604  *	tq:	target queue.
6605  *	lun:	LUN number.
6606  *
6607  * Returns:
6608  *	NULL = failure
6609  *
6610  * Context:
6611  *	Kernel context.
6612  */
6613 static ql_lun_t *
6614 ql_lun_queue(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t lun)
6615 {
6616 	ql_lun_t	*lq;
6617 	ql_link_t	*link;
6618 
6619 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6620 
6621 	/* Fast path. */
6622 	if (tq->last_lun_queue != NULL && tq->last_lun_queue->lun_no == lun) {
6623 		QL_PRINT_3(CE_CONT, "(%d): fast done\n", ha->instance);
6624 		return (tq->last_lun_queue);
6625 	}
6626 
6627 	if (lun >= MAX_LUNS) {
6628 		EL(ha, "Exceeded MAX_LUN=%d, lun=%d\n", MAX_LUNS, lun);
6629 		return (NULL);
6630 	}
6631 	/* If device queue exists, set proper loop ID. */
6632 	lq = NULL;
6633 	for (link = tq->lun_queues.first; link != NULL; link = link->next) {
6634 		lq = link->base_address;
6635 		if (lq->lun_no == lun) {
6636 			QL_PRINT_3(CE_CONT, "(%d): found done\n",
6637 			    ha->instance);
6638 			tq->last_lun_queue = lq;
6639 			return (lq);
6640 		}
6641 	}
6642 
6643 	/* If queue does exist. */
6644 	lq = (ql_lun_t *)kmem_zalloc(sizeof (ql_lun_t), KM_SLEEP);
6645 
6646 	/* Initialize LUN queue. */
6647 	if (lq != NULL) {
6648 		lq->link.base_address = lq;
6649 
6650 		lq->lun_no = lun;
6651 		lq->target_queue = tq;
6652 
6653 		DEVICE_QUEUE_LOCK(tq);
6654 		ql_add_link_b(&tq->lun_queues, &lq->link);
6655 		DEVICE_QUEUE_UNLOCK(tq);
6656 		tq->last_lun_queue = lq;
6657 	}
6658 
6659 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6660 
6661 	return (lq);
6662 }
6663 
6664 /*
6665  * ql_fcp_scsi_cmd
6666  *	Process fibre channel (FCP) SCSI protocol commands.
6667  *
6668  * Input:
6669  *	ha = adapter state pointer.
6670  *	pkt = pointer to fc_packet.
6671  *	sp = srb pointer.
6672  *
6673  * Returns:
6674  *	FC_SUCCESS - the packet was accepted for transport.
6675  *	FC_TRANSPORT_ERROR - a transport error occurred.
6676  *
6677  * Context:
6678  *	Kernel context.
6679  */
static int
ql_fcp_scsi_cmd(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_srb_t *sp)
{
	port_id_t	d_id;
	ql_tgt_t	*tq;
	uint64_t	*ptr;
	uint16_t	lun;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Prefer the cached target queue; fall back to a D_ID lookup. */
	tq = (ql_tgt_t *)pkt->pkt_fca_device;
	if (tq == NULL) {
		d_id.r.rsvd_1 = 0;
		d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
		tq = ql_d_id_to_queue(ha, d_id);
	}

	/* Extract the LUN (byte-swapped) from the FCP entity address. */
	sp->fcp = (struct fcp_cmd *)pkt->pkt_cmd;
	lun = CHAR_TO_SHORT(lobyte(sp->fcp->fcp_ent_addr.ent_addr_0),
	    hibyte(sp->fcp->fcp_ent_addr.ent_addr_0));

	if (tq != NULL &&
	    (sp->lun_queue = ql_lun_queue(ha, tq, lun)) != NULL) {

		/*
		 * zero out FCP response; 24 Bytes
		 * (three 64-bit stores - assumes pkt_resp is 8-byte
		 * aligned and at least 24 bytes; TODO confirm with the
		 * transport's buffer allocation)
		 */
		ptr = (uint64_t *)pkt->pkt_resp;
		*ptr++ = 0; *ptr++ = 0; *ptr++ = 0;

		/* Handle task management function. */
		if ((sp->fcp->fcp_cntl.cntl_kill_tsk |
		    sp->fcp->fcp_cntl.cntl_clr_aca |
		    sp->fcp->fcp_cntl.cntl_reset_tgt |
		    sp->fcp->fcp_cntl.cntl_reset_lun |
		    sp->fcp->fcp_cntl.cntl_clr_tsk |
		    sp->fcp->fcp_cntl.cntl_abort_tsk) != 0) {
			ql_task_mgmt(ha, tq, pkt, sp);
		} else {
			/* Ordinary SCSI command: update I/O statistics. */
			ha->pha->xioctl->IosRequested++;
			ha->pha->xioctl->BytesRequested += (uint32_t)
			    sp->fcp->fcp_data_len;

			/*
			 * Setup for commands with data transfer
			 */
			sp->iocb = ha->fcp_cmd;
			if (sp->fcp->fcp_data_len != 0) {
				/*
				 * FCP data is bound to pkt_data_dma
				 */
				if (sp->fcp->fcp_cntl.cntl_write_data) {
					(void) ddi_dma_sync(pkt->pkt_data_dma,
					    0, 0, DDI_DMA_SYNC_FORDEV);
				}

				/*
				 * Setup IOCB count.  Cookies beyond what
				 * fits in the command IOCB spill into
				 * continuation IOCBs; "+2" covers the
				 * command IOCB plus a partially filled
				 * continuation IOCB.
				 */
				if (pkt->pkt_data_cookie_cnt > ha->cmd_segs) {
					uint32_t	cnt;

					cnt = pkt->pkt_data_cookie_cnt -
					    ha->cmd_segs;
					sp->req_cnt = (uint16_t)
					    (cnt / ha->cmd_cont_segs);
					if (cnt % ha->cmd_cont_segs) {
						sp->req_cnt = (uint16_t)
						    (sp->req_cnt + 2);
					} else {
						sp->req_cnt++;
					}
				} else {
					sp->req_cnt = 1;
				}
			} else {
				sp->req_cnt = 1;
			}
			QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

			return (ql_start_cmd(ha, tq, pkt, sp));
		}
	} else {
		/* No target or LUN queue - reject the packet locally. */
		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;

		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp)
			ql_awaken_task_daemon(ha, sp, 0, 0);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (FC_SUCCESS);
}
6772 
6773 /*
6774  * ql_task_mgmt
6775  *	Task management function processor.
6776  *
6777  * Input:
6778  *	ha:	adapter state pointer.
6779  *	tq:	target queue pointer.
6780  *	pkt:	pointer to fc_packet.
6781  *	sp:	SRB pointer.
6782  *
6783  * Context:
6784  *	Kernel context.
6785  */
6786 static void
6787 ql_task_mgmt(ql_adapter_state_t *ha, ql_tgt_t *tq, fc_packet_t *pkt,
6788     ql_srb_t *sp)
6789 {
6790 	fcp_rsp_t		*fcpr;
6791 	struct fcp_rsp_info	*rsp;
6792 	uint16_t		lun;
6793 
6794 	ASSERT(pkt->pkt_cmd_dma == NULL && pkt->pkt_resp_dma == NULL);
6795 
6796 	fcpr = (fcp_rsp_t *)pkt->pkt_resp;
6797 	rsp = (struct fcp_rsp_info *)pkt->pkt_resp + sizeof (fcp_rsp_t);
6798 
6799 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6800 
6801 	bzero(fcpr, pkt->pkt_rsplen);
6802 
6803 	fcpr->fcp_u.fcp_status.rsp_len_set = 1;
6804 	fcpr->fcp_response_len = 8;
6805 	lun = CHAR_TO_SHORT(lobyte(sp->fcp->fcp_ent_addr.ent_addr_0),
6806 	    hibyte(sp->fcp->fcp_ent_addr.ent_addr_0));
6807 
6808 	if (sp->fcp->fcp_cntl.cntl_clr_aca) {
6809 		if (ql_clear_aca(ha, tq, lun) != QL_SUCCESS) {
6810 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
6811 		}
6812 	} else if (sp->fcp->fcp_cntl.cntl_reset_lun) {
6813 		if (ql_lun_reset(ha, tq, lun) != QL_SUCCESS) {
6814 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
6815 		}
6816 	} else if (sp->fcp->fcp_cntl.cntl_reset_tgt) {
6817 		if (ql_target_reset(ha, tq, ha->loop_reset_delay) !=
6818 		    QL_SUCCESS) {
6819 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
6820 		}
6821 	} else if (sp->fcp->fcp_cntl.cntl_clr_tsk) {
6822 		if (ql_clear_task_set(ha, tq, lun) != QL_SUCCESS) {
6823 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
6824 		}
6825 	} else if (sp->fcp->fcp_cntl.cntl_abort_tsk) {
6826 		if (ql_abort_task_set(ha, tq, lun) != QL_SUCCESS) {
6827 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
6828 		}
6829 	} else {
6830 		rsp->rsp_code = FCP_TASK_MGMT_NOT_SUPPTD;
6831 	}
6832 
6833 	pkt->pkt_state = FC_PKT_SUCCESS;
6834 
6835 	/* Do command callback. */
6836 	if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
6837 		ql_awaken_task_daemon(ha, sp, 0, 0);
6838 	}
6839 
6840 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6841 }
6842 
6843 /*
6844  * ql_fcp_ip_cmd
 *	Process fibre channel (FCP) Internet protocol (IP) commands.
6846  *
6847  * Input:
6848  *	ha:	adapter state pointer.
6849  *	pkt:	pointer to fc_packet.
6850  *	sp:	SRB pointer.
6851  *
6852  * Returns:
6853  *	FC_SUCCESS - the packet was accepted for transport.
6854  *	FC_TRANSPORT_ERROR - a transport error occurred.
6855  *
6856  * Context:
6857  *	Kernel context.
6858  */
6859 static int
6860 ql_fcp_ip_cmd(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_srb_t *sp)
6861 {
6862 	port_id_t	d_id;
6863 	ql_tgt_t	*tq;
6864 
6865 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6866 
6867 	tq = (ql_tgt_t *)pkt->pkt_fca_device;
6868 	if (tq == NULL) {
6869 		d_id.r.rsvd_1 = 0;
6870 		d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
6871 		tq = ql_d_id_to_queue(ha, d_id);
6872 	}
6873 
6874 	if (tq != NULL && (sp->lun_queue = ql_lun_queue(ha, tq, 0)) != NULL) {
6875 		/*
6876 		 * IP data is bound to pkt_cmd_dma
6877 		 */
6878 		(void) ddi_dma_sync(pkt->pkt_cmd_dma,
6879 		    0, 0, DDI_DMA_SYNC_FORDEV);
6880 
6881 		/* Setup IOCB count. */
6882 		sp->iocb = ha->ip_cmd;
6883 		if (pkt->pkt_cmd_cookie_cnt > ha->cmd_segs) {
6884 			uint32_t	cnt;
6885 
6886 			cnt = pkt->pkt_cmd_cookie_cnt - ha->cmd_segs;
6887 			sp->req_cnt = (uint16_t)(cnt / ha->cmd_cont_segs);
6888 			if (cnt % ha->cmd_cont_segs) {
6889 				sp->req_cnt = (uint16_t)(sp->req_cnt + 2);
6890 			} else {
6891 				sp->req_cnt++;
6892 			}
6893 		} else {
6894 			sp->req_cnt = 1;
6895 		}
6896 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6897 
6898 		return (ql_start_cmd(ha, tq, pkt, sp));
6899 	} else {
6900 		pkt->pkt_state = FC_PKT_LOCAL_RJT;
6901 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
6902 
6903 		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp)
6904 			ql_awaken_task_daemon(ha, sp, 0, 0);
6905 	}
6906 
6907 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6908 
6909 	return (FC_SUCCESS);
6910 }
6911 
6912 /*
6913  * ql_fcp_data_rsp
6914  *	Process fibre channel protocol (FCP) data and response.
6915  *
6916  * Input:
6917  *	ha:	adapter state pointer.
6918  *	pkt:	pointer to fc_packet.
6919  *	sp:	SRB pointer.
6920  *
6921  * Returns:
6922  *	FC_SUCCESS - the packet was accepted for transport.
6923  *	FC_TRANSPORT_ERROR - a transport error occurred.
6924  *
6925  * Context:
6926  *	Kernel context.
6927  */
6928 static int
6929 ql_fcp_data_rsp(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_srb_t *sp)
6930 {
6931 	port_id_t	d_id;
6932 	ql_tgt_t	*tq;
6933 	uint16_t	lun;
6934 
6935 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6936 
6937 	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
6938 	tq = ql_d_id_to_queue(ha, d_id);
6939 
6940 	sp->fcp = (struct fcp_cmd *)pkt->pkt_cmd;
6941 	lun = CHAR_TO_SHORT(lobyte(sp->fcp->fcp_ent_addr.ent_addr_0),
6942 	    hibyte(sp->fcp->fcp_ent_addr.ent_addr_0));
6943 
6944 	if (tq != NULL &&
6945 	    (sp->lun_queue = ql_lun_queue(ha, tq, lun)) != NULL) {
6946 		sp->fcp = (struct fcp_cmd *)pkt->pkt_cmd;
6947 
6948 		/*
6949 		 * Setup for commands with data transfer
6950 		 */
6951 		if (pkt->pkt_cmdlen != 0 &&
6952 		    ((pkt->pkt_tran_type == FC_PKT_OUTBOUND) ||
6953 		    sp->flags & SRB_FCP_RSP_PKT)) {
6954 			(void) ddi_dma_sync(pkt->pkt_cmd_dma, 0, 0,
6955 			    DDI_DMA_SYNC_FORDEV);
6956 		}
6957 
6958 		/* Setup IOCB count. */
6959 		sp->iocb = ha->ctio_cmd;
6960 		sp->req_cnt = 1;
6961 
6962 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6963 
6964 		return (ql_start_cmd(ha, tq, pkt, sp));
6965 	} else {
6966 		pkt->pkt_state = FC_PKT_LOCAL_RJT;
6967 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
6968 
6969 		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp)
6970 			ql_awaken_task_daemon(ha, sp, 0, 0);
6971 	}
6972 
6973 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6974 
6975 	return (FC_SUCCESS);
6976 }
6977 
6978 /*
6979  * ql_fc_services
6980  *	Process fibre channel services (name server).
6981  *
6982  * Input:
6983  *	ha:	adapter state pointer.
6984  *	pkt:	pointer to fc_packet.
6985  *
6986  * Returns:
6987  *	FC_SUCCESS - the packet was accepted for transport.
6988  *	FC_TRANSPORT_ERROR - a transport error occurred.
6989  *
6990  * Context:
6991  *	Kernel context.
6992  */
static int
ql_fc_services(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	uint32_t	cnt;
	fc_ct_header_t	hdr;
	la_els_rjt_t	rjt;
	port_id_t	d_id;
	ql_tgt_t	*tq;
	ql_srb_t	*sp;
	int		rval;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Copy the CT header out of the command DMA buffer. */
	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&hdr,
	    (uint8_t *)pkt->pkt_cmd, sizeof (hdr), DDI_DEV_AUTOINCR);

	bzero(&rjt, sizeof (rjt));

	/*
	 * Do some sanity checks: total size is the AIU size (in words,
	 * hence * 4) plus the CT header, and it must fit in pkt_resp.
	 */
	cnt = (uint32_t)((uint32_t)(hdr.ct_aiusize * 4) +
	    sizeof (fc_ct_header_t));
	ASSERT(cnt <= (uint32_t)pkt->pkt_rsplen);
	if (cnt > (uint32_t)pkt->pkt_rsplen) {
		EL(ha, "FC_ELS_MALFORMED, cnt=%xh, size=%xh\n", cnt,
		    pkt->pkt_rsplen);
		return (FC_ELS_MALFORMED);
	}

	switch (hdr.ct_fcstype) {
	case FCSTYPE_DIRECTORY:
	case FCSTYPE_MGMTSERVICE:
		/* An FCA must make sure that the header is in big endian */
		ql_cthdr_endian(pkt->pkt_cmd_acc, pkt->pkt_cmd, B_FALSE);

		d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
		tq = ql_d_id_to_queue(ha, d_id);
		sp = (ql_srb_t *)pkt->pkt_fca_private;
		if (tq == NULL ||
		    (sp->lun_queue = ql_lun_queue(ha, tq, 0)) == NULL) {
			/*
			 * No route to the service.  rval stays QL_SUCCESS
			 * so the LS_RJT payload below is NOT built; the
			 * packet is completed with a local-reject state
			 * via the common callback path instead.
			 */
			pkt->pkt_state = FC_PKT_LOCAL_RJT;
			pkt->pkt_reason = FC_REASON_NO_CONNECTION;
			rval = QL_SUCCESS;
			break;
		}

		/*
		 * Services data is bound to pkt_cmd_dma
		 */
		(void) ddi_dma_sync(pkt->pkt_cmd_dma, 0, 0,
		    DDI_DMA_SYNC_FORDEV);

		sp->flags |= SRB_MS_PKT;
		sp->retry_count = 32;

		/*
		 * Setup IOCB count.  Response cookies beyond what fits in
		 * the MS IOCB spill into continuation IOCBs; "+2" covers
		 * the MS IOCB plus a partially filled continuation IOCB.
		 */
		sp->iocb = ha->ms_cmd;
		if (pkt->pkt_resp_cookie_cnt > MS_DATA_SEGMENTS) {
			cnt = pkt->pkt_resp_cookie_cnt - MS_DATA_SEGMENTS;
			sp->req_cnt =
			    (uint16_t)(cnt / CONT_TYPE_1_DATA_SEGMENTS);
			if (cnt % CONT_TYPE_1_DATA_SEGMENTS) {
				sp->req_cnt = (uint16_t)(sp->req_cnt + 2);
			} else {
				sp->req_cnt++;
			}
		} else {
			sp->req_cnt = 1;
		}
		rval = ql_start_cmd(ha, tq, pkt, sp);

		QL_PRINT_3(CE_CONT, "(%d): done, ql_start_cmd=%xh\n",
		    ha->instance, rval);

		return (rval);

	default:
		EL(ha, "unknown fcstype=%xh\n", hdr.ct_fcstype);
		rval = QL_FUNCTION_PARAMETER_ERROR;
		break;
	}

	if (rval != QL_SUCCESS) {

		/* Build RJT. */
		rjt.ls_code.ls_code = LA_ELS_RJT;
		rjt.reason = FC_REASON_CMD_UNSUPPORTED;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_UNSUPPORTED;
	}

	/* Do command callback. */
	if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
		ql_awaken_task_daemon(ha, (ql_srb_t *)pkt->pkt_fca_private,
		    0, 0);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (FC_SUCCESS);
}
7097 
7098 /*
7099  * ql_cthdr_endian
7100  *	Change endianess of ct passthrough header and payload.
7101  *
7102  * Input:
7103  *	acc_handle:	DMA buffer access handle.
7104  *	ct_hdr:		Pointer to header.
7105  *	restore:	Restore first flag.
7106  *
7107  * Context:
7108  *	Interrupt or Kernel context, no mailbox commands allowed.
7109  */
void
ql_cthdr_endian(ddi_acc_handle_t acc_handle, caddr_t ct_hdr,
    boolean_t restore)
{
	uint8_t		i, *bp;
	fc_ct_header_t	hdr;
	uint32_t	*hdrp = (uint32_t *)&hdr;

	/* Read the CT header from the DMA buffer into a local copy. */
	ddi_rep_get8(acc_handle, (uint8_t *)&hdr,
	    (uint8_t *)ct_hdr, sizeof (hdr), DDI_DEV_AUTOINCR);

	/*
	 * When restoring (wire -> host order), swap the header words
	 * first so ct_fcstype/ct_cmdrsp below are readable.
	 */
	if (restore) {
		for (i = 0; i < ((sizeof (hdr)) / (sizeof (uint32_t))); i++) {
			*hdrp = BE_32(*hdrp);
			hdrp++;
		}
	}

	if (hdr.ct_fcstype == FCSTYPE_DIRECTORY) {
		/* Payload starts immediately after the CT header. */
		bp = (uint8_t *)ct_hdr + sizeof (fc_ct_header_t);

		/*
		 * Swap the command-specific payload fields.
		 * NOTE(review): the BIG_ENDIAN_32/BIG_ENDIAN/BIG_ENDIAN_64
		 * macros appear to byte-swap the addressed field in the
		 * buffer in place - confirm against their definitions in
		 * the ql headers.
		 */
		switch (hdr.ct_cmdrsp) {
		case NS_GA_NXT:
		case NS_GPN_ID:
		case NS_GNN_ID:
		case NS_GCS_ID:
		case NS_GFT_ID:
		case NS_GSPN_ID:
		case NS_GPT_ID:
		case NS_GID_FT:
		case NS_GID_PT:
		case NS_RPN_ID:
		case NS_RNN_ID:
		case NS_RSPN_ID:
		case NS_DA_ID:
			/* Single 32-bit port ID field. */
			BIG_ENDIAN_32(bp);
			break;
		case NS_RFT_ID:
		case NS_RCS_ID:
		case NS_RPT_ID:
			/* Port ID followed by a second 32-bit field. */
			BIG_ENDIAN_32(bp);
			bp += 4;
			BIG_ENDIAN_32(bp);
			break;
		case NS_GNN_IP:
		case NS_GIPA_IP:
			/* 16-byte IP address field. */
			BIG_ENDIAN(bp, 16);
			break;
		case NS_RIP_NN:
			/* 8-byte node name, then 16-byte IP address. */
			bp += 8;
			BIG_ENDIAN(bp, 16);
			break;
		case NS_RIPA_NN:
			/* 8-byte node name, then 64-bit IPA field. */
			bp += 8;
			BIG_ENDIAN_64(bp);
			break;
		default:
			break;
		}
	}

	/* When converting host -> wire order, swap the header last. */
	if (restore == B_FALSE) {
		for (i = 0; i < ((sizeof (hdr)) / (sizeof (uint32_t))); i++) {
			*hdrp = BE_32(*hdrp);
			hdrp++;
		}
	}

	/* Write the (possibly swapped) header back to the buffer. */
	ddi_rep_put8(acc_handle, (uint8_t *)&hdr,
	    (uint8_t *)ct_hdr, sizeof (hdr), DDI_DEV_AUTOINCR);
}
7181 
7182 /*
7183  * ql_start_cmd
7184  *	Finishes starting fibre channel protocol (FCP) command.
7185  *
7186  * Input:
7187  *	ha:	adapter state pointer.
7188  *	tq:	target queue pointer.
7189  *	pkt:	pointer to fc_packet.
7190  *	sp:	SRB pointer.
7191  *
7192  * Context:
7193  *	Kernel context.
7194  */
static int
ql_start_cmd(ql_adapter_state_t *ha, ql_tgt_t *tq, fc_packet_t *pkt,
    ql_srb_t *sp)
{
	int		rval = FC_SUCCESS;
	time_t		poll_wait = 0;
	ql_lun_t	*lq = sp->lun_queue;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	sp->handle = 0;

	/*
	 * Set poll for finish.  No-interrupt transports run the command
	 * synchronously; give them a default timeout if none supplied.
	 */
	if (pkt->pkt_tran_flags & FC_TRAN_NO_INTR) {
		sp->flags |= SRB_POLL;
		if (pkt->pkt_timeout == 0) {
			pkt->pkt_timeout = SCSI_POLL_TIMEOUT;
		}
	}

	/* Acquire device queue lock. */
	DEVICE_QUEUE_LOCK(tq);

	/*
	 * If we need authentication, report device busy to
	 * upper layers to retry later
	 */
	if (tq->flags & (TQF_RSCN_RCVD | TQF_NEED_AUTHENTICATION)) {
		DEVICE_QUEUE_UNLOCK(tq);
		EL(ha, "failed, FC_DEVICE_BUSY=%xh, d_id=%xh\n", tq->flags,
		    tq->d_id.b24);
		return (FC_DEVICE_BUSY);
	}

	/* Insert command onto watchdog queue. */
	if (!(pkt->pkt_tran_flags & FC_TRAN_DUMPING)) {
		ql_timeout_insert(ha, tq, sp);
	} else {
		/*
		 * Run dump requests in polled mode as kernel threads
		 * and interrupts may have been disabled.
		 */
		sp->flags |= SRB_POLL;
		sp->init_wdg_q_time = 0;
		sp->isp_timeout = 0;
	}

	/*
	 * If a polling command setup wait time.  Watchdog-tracked
	 * commands wait a bit longer than the watchdog window itself.
	 */
	if (sp->flags & SRB_POLL) {
		if (sp->flags & SRB_WATCHDOG_ENABLED) {
			poll_wait = (sp->wdg_q_time + 2) * WATCHDOG_TIME;
		} else {
			poll_wait = pkt->pkt_timeout;
		}
		ASSERT(poll_wait != 0);
	}

	/*
	 * Loop-down abort is in effect; complete the command
	 * immediately with a port-unavailable status.
	 */
	if (ha->pha->flags & ABORT_CMDS_LOOP_DOWN_TMO &&
	    (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING))) {
		/* Set ending status. */
		sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;

		/* Call done routine to handle completions. */
		sp->cmd.next = NULL;
		DEVICE_QUEUE_UNLOCK(tq);
		ql_done(&sp->cmd);
	} else {
		if (ddi_in_panic() && (sp->flags & SRB_POLL)) {
			/*
			 * Panic/dump path: bypass the device queue and
			 * issue the IOCB directly; reset the ISP once
			 * (lip_on_panic guards against repeating it).
			 */
			int do_lip = 0;

			ASSERT(ha->pha->outstanding_cmds[0] == NULL);

			DEVICE_QUEUE_UNLOCK(tq);

			ADAPTER_STATE_LOCK(ha);
			if ((do_lip = ha->pha->lip_on_panic) == 0) {
				ha->pha->lip_on_panic++;
			}
			ADAPTER_STATE_UNLOCK(ha);

			if (!do_lip) {

				/*
				 * That Qlogic F/W performs PLOGI, PRLI, etc
				 * is helpful here. If a PLOGI fails for some
				 * reason, you would get CS_PORT_LOGGED_OUT
				 * or some such error; and we should get a
				 * careful polled mode login kicked off inside
				 * of this driver itself. You don't have FC
				 * transport's services as all threads are
				 * suspended, interrupts disabled, and so
				 * on. Right now we do re-login if the packet
				 * state isn't FC_PKT_SUCCESS.
				 */
				(void) ql_abort_isp(ha);
			}

			ql_start_iocb(ha, sp);
		} else {
			/* Add the command to the device queue */
			if (pkt->pkt_tran_flags & FC_TRAN_HI_PRIORITY) {
				ql_add_link_t(&lq->cmd, &sp->cmd);
			} else {
				ql_add_link_b(&lq->cmd, &sp->cmd);
			}

			sp->flags |= SRB_IN_DEVICE_QUEUE;

			/* Check whether next message can be processed */
			ql_next(ha, lq);
		}
	}

	/* If polling, wait for finish. */
	if (poll_wait) {
		ASSERT(sp->flags & SRB_POLL);

		/*
		 * Timed out: try to abort; if the abort itself fails,
		 * pull the command back off the device queue.
		 */
		if (ql_poll_cmd(ha, sp, poll_wait) != QL_SUCCESS) {
			int	res;

			res = ql_abort((opaque_t)ha, pkt, 0);
			if (res != FC_SUCCESS && res != FC_ABORTED) {
				ASSERT(res == FC_OFFLINE ||
				    res == FC_ABORT_FAILED);

				DEVICE_QUEUE_LOCK(tq);
				ql_remove_link(&lq->cmd, &sp->cmd);
				sp->flags &= ~SRB_IN_DEVICE_QUEUE;
				DEVICE_QUEUE_UNLOCK(tq);
			}
		}

		if (pkt->pkt_state != FC_PKT_SUCCESS) {
			EL(ha, "failed, FC_TRANSPORT_ERROR\n");
			rval = FC_TRANSPORT_ERROR;
		}

		ASSERT((sp->flags & (SRB_IN_DEVICE_QUEUE |
		    SRB_IN_TOKEN_ARRAY)) == 0);

		/* During panic, re-login a port whose command failed. */
		if (ddi_in_panic()) {
			ASSERT(ha->pha->outstanding_cmds[0] == NULL);
			if (pkt->pkt_state != FC_PKT_SUCCESS) {
				port_id_t d_id;

				/*
				 * successful LOGIN implies by design
				 * that PRLI also succeeded for disks
				 * Note also that there is no special
				 * mailbox command to send PRLI.
				 */
				d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
				(void) ql_login_port(ha, d_id);
			}
		}

		/*
		 * This should only happen during CPR dumping
		 */
		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) &&
		    pkt->pkt_comp) {
			ASSERT(pkt->pkt_tran_flags & FC_TRAN_DUMPING);
			sp->flags &= ~SRB_POLL;
			(*pkt->pkt_comp)(pkt);
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
7366 
7367 /*
7368  * ql_poll_cmd
7369  *	Polls commands for completion.
7370  *
7371  * Input:
7372  *	ha = adapter state pointer.
7373  *	sp = SRB command pointer.
7374  *	poll_wait = poll wait time in seconds.
7375  *
7376  * Returns:
7377  *	QL local function return status code.
7378  *
7379  * Context:
7380  *	Kernel context.
7381  */
7382 static int
7383 ql_poll_cmd(ql_adapter_state_t *vha, ql_srb_t *sp, time_t poll_wait)
7384 {
7385 	int			rval = QL_SUCCESS;
7386 	time_t			msecs_left = poll_wait * 100;	/* 10ms inc */
7387 	ql_adapter_state_t	*ha = vha->pha;
7388 
7389 	while (sp->flags & SRB_POLL) {
7390 
7391 		if ((ha->flags & INTERRUPTS_ENABLED) == 0 ||
7392 		    ha->idle_timer >= 15 || ddi_in_panic()) {
7393 
7394 			/* If waiting for restart, do it now. */
7395 			if (ha->port_retry_timer != 0) {
7396 				ADAPTER_STATE_LOCK(ha);
7397 				ha->port_retry_timer = 0;
7398 				ADAPTER_STATE_UNLOCK(ha);
7399 
7400 				TASK_DAEMON_LOCK(ha);
7401 				ha->task_daemon_flags |= PORT_RETRY_NEEDED;
7402 				TASK_DAEMON_UNLOCK(ha);
7403 			}
7404 
7405 			if ((CFG_IST(ha, CFG_CTRL_2425) ?
7406 			    RD32_IO_REG(ha, istatus) :
7407 			    RD16_IO_REG(ha, istatus)) & RISC_INT) {
7408 				(void) ql_isr((caddr_t)ha);
7409 				INTR_LOCK(ha);
7410 				ha->intr_claimed = TRUE;
7411 				INTR_UNLOCK(ha);
7412 			}
7413 
7414 			/*
7415 			 * Call task thread function in case the
7416 			 * daemon is not running.
7417 			 */
7418 			TASK_DAEMON_LOCK(ha);
7419 
7420 			if (!ddi_in_panic() && QL_DAEMON_NOT_ACTIVE(ha) &&
7421 			    QL_TASK_PENDING(ha)) {
7422 				ha->task_daemon_flags |= TASK_THREAD_CALLED;
7423 				ql_task_thread(ha);
7424 				ha->task_daemon_flags &= ~TASK_THREAD_CALLED;
7425 			}
7426 
7427 			TASK_DAEMON_UNLOCK(ha);
7428 		}
7429 
7430 		if (msecs_left < 10) {
7431 			rval = QL_FUNCTION_TIMEOUT;
7432 			break;
7433 		}
7434 
7435 		/*
7436 		 * Polling interval is 10 milli seconds; Increasing
7437 		 * the polling interval to seconds since disk IO
7438 		 * timeout values are ~60 seconds is tempting enough,
7439 		 * but CPR dump time increases, and so will the crash
7440 		 * dump time; Don't toy with the settings without due
7441 		 * consideration for all the scenarios that will be
7442 		 * impacted.
7443 		 */
7444 		ql_delay(ha, 10000);
7445 		msecs_left -= 10;
7446 	}
7447 
7448 	return (rval);
7449 }
7450 
7451 /*
7452  * ql_next
7453  *	Retrieve and process next job in the device queue.
7454  *
7455  * Input:
7456  *	ha:	adapter state pointer.
7457  *	lq:	LUN queue pointer.
7458  *	DEVICE_QUEUE_LOCK must be already obtained.
7459  *
7460  * Output:
7461  *	Releases DEVICE_QUEUE_LOCK upon exit.
7462  *
7463  * Context:
7464  *	Interrupt or Kernel context, no mailbox commands allowed.
7465  */
7466 void
7467 ql_next(ql_adapter_state_t *vha, ql_lun_t *lq)
7468 {
7469 	ql_srb_t		*sp;
7470 	ql_link_t		*link;
7471 	ql_tgt_t		*tq = lq->target_queue;
7472 	ql_adapter_state_t	*ha = vha->pha;
7473 
7474 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7475 
7476 	if (ddi_in_panic()) {
7477 		DEVICE_QUEUE_UNLOCK(tq);
7478 		QL_PRINT_3(CE_CONT, "(%d): panic/active exit\n",
7479 		    ha->instance);
7480 		return;
7481 	}
7482 
7483 	while ((link = lq->cmd.first) != NULL) {
7484 		sp = link->base_address;
7485 
7486 		/* Exit if can not start commands. */
7487 		if (DRIVER_SUSPENDED(ha) ||
7488 		    (ha->flags & ONLINE) == 0 ||
7489 		    !VALID_DEVICE_ID(ha, tq->loop_id) ||
7490 		    sp->flags & SRB_ABORT ||
7491 		    tq->flags & (TQF_RSCN_RCVD | TQF_NEED_AUTHENTICATION |
7492 		    TQF_QUEUE_SUSPENDED)) {
7493 			EL(vha, "break, d_id=%xh, tdf=%xh, tqf=%xh, spf=%xh, "
7494 			    "haf=%xh, loop_id=%xh\n", tq->d_id.b24,
7495 			    ha->task_daemon_flags, tq->flags, sp->flags,
7496 			    ha->flags, tq->loop_id);
7497 			break;
7498 		}
7499 
7500 		/*
7501 		 * Find out the LUN number for untagged command use.
7502 		 * If there is an untagged command pending for the LUN,
7503 		 * we would not submit another untagged command
7504 		 * or if reached LUN execution throttle.
7505 		 */
7506 		if (sp->flags & SRB_FCP_CMD_PKT) {
7507 			if (lq->flags & LQF_UNTAGGED_PENDING ||
7508 			    lq->lun_outcnt >= ha->execution_throttle) {
7509 				QL_PRINT_8(CE_CONT, "(%d): break, d_id=%xh, "
7510 				    "lf=%xh, lun_outcnt=%xh\n", ha->instance,
7511 				    tq->d_id.b24, lq->flags, lq->lun_outcnt);
7512 				break;
7513 			}
7514 			if (sp->fcp->fcp_cntl.cntl_qtype ==
7515 			    FCP_QTYPE_UNTAGGED) {
7516 				/*
7517 				 * Set the untagged-flag for the LUN
7518 				 * so that no more untagged commands
7519 				 * can be submitted for this LUN.
7520 				 */
7521 				lq->flags |= LQF_UNTAGGED_PENDING;
7522 			}
7523 
7524 			/* Count command as sent. */
7525 			lq->lun_outcnt++;
7526 		}
7527 
7528 		/* Remove srb from device queue. */
7529 		ql_remove_link(&lq->cmd, &sp->cmd);
7530 		sp->flags &= ~SRB_IN_DEVICE_QUEUE;
7531 
7532 		tq->outcnt++;
7533 
7534 		ql_start_iocb(vha, sp);
7535 	}
7536 
7537 	/* Release device queue lock. */
7538 	DEVICE_QUEUE_UNLOCK(tq);
7539 
7540 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7541 }
7542 
7543 /*
7544  * ql_done
7545  *	Process completed commands.
7546  *
7547  * Input:
7548  *	link:	first command link in chain.
7549  *
7550  * Context:
7551  *	Interrupt or Kernel context, no mailbox commands allowed.
7552  */
7553 void
7554 ql_done(ql_link_t *link)
7555 {
7556 	ql_adapter_state_t	*ha;
7557 	ql_link_t		*next_link;
7558 	ql_srb_t		*sp;
7559 	ql_tgt_t		*tq;
7560 	ql_lun_t		*lq;
7561 
7562 	QL_PRINT_3(CE_CONT, "started\n");
7563 
7564 	for (; link != NULL; link = next_link) {
7565 		next_link = link->next;
7566 		sp = link->base_address;
7567 		ha = sp->ha;
7568 
7569 		if (sp->flags & SRB_UB_CALLBACK) {
7570 			QL_UB_LOCK(ha);
7571 			if (sp->flags & SRB_UB_IN_ISP) {
7572 				if (ha->ub_outcnt != 0) {
7573 					ha->ub_outcnt--;
7574 				}
7575 				QL_UB_UNLOCK(ha);
7576 				ql_isp_rcvbuf(ha);
7577 				QL_UB_LOCK(ha);
7578 			}
7579 			QL_UB_UNLOCK(ha);
7580 			ql_awaken_task_daemon(ha, sp, 0, 0);
7581 		} else {
7582 			/* Free outstanding command slot. */
7583 			if (sp->handle != 0) {
7584 				ha->outstanding_cmds[
7585 				    sp->handle & OSC_INDEX_MASK] = NULL;
7586 				sp->handle = 0;
7587 				sp->flags &= ~SRB_IN_TOKEN_ARRAY;
7588 			}
7589 
7590 			/* Acquire device queue lock. */
7591 			lq = sp->lun_queue;
7592 			tq = lq->target_queue;
7593 			DEVICE_QUEUE_LOCK(tq);
7594 
7595 			/* Decrement outstanding commands on device. */
7596 			if (tq->outcnt != 0) {
7597 				tq->outcnt--;
7598 			}
7599 
7600 			if (sp->flags & SRB_FCP_CMD_PKT) {
7601 				if (sp->fcp->fcp_cntl.cntl_qtype ==
7602 				    FCP_QTYPE_UNTAGGED) {
7603 					/*
7604 					 * Clear the flag for this LUN so that
7605 					 * untagged commands can be submitted
7606 					 * for it.
7607 					 */
7608 					lq->flags &= ~LQF_UNTAGGED_PENDING;
7609 				}
7610 
7611 				if (lq->lun_outcnt != 0) {
7612 					lq->lun_outcnt--;
7613 				}
7614 			}
7615 
7616 			/* Reset port down retry count on good completion. */
7617 			if (sp->pkt->pkt_reason == CS_COMPLETE) {
7618 				tq->port_down_retry_count =
7619 				    ha->port_down_retry_count;
7620 				tq->qfull_retry_count = ha->qfull_retry_count;
7621 			}
7622 
7623 			/* Place request back on top of target command queue */
7624 			if ((sp->flags & SRB_MS_PKT ||
7625 			    !(tq->flags & TQF_NEED_AUTHENTICATION)) &&
7626 			    sp->flags & SRB_RETRY &&
7627 			    (sp->flags & SRB_WATCHDOG_ENABLED &&
7628 			    sp->wdg_q_time > 1)) {
7629 				sp->flags &= ~(SRB_ISP_STARTED |
7630 				    SRB_ISP_COMPLETED | SRB_RETRY);
7631 
7632 				/* Reset watchdog timer */
7633 				sp->wdg_q_time = sp->init_wdg_q_time;
7634 
7635 				/* Issue marker command on reset status. */
7636 				if (!(ha->task_daemon_flags & LOOP_DOWN) &&
7637 				    (sp->pkt->pkt_reason == CS_RESET ||
7638 				    (CFG_IST(ha, CFG_CTRL_2425) &&
7639 				    sp->pkt->pkt_reason == CS_ABORTED))) {
7640 					(void) ql_marker(ha, tq->loop_id, 0,
7641 					    MK_SYNC_ID);
7642 				}
7643 
7644 				ql_add_link_t(&lq->cmd, &sp->cmd);
7645 				sp->flags |= SRB_IN_DEVICE_QUEUE;
7646 				ql_next(ha, lq);
7647 			} else {
7648 				/* Remove command from watchdog queue. */
7649 				if (sp->flags & SRB_WATCHDOG_ENABLED) {
7650 					ql_remove_link(&tq->wdg, &sp->wdg);
7651 					sp->flags &= ~SRB_WATCHDOG_ENABLED;
7652 				}
7653 
7654 				if (lq->cmd.first != NULL) {
7655 					ql_next(ha, lq);
7656 				} else {
7657 					/* Release LU queue specific lock. */
7658 					DEVICE_QUEUE_UNLOCK(tq);
7659 					if (ha->pha->pending_cmds.first !=
7660 					    NULL) {
7661 						ql_start_iocb(ha, NULL);
7662 					}
7663 				}
7664 
7665 				/* Sync buffers if required.  */
7666 				if (sp->flags & SRB_MS_PKT) {
7667 					(void) ddi_dma_sync(
7668 					    sp->pkt->pkt_resp_dma,
7669 					    0, 0, DDI_DMA_SYNC_FORCPU);
7670 				}
7671 
7672 				/* Map ISP completion codes. */
7673 				sp->pkt->pkt_expln = FC_EXPLN_NONE;
7674 				sp->pkt->pkt_action = FC_ACTION_RETRYABLE;
7675 				switch (sp->pkt->pkt_reason) {
7676 				case CS_COMPLETE:
7677 					sp->pkt->pkt_state = FC_PKT_SUCCESS;
7678 					break;
7679 				case CS_RESET:
7680 					/* Issue marker command. */
7681 					if (!(ha->task_daemon_flags &
7682 					    LOOP_DOWN)) {
7683 						(void) ql_marker(ha,
7684 						    tq->loop_id, 0,
7685 						    MK_SYNC_ID);
7686 					}
7687 					sp->pkt->pkt_state =
7688 					    FC_PKT_PORT_OFFLINE;
7689 					sp->pkt->pkt_reason =
7690 					    FC_REASON_ABORTED;
7691 					break;
7692 				case CS_RESOUCE_UNAVAILABLE:
7693 					sp->pkt->pkt_state = FC_PKT_LOCAL_BSY;
7694 					sp->pkt->pkt_reason =
7695 					    FC_REASON_PKT_BUSY;
7696 					break;
7697 
7698 				case CS_TIMEOUT:
7699 					sp->pkt->pkt_state = FC_PKT_TIMEOUT;
7700 					sp->pkt->pkt_reason =
7701 					    FC_REASON_HW_ERROR;
7702 					break;
7703 				case CS_DATA_OVERRUN:
7704 					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
7705 					sp->pkt->pkt_reason =
7706 					    FC_REASON_OVERRUN;
7707 					break;
7708 				case CS_PORT_UNAVAILABLE:
7709 				case CS_PORT_LOGGED_OUT:
7710 					sp->pkt->pkt_state =
7711 					    FC_PKT_PORT_OFFLINE;
7712 					sp->pkt->pkt_reason =
7713 					    FC_REASON_LOGIN_REQUIRED;
7714 					ql_send_logo(ha, tq, NULL);
7715 					break;
7716 				case CS_PORT_CONFIG_CHG:
7717 					sp->pkt->pkt_state =
7718 					    FC_PKT_PORT_OFFLINE;
7719 					sp->pkt->pkt_reason =
7720 					    FC_REASON_OFFLINE;
7721 					break;
7722 				case CS_QUEUE_FULL:
7723 					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
7724 					sp->pkt->pkt_reason = FC_REASON_QFULL;
7725 					break;
7726 
7727 				case CS_ABORTED:
7728 					DEVICE_QUEUE_LOCK(tq);
7729 					if (tq->flags & (TQF_RSCN_RCVD |
7730 					    TQF_NEED_AUTHENTICATION)) {
7731 						sp->pkt->pkt_state =
7732 						    FC_PKT_PORT_OFFLINE;
7733 						sp->pkt->pkt_reason =
7734 						    FC_REASON_LOGIN_REQUIRED;
7735 					} else {
7736 						sp->pkt->pkt_state =
7737 						    FC_PKT_LOCAL_RJT;
7738 						sp->pkt->pkt_reason =
7739 						    FC_REASON_ABORTED;
7740 					}
7741 					DEVICE_QUEUE_UNLOCK(tq);
7742 					break;
7743 
7744 				case CS_TRANSPORT:
7745 					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
7746 					sp->pkt->pkt_reason =
7747 					    FC_PKT_TRAN_ERROR;
7748 					break;
7749 
7750 				case CS_DATA_UNDERRUN:
7751 					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
7752 					sp->pkt->pkt_reason =
7753 					    FC_REASON_UNDERRUN;
7754 					break;
7755 				case CS_DMA_ERROR:
7756 				case CS_BAD_PAYLOAD:
7757 				case CS_UNKNOWN:
7758 				case CS_CMD_FAILED:
7759 				default:
7760 					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
7761 					sp->pkt->pkt_reason =
7762 					    FC_REASON_HW_ERROR;
7763 					break;
7764 				}
7765 
7766 				/* Now call the pkt completion callback */
7767 				if (sp->flags & SRB_POLL) {
7768 					sp->flags &= ~SRB_POLL;
7769 				} else if (sp->pkt->pkt_comp) {
7770 					if (sp->pkt->pkt_tran_flags &
7771 					    FC_TRAN_IMMEDIATE_CB) {
7772 						(*sp->pkt->pkt_comp)(sp->pkt);
7773 					} else {
7774 						ql_awaken_task_daemon(ha, sp,
7775 						    0, 0);
7776 					}
7777 				}
7778 			}
7779 		}
7780 	}
7781 
7782 	QL_PRINT_3(CE_CONT, "done\n");
7783 }
7784 
7785 /*
7786  * ql_awaken_task_daemon
7787  *	Adds command completion callback to callback queue and/or
7788  *	awakens task daemon thread.
7789  *
7790  * Input:
7791  *	ha:		adapter state pointer.
7792  *	sp:		srb pointer.
7793  *	set_flags:	task daemon flags to set.
7794  *	reset_flags:	task daemon flags to reset.
7795  *
7796  * Context:
7797  *	Interrupt or Kernel context, no mailbox commands allowed.
7798  */
7799 void
7800 ql_awaken_task_daemon(ql_adapter_state_t *vha, ql_srb_t *sp,
7801     uint32_t set_flags, uint32_t reset_flags)
7802 {
7803 	ql_adapter_state_t	*ha = vha->pha;
7804 
7805 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7806 
7807 	/* Acquire task daemon lock. */
7808 	TASK_DAEMON_LOCK(ha);
7809 
7810 	if (set_flags & ISP_ABORT_NEEDED) {
7811 		if (ha->task_daemon_flags & ABORT_ISP_ACTIVE) {
7812 			set_flags &= ~ISP_ABORT_NEEDED;
7813 		}
7814 	}
7815 
7816 	ha->task_daemon_flags |= set_flags;
7817 	ha->task_daemon_flags &= ~reset_flags;
7818 
7819 	if (QL_DAEMON_SUSPENDED(ha)) {
7820 		if (sp != NULL) {
7821 			TASK_DAEMON_UNLOCK(ha);
7822 
7823 			/* Do callback. */
7824 			if (sp->flags & SRB_UB_CALLBACK) {
7825 				ql_unsol_callback(sp);
7826 			} else {
7827 				(*sp->pkt->pkt_comp)(sp->pkt);
7828 			}
7829 		} else {
7830 			if (!(curthread->t_flag & T_INTR_THREAD) &&
7831 			    !(ha->task_daemon_flags & TASK_THREAD_CALLED)) {
7832 				ha->task_daemon_flags |= TASK_THREAD_CALLED;
7833 				ql_task_thread(ha);
7834 				ha->task_daemon_flags &= ~TASK_THREAD_CALLED;
7835 			}
7836 
7837 			TASK_DAEMON_UNLOCK(ha);
7838 		}
7839 	} else {
7840 		if (sp != NULL) {
7841 			ql_add_link_b(&ha->callback_queue, &sp->cmd);
7842 		}
7843 
7844 		if (ha->task_daemon_flags & TASK_DAEMON_SLEEPING_FLG) {
7845 			cv_broadcast(&ha->cv_task_daemon);
7846 		}
7847 		TASK_DAEMON_UNLOCK(ha);
7848 	}
7849 
7850 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7851 }
7852 
7853 /*
7854  * ql_task_daemon
7855  *	Thread that is awaken by the driver when a
7856  *	background needs to be done.
7857  *
7858  * Input:
7859  *	arg = adapter state pointer.
7860  *
7861  * Context:
7862  *	Kernel context.
7863  */
7864 static void
7865 ql_task_daemon(void *arg)
7866 {
7867 	ql_adapter_state_t	*ha = (void *)arg;
7868 
7869 	QL_PRINT_3(CE_CONT, "\n(%d): started\n", ha->instance);
7870 
7871 	CALLB_CPR_INIT(&ha->cprinfo, &ha->task_daemon_mutex, callb_generic_cpr,
7872 	    "ql_task_daemon");
7873 
7874 	/* Acquire task daemon lock. */
7875 	TASK_DAEMON_LOCK(ha);
7876 
7877 	ha->task_daemon_flags |= TASK_DAEMON_ALIVE_FLG;
7878 
7879 	while ((ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) == 0) {
7880 		ql_task_thread(ha);
7881 
7882 		QL_PRINT_3(CE_CONT, "(%d): Going to sleep\n", ha->instance);
7883 
7884 		/*
7885 		 * Before we wait on the conditional variable, we
7886 		 * need to check if STOP_FLG is set for us to terminate
7887 		 */
7888 		if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
7889 			break;
7890 		}
7891 
7892 		/*LINTED [Solaris CALLB_CPR_SAFE_BEGIN Lint error]*/
7893 		CALLB_CPR_SAFE_BEGIN(&ha->cprinfo);
7894 
7895 		ha->task_daemon_flags |= TASK_DAEMON_SLEEPING_FLG;
7896 
7897 		/* If killed, stop task daemon */
7898 		if (cv_wait_sig(&ha->cv_task_daemon,
7899 		    &ha->task_daemon_mutex) == 0) {
7900 			ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
7901 		}
7902 
7903 		ha->task_daemon_flags &= ~TASK_DAEMON_SLEEPING_FLG;
7904 
7905 		/*LINTED [Solaris CALLB_CPR_SAFE_END Lint error]*/
7906 		CALLB_CPR_SAFE_END(&ha->cprinfo, &ha->task_daemon_mutex);
7907 
7908 		QL_PRINT_3(CE_CONT, "(%d): Awakened\n", ha->instance);
7909 	}
7910 
7911 	ha->task_daemon_flags &= ~(TASK_DAEMON_STOP_FLG |
7912 	    TASK_DAEMON_ALIVE_FLG);
7913 
7914 	/*LINTED [Solaris CALLB_CPR_EXIT Lint error]*/
7915 	CALLB_CPR_EXIT(&ha->cprinfo);
7916 
7917 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7918 
7919 	thread_exit();
7920 }
7921 
7922 /*
7923  * ql_task_thread
7924  *	Thread run by daemon.
7925  *
7926  * Input:
7927  *	ha = adapter state pointer.
7928  *	TASK_DAEMON_LOCK must be acquired prior to call.
7929  *
7930  * Context:
7931  *	Kernel context.
7932  */
7933 static void
7934 ql_task_thread(ql_adapter_state_t *ha)
7935 {
7936 	int			loop_again;
7937 	ql_srb_t		*sp;
7938 	ql_head_t		*head;
7939 	ql_link_t		*link;
7940 	caddr_t			msg;
7941 	ql_adapter_state_t	*vha;
7942 
7943 	do {
7944 		QL_PRINT_3(CE_CONT, "(%d): task_daemon_flags=%xh\n",
7945 		    ha->instance, ha->task_daemon_flags);
7946 
7947 		loop_again = FALSE;
7948 
7949 		QL_PM_LOCK(ha);
7950 		if (ha->power_level != PM_LEVEL_D0 ||
7951 		    ha->flags & ADAPTER_SUSPENDED ||
7952 		    ha->task_daemon_flags & (TASK_DAEMON_STOP_FLG |
7953 		    DRIVER_STALL) ||
7954 		    (ha->flags & ONLINE) == 0) {
7955 			QL_PM_UNLOCK(ha);
7956 			ha->task_daemon_flags |= TASK_DAEMON_STALLED_FLG;
7957 			break;
7958 		}
7959 		QL_PM_UNLOCK(ha);
7960 
7961 		ha->task_daemon_flags &= ~TASK_DAEMON_STALLED_FLG;
7962 
7963 		if (ha->task_daemon_flags & ISP_ABORT_NEEDED) {
7964 			TASK_DAEMON_UNLOCK(ha);
7965 			ql_port_state(ha, FC_STATE_OFFLINE, FC_STATE_CHANGE);
7966 			TASK_DAEMON_LOCK(ha);
7967 			loop_again = TRUE;
7968 		}
7969 
7970 		/* Idle Check. */
7971 		if (ha->task_daemon_flags & TASK_DAEMON_IDLE_CHK_FLG) {
7972 			ha->task_daemon_flags &= ~TASK_DAEMON_IDLE_CHK_FLG;
7973 			if (!(ha->task_daemon_flags & QL_SUSPENDED)) {
7974 				TASK_DAEMON_UNLOCK(ha);
7975 				ql_idle_check(ha);
7976 				TASK_DAEMON_LOCK(ha);
7977 				loop_again = TRUE;
7978 			}
7979 		}
7980 
7981 		/* Crystal+ port#0 bypass transition */
7982 		if (ha->task_daemon_flags & HANDLE_PORT_BYPASS_CHANGE) {
7983 			ha->task_daemon_flags &= ~HANDLE_PORT_BYPASS_CHANGE;
7984 			TASK_DAEMON_UNLOCK(ha);
7985 			(void) ql_initiate_lip(ha);
7986 			TASK_DAEMON_LOCK(ha);
7987 			loop_again = TRUE;
7988 		}
7989 
7990 		/* Abort queues needed. */
7991 		if (ha->task_daemon_flags & ABORT_QUEUES_NEEDED) {
7992 			ha->task_daemon_flags &= ~ABORT_QUEUES_NEEDED;
7993 			TASK_DAEMON_UNLOCK(ha);
7994 			ql_abort_queues(ha);
7995 			TASK_DAEMON_LOCK(ha);
7996 		}
7997 
7998 		/* Not suspended, awaken waiting routines. */
7999 		if (!(ha->task_daemon_flags & QL_SUSPENDED) &&
8000 		    ha->task_daemon_flags & SUSPENDED_WAKEUP_FLG) {
8001 			ha->task_daemon_flags &= ~SUSPENDED_WAKEUP_FLG;
8002 			cv_broadcast(&ha->cv_dr_suspended);
8003 			loop_again = TRUE;
8004 		}
8005 
8006 		/* Handle RSCN changes. */
8007 		for (vha = ha; vha != NULL; vha = vha->vp_next) {
8008 			if (vha->task_daemon_flags & RSCN_UPDATE_NEEDED) {
8009 				vha->task_daemon_flags &= ~RSCN_UPDATE_NEEDED;
8010 				TASK_DAEMON_UNLOCK(ha);
8011 				(void) ql_handle_rscn_update(vha);
8012 				TASK_DAEMON_LOCK(ha);
8013 				loop_again = TRUE;
8014 			}
8015 		}
8016 
8017 		/* Handle state changes. */
8018 		for (vha = ha; vha != NULL; vha = vha->vp_next) {
8019 			if (vha->task_daemon_flags & FC_STATE_CHANGE &&
8020 			    !(ha->task_daemon_flags &
8021 			    TASK_DAEMON_POWERING_DOWN)) {
8022 				/* Report state change. */
8023 				EL(vha, "state change = %xh\n", vha->state);
8024 				vha->task_daemon_flags &= ~FC_STATE_CHANGE;
8025 
8026 				if (vha->task_daemon_flags &
8027 				    COMMAND_WAIT_NEEDED) {
8028 					vha->task_daemon_flags &=
8029 					    ~COMMAND_WAIT_NEEDED;
8030 					if (!(ha->task_daemon_flags &
8031 					    COMMAND_WAIT_ACTIVE)) {
8032 						ha->task_daemon_flags |=
8033 						    COMMAND_WAIT_ACTIVE;
8034 						TASK_DAEMON_UNLOCK(ha);
8035 						ql_cmd_wait(ha);
8036 						TASK_DAEMON_LOCK(ha);
8037 						ha->task_daemon_flags &=
8038 						    ~COMMAND_WAIT_ACTIVE;
8039 					}
8040 				}
8041 
8042 				msg = NULL;
8043 				if (FC_PORT_STATE_MASK(vha->state) ==
8044 				    FC_STATE_OFFLINE) {
8045 					if (vha->task_daemon_flags &
8046 					    STATE_ONLINE) {
8047 						if (ha->topology &
8048 						    QL_LOOP_CONNECTION) {
8049 							msg = "Loop OFFLINE";
8050 						} else {
8051 							msg = "Link OFFLINE";
8052 						}
8053 					}
8054 					vha->task_daemon_flags &=
8055 					    ~STATE_ONLINE;
8056 				} else if (FC_PORT_STATE_MASK(vha->state) ==
8057 				    FC_STATE_LOOP) {
8058 					if (!(vha->task_daemon_flags &
8059 					    STATE_ONLINE)) {
8060 						msg = "Loop ONLINE";
8061 					}
8062 					vha->task_daemon_flags |= STATE_ONLINE;
8063 				} else if (FC_PORT_STATE_MASK(vha->state) ==
8064 				    FC_STATE_ONLINE) {
8065 					if (!(vha->task_daemon_flags &
8066 					    STATE_ONLINE)) {
8067 						msg = "Link ONLINE";
8068 					}
8069 					vha->task_daemon_flags |= STATE_ONLINE;
8070 				} else {
8071 					msg = "Unknown Link state";
8072 				}
8073 
8074 				if (msg != NULL) {
8075 					cmn_err(CE_NOTE, "!Qlogic %s(%d,%d): "
8076 					    "%s", QL_NAME, ha->instance,
8077 					    vha->vp_index, msg);
8078 				}
8079 
8080 				if (vha->flags & FCA_BOUND) {
8081 					QL_PRINT_10(CE_CONT, "(%d,%d): statec_"
8082 					    "cb state=%xh\n", ha->instance,
8083 					    vha->vp_index, vha->state);
8084 					TASK_DAEMON_UNLOCK(ha);
8085 					(vha->bind_info.port_statec_cb)
8086 					    (vha->bind_info.port_handle,
8087 					    vha->state);
8088 					TASK_DAEMON_LOCK(ha);
8089 				}
8090 				loop_again = TRUE;
8091 			}
8092 		}
8093 
8094 		if (ha->task_daemon_flags & LIP_RESET_PENDING &&
8095 		    !(ha->task_daemon_flags & TASK_DAEMON_POWERING_DOWN)) {
8096 			EL(ha, "processing LIP reset\n");
8097 			ha->task_daemon_flags &= ~LIP_RESET_PENDING;
8098 			TASK_DAEMON_UNLOCK(ha);
8099 			for (vha = ha; vha != NULL; vha = vha->vp_next) {
8100 				if (vha->flags & FCA_BOUND) {
8101 					QL_PRINT_10(CE_CONT, "(%d,%d): statec_"
8102 					    "cb reset\n", ha->instance,
8103 					    vha->vp_index);
8104 					(vha->bind_info.port_statec_cb)
8105 					    (vha->bind_info.port_handle,
8106 					    FC_STATE_TARGET_PORT_RESET);
8107 				}
8108 			}
8109 			TASK_DAEMON_LOCK(ha);
8110 			loop_again = TRUE;
8111 		}
8112 
8113 		if (QL_IS_SET(ha->task_daemon_flags, NEED_UNSOLICITED_BUFFERS |
8114 		    FIRMWARE_UP)) {
8115 			/*
8116 			 * The firmware needs more unsolicited
8117 			 * buffers. We cannot allocate any new
8118 			 * buffers unless the ULP module requests
8119 			 * for new buffers. All we can do here is
8120 			 * to give received buffers from the pool
8121 			 * that is already allocated
8122 			 */
8123 			ha->task_daemon_flags &= ~NEED_UNSOLICITED_BUFFERS;
8124 			TASK_DAEMON_UNLOCK(ha);
8125 			ql_isp_rcvbuf(ha);
8126 			TASK_DAEMON_LOCK(ha);
8127 			loop_again = TRUE;
8128 		}
8129 
8130 		if (ha->task_daemon_flags & ISP_ABORT_NEEDED) {
8131 			TASK_DAEMON_UNLOCK(ha);
8132 			(void) ql_abort_isp(ha);
8133 			TASK_DAEMON_LOCK(ha);
8134 			loop_again = TRUE;
8135 		}
8136 
8137 		if (!(ha->task_daemon_flags & (LOOP_DOWN | DRIVER_STALL |
8138 		    COMMAND_WAIT_NEEDED))) {
8139 			if (QL_IS_SET(ha->task_daemon_flags,
8140 			    RESET_MARKER_NEEDED | FIRMWARE_UP)) {
8141 				ha->task_daemon_flags &= ~RESET_MARKER_NEEDED;
8142 				if (!(ha->task_daemon_flags & RESET_ACTIVE)) {
8143 					ha->task_daemon_flags |= RESET_ACTIVE;
8144 					TASK_DAEMON_UNLOCK(ha);
8145 					for (vha = ha; vha != NULL;
8146 					    vha = vha->vp_next) {
8147 						ql_rst_aen(vha);
8148 					}
8149 					TASK_DAEMON_LOCK(ha);
8150 					ha->task_daemon_flags &= ~RESET_ACTIVE;
8151 					loop_again = TRUE;
8152 				}
8153 			}
8154 
8155 			if (QL_IS_SET(ha->task_daemon_flags,
8156 			    LOOP_RESYNC_NEEDED | FIRMWARE_UP)) {
8157 				if (!(ha->task_daemon_flags &
8158 				    LOOP_RESYNC_ACTIVE)) {
8159 					ha->task_daemon_flags |=
8160 					    LOOP_RESYNC_ACTIVE;
8161 					TASK_DAEMON_UNLOCK(ha);
8162 					(void) ql_loop_resync(ha);
8163 					TASK_DAEMON_LOCK(ha);
8164 					loop_again = TRUE;
8165 				}
8166 			}
8167 		}
8168 
8169 		/* Port retry needed. */
8170 		if (ha->task_daemon_flags & PORT_RETRY_NEEDED) {
8171 			ha->task_daemon_flags &= ~PORT_RETRY_NEEDED;
8172 			ADAPTER_STATE_LOCK(ha);
8173 			ha->port_retry_timer = 0;
8174 			ADAPTER_STATE_UNLOCK(ha);
8175 
8176 			TASK_DAEMON_UNLOCK(ha);
8177 			ql_restart_queues(ha);
8178 			TASK_DAEMON_LOCK(ha);
8179 			loop_again = B_TRUE;
8180 		}
8181 
8182 		/* iiDMA setting needed? */
8183 		if (ha->task_daemon_flags & TD_IIDMA_NEEDED) {
8184 			ha->task_daemon_flags &= ~TD_IIDMA_NEEDED;
8185 
8186 			TASK_DAEMON_UNLOCK(ha);
8187 			ql_iidma(ha);
8188 			TASK_DAEMON_LOCK(ha);
8189 			loop_again = B_TRUE;
8190 		}
8191 
8192 		head = &ha->callback_queue;
8193 		if (head->first != NULL) {
8194 			sp = head->first->base_address;
8195 			link = &sp->cmd;
8196 
8197 			/* Dequeue command. */
8198 			ql_remove_link(head, link);
8199 
8200 			/* Release task daemon lock. */
8201 			TASK_DAEMON_UNLOCK(ha);
8202 
8203 			ASSERT((sp->flags & (SRB_IN_DEVICE_QUEUE |
8204 			    SRB_IN_TOKEN_ARRAY)) == 0);
8205 
8206 			/* Do callback. */
8207 			if (sp->flags & SRB_UB_CALLBACK) {
8208 				ql_unsol_callback(sp);
8209 			} else {
8210 				(*sp->pkt->pkt_comp)(sp->pkt);
8211 			}
8212 
8213 			/* Acquire task daemon lock. */
8214 			TASK_DAEMON_LOCK(ha);
8215 
8216 			loop_again = TRUE;
8217 		}
8218 
8219 	} while (loop_again);
8220 }
8221 
8222 /*
8223  * ql_idle_check
8224  *	Test for adapter is alive and well.
8225  *
8226  * Input:
8227  *	ha:	adapter state pointer.
8228  *
8229  * Context:
8230  *	Kernel context.
8231  */
8232 static void
8233 ql_idle_check(ql_adapter_state_t *ha)
8234 {
8235 	ddi_devstate_t	state;
8236 	int		rval;
8237 	ql_mbx_data_t	mr;
8238 
8239 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8240 
8241 	/* Firmware Ready Test. */
8242 	rval = ql_get_firmware_state(ha, &mr);
8243 	if (!(ha->task_daemon_flags & QL_SUSPENDED) &&
8244 	    (rval != QL_SUCCESS || mr.mb[1] != FSTATE_READY)) {
8245 		EL(ha, "failed, Firmware Ready Test = %xh\n", rval);
8246 		state = ddi_get_devstate(ha->dip);
8247 		if (state == DDI_DEVSTATE_UP) {
8248 			/*EMPTY*/
8249 			ddi_dev_report_fault(ha->dip, DDI_SERVICE_DEGRADED,
8250 			    DDI_DEVICE_FAULT, "Firmware Ready Test failed");
8251 		}
8252 		TASK_DAEMON_LOCK(ha);
8253 		if (!(ha->task_daemon_flags & ABORT_ISP_ACTIVE)) {
8254 			EL(ha, "fstate_ready, isp_abort_needed\n");
8255 			ha->task_daemon_flags |= ISP_ABORT_NEEDED;
8256 		}
8257 		TASK_DAEMON_UNLOCK(ha);
8258 	}
8259 
8260 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8261 }
8262 
8263 /*
8264  * ql_unsol_callback
8265  *	Handle unsolicited buffer callbacks.
8266  *
8267  * Input:
8268  *	ha = adapter state pointer.
8269  *	sp = srb pointer.
8270  *
8271  * Context:
8272  *	Kernel context.
8273  */
8274 static void
8275 ql_unsol_callback(ql_srb_t *sp)
8276 {
8277 	fc_affected_id_t	*af;
8278 	fc_unsol_buf_t		*ubp;
8279 	uchar_t			r_ctl;
8280 	uchar_t			ls_code;
8281 	ql_tgt_t		*tq;
8282 	ql_adapter_state_t	*ha = sp->ha, *pha = sp->ha->pha;
8283 
8284 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8285 
8286 	ubp = ha->ub_array[sp->handle];
8287 	r_ctl = ubp->ub_frame.r_ctl;
8288 	ls_code = ubp->ub_buffer[0];
8289 
8290 	if (sp->lun_queue == NULL) {
8291 		tq = NULL;
8292 	} else {
8293 		tq = sp->lun_queue->target_queue;
8294 	}
8295 
8296 	QL_UB_LOCK(ha);
8297 	if (sp->flags & SRB_UB_FREE_REQUESTED ||
8298 	    pha->task_daemon_flags & TASK_DAEMON_POWERING_DOWN) {
8299 		sp->flags &= ~(SRB_UB_IN_ISP | SRB_UB_CALLBACK |
8300 		    SRB_UB_RSCN | SRB_UB_FCP | SRB_UB_ACQUIRED);
8301 		sp->flags |= SRB_UB_IN_FCA;
8302 		QL_UB_UNLOCK(ha);
8303 		return;
8304 	}
8305 
8306 	/* Process RSCN */
8307 	if (sp->flags & SRB_UB_RSCN) {
8308 		int sendup = 1;
8309 
8310 		/*
8311 		 * Defer RSCN posting until commands return
8312 		 */
8313 		QL_UB_UNLOCK(ha);
8314 
8315 		af = (fc_affected_id_t *)((caddr_t)ubp->ub_buffer + 4);
8316 
8317 		/* Abort outstanding commands */
8318 		sendup = ql_process_rscn(ha, af);
8319 		if (sendup == 0) {
8320 
8321 			TASK_DAEMON_LOCK(ha);
8322 			ql_add_link_b(&pha->callback_queue, &sp->cmd);
8323 			TASK_DAEMON_UNLOCK(ha);
8324 
8325 			/*
8326 			 * Wait for commands to drain in F/W (doesn't take
8327 			 * more than a few milliseconds)
8328 			 */
8329 			ql_delay(ha, 10000);
8330 
8331 			QL_PRINT_2(CE_CONT, "(%d,%d): done rscn_sendup=0, "
8332 			    "fmt=%xh, d_id=%xh\n", ha->instance, ha->vp_index,
8333 			    af->aff_format, af->aff_d_id);
8334 			return;
8335 		}
8336 
8337 		QL_UB_LOCK(ha);
8338 
8339 		EL(ha, "sending unsol rscn, fmt=%xh, d_id=%xh to transport\n",
8340 		    af->aff_format, af->aff_d_id);
8341 	}
8342 
8343 	/* Process UNSOL LOGO */
8344 	if ((r_ctl == R_CTL_ELS_REQ) && (ls_code == LA_ELS_LOGO)) {
8345 		QL_UB_UNLOCK(ha);
8346 
8347 		if (tq && (ql_process_logo_for_device(ha, tq) == 0)) {
8348 			TASK_DAEMON_LOCK(ha);
8349 			ql_add_link_b(&pha->callback_queue, &sp->cmd);
8350 			TASK_DAEMON_UNLOCK(ha);
8351 			QL_PRINT_2(CE_CONT, "(%d,%d): logo_sendup=0, d_id=%xh"
8352 			    "\n", ha->instance, ha->vp_index, tq->d_id.b24);
8353 			return;
8354 		}
8355 
8356 		QL_UB_LOCK(ha);
8357 		EL(ha, "sending unsol logout for %xh to transport\n",
8358 		    ubp->ub_frame.s_id);
8359 	}
8360 
8361 	sp->flags &= ~(SRB_UB_IN_FCA | SRB_UB_IN_ISP | SRB_UB_RSCN |
8362 	    SRB_UB_FCP);
8363 
8364 	if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
8365 		(void) ddi_dma_sync(sp->ub_buffer.dma_handle, 0,
8366 		    ubp->ub_bufsize, DDI_DMA_SYNC_FORCPU);
8367 	}
8368 	QL_UB_UNLOCK(ha);
8369 
8370 	(ha->bind_info.port_unsol_cb)(ha->bind_info.port_handle,
8371 	    ubp, sp->ub_type);
8372 
8373 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8374 }
8375 
8376 /*
8377  * ql_send_logo
8378  *
8379  * Input:
8380  *	ha:	adapter state pointer.
8381  *	tq:	target queue pointer.
8382  *	done_q:	done queue pointer.
8383  *
8384  * Context:
8385  *	Interrupt or Kernel context, no mailbox commands allowed.
8386  */
void
ql_send_logo(ql_adapter_state_t *vha, ql_tgt_t *tq, ql_head_t *done_q)
{
	fc_unsol_buf_t		*ubp;
	ql_srb_t		*sp;
	la_els_logo_t		*payload;
	/* Work on the physical adapter; vha may be a virtual port. */
	ql_adapter_state_t	*ha = vha->pha;

	/* Nothing to do for an unset or broadcast destination id. */
	if ((tq->d_id.b24 == 0) || (tq->d_id.b24 == 0xffffff)) {
		return;
	}

	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
	    tq->d_id.b24);

	/*
	 * Only fabricate a LOGO when no RSCN or PLOGI is in progress for
	 * the target, no logout was already sent and the loop is up.
	 */
	if ((tq->flags & (TQF_RSCN_RCVD | TQF_PLOGI_PROGRS)) == 0 &&
	    tq->logout_sent == 0 && (ha->task_daemon_flags & LOOP_DOWN) == 0) {

		/* Locate a buffer to use. */
		ubp = ql_get_unsolicited_buffer(vha, FC_TYPE_EXTENDED_LS);
		if (ubp == NULL) {
			EL(vha, "Failed, get_unsolicited_buffer\n");
			return;
		}

		/* Mark the target as needing re-login before more I/O. */
		DEVICE_QUEUE_LOCK(tq);
		tq->flags |= TQF_NEED_AUTHENTICATION;
		tq->logout_sent++;
		DEVICE_QUEUE_UNLOCK(tq);

		EL(vha, "Received LOGO from = %xh\n", tq->d_id.b24);

		sp = ubp->ub_fca_private;

		/* Set header. */
		ubp->ub_frame.d_id = vha->d_id.b24;
		ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
		ubp->ub_frame.s_id = tq->d_id.b24;
		ubp->ub_frame.rsvd = 0;
		ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
		    F_CTL_SEQ_INITIATIVE;
		ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
		ubp->ub_frame.seq_cnt = 0;
		ubp->ub_frame.df_ctl = 0;
		ubp->ub_frame.seq_id = 0;
		ubp->ub_frame.rx_id = 0xffff;
		ubp->ub_frame.ox_id = 0xffff;

		/* set payload. */
		payload = (la_els_logo_t *)ubp->ub_buffer;
		bzero(payload, sizeof (la_els_logo_t));
		/* Make sure ls_code in payload is always big endian */
		ubp->ub_buffer[0] = LA_ELS_LOGO;
		ubp->ub_buffer[1] = 0;
		ubp->ub_buffer[2] = 0;
		ubp->ub_buffer[3] = 0;
		bcopy(&vha->loginparams.node_ww_name.raw_wwn[0],
		    &payload->nport_ww_name.raw_wwn[0], 8);
		payload->nport_id.port_id = tq->d_id.b24;

		QL_UB_LOCK(ha);
		sp->flags |= SRB_UB_CALLBACK;
		QL_UB_UNLOCK(ha);
		/* Attach a LUN queue; reuse the first existing one if any. */
		if (tq->lun_queues.first != NULL) {
			sp->lun_queue = (tq->lun_queues.first)->base_address;
		} else {
			sp->lun_queue = ql_lun_queue(vha, tq, 0);
		}
		/* Deliver via the caller's done queue or the task daemon. */
		if (done_q) {
			ql_add_link_b(done_q, &sp->cmd);
		} else {
			ql_awaken_task_daemon(ha, sp, 0, 0);
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
8464 
8465 static int
8466 ql_process_logo_for_device(ql_adapter_state_t *ha, ql_tgt_t *tq)
8467 {
8468 	port_id_t	d_id;
8469 	ql_srb_t	*sp;
8470 	ql_link_t	*link;
8471 	int		sendup = 1;
8472 
8473 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8474 
8475 	DEVICE_QUEUE_LOCK(tq);
8476 	if (tq->outcnt) {
8477 		DEVICE_QUEUE_UNLOCK(tq);
8478 		sendup = 0;
8479 		(void) ql_abort_device(ha, tq, 1);
8480 		ql_delay(ha, 10000);
8481 	} else {
8482 		DEVICE_QUEUE_UNLOCK(tq);
8483 		TASK_DAEMON_LOCK(ha);
8484 
8485 		for (link = ha->pha->callback_queue.first; link != NULL;
8486 		    link = link->next) {
8487 			sp = link->base_address;
8488 			if (sp->flags & SRB_UB_CALLBACK) {
8489 				continue;
8490 			}
8491 			d_id.b24 = sp->pkt->pkt_cmd_fhdr.d_id;
8492 
8493 			if (tq->d_id.b24 == d_id.b24) {
8494 				sendup = 0;
8495 				break;
8496 			}
8497 		}
8498 
8499 		TASK_DAEMON_UNLOCK(ha);
8500 	}
8501 
8502 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8503 
8504 	return (sendup);
8505 }
8506 
8507 static int
8508 ql_send_plogi(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_head_t *done_q)
8509 {
8510 	fc_unsol_buf_t		*ubp;
8511 	ql_srb_t		*sp;
8512 	la_els_logi_t		*payload;
8513 	class_svc_param_t	*class3_param;
8514 
8515 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8516 
8517 	if ((tq->flags & TQF_RSCN_RCVD) || (ha->task_daemon_flags &
8518 	    LOOP_DOWN)) {
8519 		EL(ha, "Failed, tqf=%xh\n", tq->flags);
8520 		return (QL_FUNCTION_FAILED);
8521 	}
8522 
8523 	/* Locate a buffer to use. */
8524 	ubp = ql_get_unsolicited_buffer(ha, FC_TYPE_EXTENDED_LS);
8525 	if (ubp == NULL) {
8526 		EL(ha, "Failed\n");
8527 		return (QL_FUNCTION_FAILED);
8528 	}
8529 
8530 	QL_PRINT_3(CE_CONT, "(%d): Received LOGO from = %xh\n",
8531 	    ha->instance, tq->d_id.b24);
8532 
8533 	sp = ubp->ub_fca_private;
8534 
8535 	/* Set header. */
8536 	ubp->ub_frame.d_id = ha->d_id.b24;
8537 	ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
8538 	ubp->ub_frame.s_id = tq->d_id.b24;
8539 	ubp->ub_frame.rsvd = 0;
8540 	ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
8541 	    F_CTL_SEQ_INITIATIVE;
8542 	ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
8543 	ubp->ub_frame.seq_cnt = 0;
8544 	ubp->ub_frame.df_ctl = 0;
8545 	ubp->ub_frame.seq_id = 0;
8546 	ubp->ub_frame.rx_id = 0xffff;
8547 	ubp->ub_frame.ox_id = 0xffff;
8548 
8549 	/* set payload. */
8550 	payload = (la_els_logi_t *)ubp->ub_buffer;
8551 	bzero(payload, sizeof (payload));
8552 
8553 	payload->ls_code.ls_code = LA_ELS_PLOGI;
8554 	payload->common_service.fcph_version = 0x2006;
8555 	payload->common_service.cmn_features = 0x8800;
8556 
8557 	CFG_IST(ha, CFG_CTRL_2425) ?
8558 	    (payload->common_service.rx_bufsize = CHAR_TO_SHORT(
8559 	    ha->init_ctrl_blk.cb24.max_frame_length[0],
8560 	    ha->init_ctrl_blk.cb24.max_frame_length[1])) :
8561 	    (payload->common_service.rx_bufsize = CHAR_TO_SHORT(
8562 	    ha->init_ctrl_blk.cb.max_frame_length[0],
8563 	    ha->init_ctrl_blk.cb.max_frame_length[1]));
8564 
8565 	payload->common_service.conc_sequences = 0xff;
8566 	payload->common_service.relative_offset = 0x03;
8567 	payload->common_service.e_d_tov = 0x7d0;
8568 
8569 	bcopy((void *)&tq->port_name[0],
8570 	    (void *)&payload->nport_ww_name.raw_wwn[0], 8);
8571 
8572 	bcopy((void *)&tq->node_name[0],
8573 	    (void *)&payload->node_ww_name.raw_wwn[0], 8);
8574 
8575 	class3_param = (class_svc_param_t *)&payload->class_3;
8576 	class3_param->class_valid_svc_opt = 0x8000;
8577 	class3_param->recipient_ctl = tq->class3_recipient_ctl;
8578 	class3_param->rcv_data_size = tq->class3_rcv_data_size;
8579 	class3_param->conc_sequences = tq->class3_conc_sequences;
8580 	class3_param->open_sequences_per_exch =
8581 	    tq->class3_open_sequences_per_exch;
8582 
8583 	QL_UB_LOCK(ha);
8584 	sp->flags |= SRB_UB_CALLBACK;
8585 	QL_UB_UNLOCK(ha);
8586 
8587 	if (done_q) {
8588 		ql_add_link_b(done_q, &sp->cmd);
8589 	} else {
8590 		ql_awaken_task_daemon(ha, sp, 0, 0);
8591 	}
8592 
8593 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8594 
8595 	return (QL_SUCCESS);
8596 }
8597 
8598 /*
8599  * Abort outstanding commands in the Firmware, clear internally
8600  * queued commands in the driver, Synchronize the target with
8601  * the Firmware
8602  */
int
ql_abort_device(ql_adapter_state_t *ha, ql_tgt_t *tq, int drain)
{
	ql_link_t	*link, *link2;
	ql_lun_t	*lq;
	int		rval = QL_SUCCESS;
	ql_srb_t	*sp;
	ql_head_t	done_q = { NULL, NULL };

	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);

	/*
	 * First clear, internally queued commands
	 */
	DEVICE_QUEUE_LOCK(tq);
	for (link = tq->lun_queues.first; link != NULL; link = link->next) {
		lq = link->base_address;

		/*
		 * Walk each LUN's command queue; capture next before
		 * removal since ql_remove_link() unlinks the element.
		 */
		link2 = lq->cmd.first;
		while (link2 != NULL) {
			sp = link2->base_address;
			link2 = link2->next;

			/* Leave commands already being aborted alone. */
			if (sp->flags & SRB_ABORT) {
				continue;
			}

			/* Remove srb from device command queue. */
			ql_remove_link(&lq->cmd, &sp->cmd);
			sp->flags &= ~SRB_IN_DEVICE_QUEUE;

			/* Set ending status. */
			sp->pkt->pkt_reason = CS_ABORTED;

			/* Call done routine to handle completions. */
			ql_add_link_b(&done_q, &sp->cmd);
		}
	}
	DEVICE_QUEUE_UNLOCK(tq);

	/* Complete the collected commands outside the device lock. */
	if (done_q.first != NULL) {
		ql_done(done_q.first);
	}

	/* Optionally abort what the firmware still holds for the target. */
	if (drain && VALID_TARGET_ID(ha, tq->loop_id) && PD_PORT_LOGIN(tq)) {
		rval = ql_abort_target(ha, tq, 0);
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh, d_id=%xh\n", rval, tq->d_id.b24);
	} else {
		/*EMPTY*/
		QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance,
		    ha->vp_index);
	}

	return (rval);
}
8661 
8662 /*
8663  * ql_rcv_rscn_els
8664  *	Processes received RSCN extended link service.
8665  *
8666  * Input:
8667  *	ha:	adapter state pointer.
8668  *	mb:	array containing input mailbox registers.
8669  *	done_q:	done queue pointer.
8670  *
8671  * Context:
8672  *	Interrupt or Kernel context, no mailbox commands allowed.
8673  */
void
ql_rcv_rscn_els(ql_adapter_state_t *ha, uint16_t *mb, ql_head_t *done_q)
{
	fc_unsol_buf_t		*ubp;
	ql_srb_t		*sp;
	fc_rscn_t		*rn;
	fc_affected_id_t	*af;
	port_id_t		d_id;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Locate a buffer to use. */
	ubp = ql_get_unsolicited_buffer(ha, FC_TYPE_EXTENDED_LS);
	if (ubp != NULL) {
		sp = ubp->ub_fca_private;

		/* Set header; RSCNs originate from the fabric controller. */
		ubp->ub_frame.d_id = ha->d_id.b24;
		ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
		ubp->ub_frame.s_id = FS_FABRIC_CONTROLLER;
		ubp->ub_frame.rsvd = 0;
		ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
		    F_CTL_SEQ_INITIATIVE;
		ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
		ubp->ub_frame.seq_cnt = 0;
		ubp->ub_frame.df_ctl = 0;
		ubp->ub_frame.seq_id = 0;
		ubp->ub_frame.rx_id = 0xffff;
		ubp->ub_frame.ox_id = 0xffff;

		/* set payload. */
		rn = (fc_rscn_t *)ubp->ub_buffer;
		af = (fc_affected_id_t *)((caddr_t)ubp->ub_buffer + 4);

		rn->rscn_code = LA_ELS_RSCN;
		rn->rscn_len = 4;
		rn->rscn_payload_len = 8;
		/* Reassemble the affected d_id from the mailbox registers. */
		d_id.b.al_pa = LSB(mb[2]);
		d_id.b.area = MSB(mb[2]);
		d_id.b.domain =	LSB(mb[1]);
		af->aff_d_id = d_id.b24;
		af->aff_format = MSB(mb[1]);

		EL(ha, "LA_ELS_RSCN fmt=%xh, d_id=%xh\n", af->aff_format,
		    af->aff_d_id);

		/* Flag every device queue covered by this RSCN. */
		ql_update_rscn(ha, af);

		QL_UB_LOCK(ha);
		sp->flags |= SRB_UB_CALLBACK | SRB_UB_RSCN;
		QL_UB_UNLOCK(ha);
		ql_add_link_b(done_q, &sp->cmd);
	}

	if (ubp == NULL) {
		EL(ha, "Failed, get_unsolicited_buffer\n");
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
}
8735 
8736 /*
8737  * ql_update_rscn
8738  *	Update devices from received RSCN.
8739  *
8740  * Input:
8741  *	ha:	adapter state pointer.
8742  *	af:	pointer to RSCN data.
8743  *
8744  * Context:
8745  *	Interrupt or Kernel context, no mailbox commands allowed.
8746  */
static void
ql_update_rscn(ql_adapter_state_t *ha, fc_affected_id_t *af)
{
	ql_link_t	*link;
	uint16_t	index;
	ql_tgt_t	*tq;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Single-port RSCN: flag just the one matching device queue. */
	if (af->aff_format == FC_RSCN_PORT_ADDRESS) {
		port_id_t d_id;

		d_id.r.rsvd_1 = 0;
		d_id.b24 = af->aff_d_id;

		tq = ql_d_id_to_queue(ha, d_id);
		if (tq) {
			EL(ha, "SD_RSCN_RCVD %xh RPA\n", d_id.b24);
			DEVICE_QUEUE_LOCK(tq);
			tq->flags |= TQF_RSCN_RCVD;
			DEVICE_QUEUE_UNLOCK(tq);
		}
		QL_PRINT_3(CE_CONT, "(%d): FC_RSCN_PORT_ADDRESS done\n",
		    ha->instance);

		return;
	}

	/*
	 * Wider-scope RSCN: walk every known device and flag those whose
	 * d_id falls within the affected fabric, area or domain.
	 */
	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;

			switch (af->aff_format) {
			case FC_RSCN_FABRIC_ADDRESS:
				/* Fabric wide: all non-reserved loop ids. */
				if (!RESERVED_LOOP_ID(ha, tq->loop_id)) {
					EL(ha, "SD_RSCN_RCVD %xh RFA\n",
					    tq->d_id.b24);
					DEVICE_QUEUE_LOCK(tq);
					tq->flags |= TQF_RSCN_RCVD;
					DEVICE_QUEUE_UNLOCK(tq);
				}
				break;

			case FC_RSCN_AREA_ADDRESS:
				/* Match on domain + area (upper 16 bits). */
				if ((tq->d_id.b24 & 0xffff00) == af->aff_d_id) {
					EL(ha, "SD_RSCN_RCVD %xh RAA\n",
					    tq->d_id.b24);
					DEVICE_QUEUE_LOCK(tq);
					tq->flags |= TQF_RSCN_RCVD;
					DEVICE_QUEUE_UNLOCK(tq);
				}
				break;

			case FC_RSCN_DOMAIN_ADDRESS:
				/* Match on domain only (upper 8 bits). */
				if ((tq->d_id.b24 & 0xff0000) == af->aff_d_id) {
					EL(ha, "SD_RSCN_RCVD %xh RDA\n",
					    tq->d_id.b24);
					DEVICE_QUEUE_LOCK(tq);
					tq->flags |= TQF_RSCN_RCVD;
					DEVICE_QUEUE_UNLOCK(tq);
				}
				break;

			default:
				break;
			}
		}
	}
	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
8818 
8819 /*
8820  * ql_process_rscn
8821  *
8822  * Input:
8823  *	ha:	adapter state pointer.
8824  *	af:	RSCN payload pointer.
8825  *
8826  * Context:
8827  *	Kernel context.
8828  */
8829 static int
8830 ql_process_rscn(ql_adapter_state_t *ha, fc_affected_id_t *af)
8831 {
8832 	int		sendit;
8833 	int		sendup = 1;
8834 	ql_link_t	*link;
8835 	uint16_t	index;
8836 	ql_tgt_t	*tq;
8837 
8838 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8839 
8840 	if (af->aff_format == FC_RSCN_PORT_ADDRESS) {
8841 		port_id_t d_id;
8842 
8843 		d_id.r.rsvd_1 = 0;
8844 		d_id.b24 = af->aff_d_id;
8845 
8846 		tq = ql_d_id_to_queue(ha, d_id);
8847 		if (tq) {
8848 			sendup = ql_process_rscn_for_device(ha, tq);
8849 		}
8850 
8851 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8852 
8853 		return (sendup);
8854 	}
8855 
8856 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
8857 		for (link = ha->dev[index].first; link != NULL;
8858 		    link = link->next) {
8859 
8860 			tq = link->base_address;
8861 			if (tq == NULL) {
8862 				continue;
8863 			}
8864 
8865 			switch (af->aff_format) {
8866 			case FC_RSCN_FABRIC_ADDRESS:
8867 				if (!RESERVED_LOOP_ID(ha, tq->loop_id)) {
8868 					sendit = ql_process_rscn_for_device(
8869 					    ha, tq);
8870 					if (sendup) {
8871 						sendup = sendit;
8872 					}
8873 				}
8874 				break;
8875 
8876 			case FC_RSCN_AREA_ADDRESS:
8877 				if ((tq->d_id.b24 & 0xffff00) ==
8878 				    af->aff_d_id) {
8879 					sendit = ql_process_rscn_for_device(
8880 					    ha, tq);
8881 
8882 					if (sendup) {
8883 						sendup = sendit;
8884 					}
8885 				}
8886 				break;
8887 
8888 			case FC_RSCN_DOMAIN_ADDRESS:
8889 				if ((tq->d_id.b24 & 0xff0000) ==
8890 				    af->aff_d_id) {
8891 					sendit = ql_process_rscn_for_device(
8892 					    ha, tq);
8893 
8894 					if (sendup) {
8895 						sendup = sendit;
8896 					}
8897 				}
8898 				break;
8899 
8900 			default:
8901 				break;
8902 			}
8903 		}
8904 	}
8905 
8906 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8907 
8908 	return (sendup);
8909 }
8910 
8911 /*
8912  * ql_process_rscn_for_device
8913  *
8914  * Input:
8915  *	ha:	adapter state pointer.
8916  *	tq:	target queue pointer.
8917  *
8918  * Context:
8919  *	Kernel context.
8920  */
static int
ql_process_rscn_for_device(ql_adapter_state_t *ha, ql_tgt_t *tq)
{
	/* 1 = pass the RSCN up to the transport, 0 = hold it back. */
	int sendup = 1;

	DEVICE_QUEUE_LOCK(tq);

	/*
	 * Let FCP-2 compliant devices continue I/Os
	 * with their low level recoveries.
	 */
	if (((tq->flags & TQF_INITIATOR_DEVICE) == 0) &&
	    (tq->prli_svc_param_word_3 & PRLI_W3_RETRY)) {
		/*
		 * Cause ADISC to go out
		 */
		/* Drop the lock around the mailbox-capable call. */
		DEVICE_QUEUE_UNLOCK(tq);

		(void) ql_get_port_database(ha, tq, PDF_NONE);

		DEVICE_QUEUE_LOCK(tq);
		tq->flags &= ~TQF_RSCN_RCVD;

	} else if (tq->loop_id != PORT_NO_LOOP_ID) {
		if (tq->d_id.b24 != BROADCAST_ADDR) {
			tq->flags |= TQF_NEED_AUTHENTICATION;
		}

		/* Drop the lock; ql_abort_device() takes it itself. */
		DEVICE_QUEUE_UNLOCK(tq);

		(void) ql_abort_device(ha, tq, 1);

		DEVICE_QUEUE_LOCK(tq);

		/* Commands still outstanding: delay the RSCN delivery. */
		if (tq->outcnt) {
			sendup = 0;
		} else {
			tq->flags &= ~TQF_RSCN_RCVD;
		}
	} else {
		/* Device not logged in; nothing to recover. */
		tq->flags &= ~TQF_RSCN_RCVD;
	}

	if (sendup) {
		if (tq->d_id.b24 != BROADCAST_ADDR) {
			tq->flags |= TQF_NEED_AUTHENTICATION;
		}
	}

	DEVICE_QUEUE_UNLOCK(tq);

	return (sendup);
}
8974 
/*
 * ql_handle_rscn_update
 *	Fetches the firmware's current d_id list and creates device
 *	queues (and sends up PLOGIs) for any newly appeared devices.
 *
 * Input:
 *	ha:	adapter state pointer.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_handle_rscn_update(ql_adapter_state_t *ha)
{
	int			rval;
	ql_tgt_t		*tq;
	uint16_t		index, loop_id;
	ql_dev_id_list_t	*list;
	uint32_t		list_size;
	port_id_t		d_id;
	ql_mbx_data_t		mr;
	ql_head_t		done_q = { NULL, NULL };

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
	list = kmem_zalloc(list_size, KM_SLEEP);
	/*
	 * NOTE(review): KM_SLEEP allocations do not return NULL on
	 * illumos, so this check appears to be dead code — confirm.
	 */
	if (list == NULL) {
		rval = QL_MEMORY_ALLOC_FAILED;
		EL(ha, "kmem_zalloc failed=%xh\n", rval);
		return (rval);
	}

	/*
	 * Get data from RISC code d_id list to init each device queue.
	 */
	rval = ql_get_id_list(ha, (caddr_t)list, list_size, &mr);
	if (rval != QL_SUCCESS) {
		kmem_free(list, list_size);
		EL(ha, "get_id_list failed=%xh\n", rval);
		return (rval);
	}

	/* Acquire adapter state lock. */
	ADAPTER_STATE_LOCK(ha);

	/* Check for new devices; mr.mb[1] is the entry count returned. */
	for (index = 0; index < mr.mb[1]; index++) {
		ql_dev_list(ha, list, index, &d_id, &loop_id);

		if (VALID_DEVICE_ID(ha, loop_id)) {
			d_id.r.rsvd_1 = 0;

			/* Skip devices we already have a queue for. */
			tq = ql_d_id_to_queue(ha, d_id);
			if (tq != NULL) {
				continue;
			}

			tq = ql_dev_init(ha, d_id, loop_id);

			/* Test for fabric device. */
			if (d_id.b.domain != ha->d_id.b.domain ||
			    d_id.b.area != ha->d_id.b.area) {
				tq->flags |= TQF_FABRIC_DEVICE;
			}

			/* Drop the lock around the mailbox command. */
			ADAPTER_STATE_UNLOCK(ha);
			if (ql_get_port_database(ha, tq, PDF_NONE) !=
			    QL_SUCCESS) {
				tq->loop_id = PORT_NO_LOOP_ID;
			}
			ADAPTER_STATE_LOCK(ha);

			/*
			 * Send up a PLOGI about the new device
			 */
			if (VALID_DEVICE_ID(ha, tq->loop_id)) {
				(void) ql_send_plogi(ha, tq, &done_q);
			}
		}
	}

	/* Release adapter state lock. */
	ADAPTER_STATE_UNLOCK(ha);

	/* Complete the queued PLOGI buffers outside the lock. */
	if (done_q.first != NULL) {
		ql_done(done_q.first);
	}

	kmem_free(list, list_size);

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}

	return (rval);
}
9064 
9065 /*
9066  * ql_free_unsolicited_buffer
9067  *	Frees allocated buffer.
9068  *
9069  * Input:
 *	ha = adapter state pointer.
 *	ubp = unsolicited buffer pointer.
9072  *	ADAPTER_STATE_LOCK must be already obtained.
9073  *
9074  * Context:
9075  *	Kernel context.
9076  */
static void
ql_free_unsolicited_buffer(ql_adapter_state_t *ha, fc_unsol_buf_t *ubp)
{
	ql_srb_t	*sp;
	int		status;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	sp = ubp->ub_fca_private;
	if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
		/* Disconnect IP from system buffers. */
		if (ha->flags & IP_INITIALIZED) {
			/* Drop the state lock around the shutdown call. */
			ADAPTER_STATE_UNLOCK(ha);
			status = ql_shutdown_ip(ha);
			ADAPTER_STATE_LOCK(ha);
			if (status != QL_SUCCESS) {
				cmn_err(CE_WARN,
				    "!Qlogic %s(%d): Failed to shutdown IP",
				    QL_NAME, ha->instance);
				/* Leave the buffer alone on failure. */
				return;
			}

			ha->flags &= ~IP_ENABLED;
		}

		/* IP buffers use DMA memory; others use kmem. */
		ql_free_phys(ha, &sp->ub_buffer);
	} else {
		kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
	}

	kmem_free(sp, sizeof (ql_srb_t));
	kmem_free(ubp, sizeof (fc_unsol_buf_t));

	/* Maintain the outstanding-UB count. */
	if (ha->ub_allocated != 0) {
		ha->ub_allocated--;
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
9116 
9117 /*
9118  * ql_get_unsolicited_buffer
9119  *	Locates a free unsolicited buffer.
9120  *
9121  * Input:
9122  *	ha = adapter state pointer.
9123  *	type = buffer type.
9124  *
9125  * Returns:
9126  *	Unsolicited buffer pointer.
9127  *
9128  * Context:
9129  *	Interrupt or Kernel context, no mailbox commands allowed.
9130  */
9131 fc_unsol_buf_t *
9132 ql_get_unsolicited_buffer(ql_adapter_state_t *ha, uint32_t type)
9133 {
9134 	fc_unsol_buf_t	*ubp;
9135 	ql_srb_t	*sp;
9136 	uint16_t	index;
9137 
9138 	/* Locate a buffer to use. */
9139 	ubp = NULL;
9140 
9141 	QL_UB_LOCK(ha);
9142 	for (index = 0; index < QL_UB_LIMIT; index++) {
9143 		ubp = ha->ub_array[index];
9144 		if (ubp != NULL) {
9145 			sp = ubp->ub_fca_private;
9146 			if ((sp->ub_type == type) &&
9147 			    (sp->flags & SRB_UB_IN_FCA) &&
9148 			    (!(sp->flags & (SRB_UB_CALLBACK |
9149 			    SRB_UB_FREE_REQUESTED | SRB_UB_ACQUIRED)))) {
9150 				sp->flags |= SRB_UB_ACQUIRED;
9151 				ubp->ub_resp_flags = 0;
9152 				break;
9153 			}
9154 			ubp = NULL;
9155 		}
9156 	}
9157 	QL_UB_UNLOCK(ha);
9158 
9159 	if (ubp) {
9160 		ubp->ub_resp_token = NULL;
9161 		ubp->ub_class = FC_TRAN_CLASS3;
9162 	}
9163 
9164 	return (ubp);
9165 }
9166 
9167 /*
9168  * ql_ub_frame_hdr
9169  *	Processes received unsolicited buffers from ISP.
9170  *
9171  * Input:
9172  *	ha:	adapter state pointer.
9173  *	tq:	target queue pointer.
9174  *	index:	unsolicited buffer array index.
9175  *	done_q:	done queue pointer.
9176  *
9177  * Returns:
9178  *	ql local function return status code.
9179  *
9180  * Context:
9181  *	Interrupt or Kernel context, no mailbox commands allowed.
9182  */
int
ql_ub_frame_hdr(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t index,
    ql_head_t *done_q)
{
	fc_unsol_buf_t	*ubp;
	ql_srb_t	*sp;
	uint16_t	loop_id;
	int		rval = QL_FUNCTION_FAILED;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	QL_UB_LOCK(ha);
	if (index >= QL_UB_LIMIT || (ubp = ha->ub_array[index]) == NULL) {
		EL(ha, "Invalid buffer index=%xh\n", index);
		QL_UB_UNLOCK(ha);
		return (rval);
	}

	sp = ubp->ub_fca_private;
	/* Buffer queued for freeing: return it to the FCA and bail out. */
	if (sp->flags & SRB_UB_FREE_REQUESTED) {
		EL(ha, "buffer freed index=%xh\n", index);
		sp->flags &= ~(SRB_UB_IN_ISP | SRB_UB_CALLBACK |
		    SRB_UB_RSCN | SRB_UB_FCP | SRB_UB_ACQUIRED);

		sp->flags |= SRB_UB_IN_FCA;

		QL_UB_UNLOCK(ha);
		return (rval);
	}

	/* Only accept an idle IP buffer the ISP actually owns. */
	if ((sp->handle == index) &&
	    (sp->flags & SRB_UB_IN_ISP) &&
	    (sp->ub_type == FC_TYPE_IS8802_SNAP) &&
	    (!(sp->flags & SRB_UB_ACQUIRED))) {
		/* set broadcast D_ID */
		loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_2425) ?
		    BROADCAST_24XX_HDL : IP_BROADCAST_LOOP_ID);
		if (tq->ub_loop_id == loop_id) {
			if (ha->topology & QL_FL_PORT) {
				ubp->ub_frame.d_id = 0x000000;
			} else {
				ubp->ub_frame.d_id = 0xffffff;
			}
		} else {
			ubp->ub_frame.d_id = ha->d_id.b24;
		}
		ubp->ub_frame.r_ctl = R_CTL_UNSOL_DATA;
		ubp->ub_frame.rsvd = 0;
		ubp->ub_frame.s_id = tq->d_id.b24;
		ubp->ub_frame.type = FC_TYPE_IS8802_SNAP;
		ubp->ub_frame.seq_cnt = tq->ub_seq_cnt;
		ubp->ub_frame.df_ctl = 0;
		ubp->ub_frame.seq_id = tq->ub_seq_id;
		ubp->ub_frame.rx_id = 0xffff;
		ubp->ub_frame.ox_id = 0xffff;
		/* This frame carries at most one buffer's worth of data. */
		ubp->ub_bufsize = sp->ub_size < tq->ub_sequence_length ?
		    sp->ub_size : tq->ub_sequence_length;
		ubp->ub_frame.ro = tq->ub_frame_ro;

		/* Advance the per-target sequence reassembly state. */
		tq->ub_sequence_length = (uint16_t)
		    (tq->ub_sequence_length - ubp->ub_bufsize);
		tq->ub_frame_ro += ubp->ub_bufsize;
		tq->ub_seq_cnt++;

		/* Set FIRST/END sequence bits by position in the sequence. */
		if (tq->ub_seq_cnt == tq->ub_total_seg_cnt) {
			if (tq->ub_seq_cnt == 1) {
				ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
				    F_CTL_FIRST_SEQ | F_CTL_END_SEQ;
			} else {
				ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
				    F_CTL_END_SEQ;
			}
			tq->ub_total_seg_cnt = 0;
		} else if (tq->ub_seq_cnt == 1) {
			ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
			    F_CTL_FIRST_SEQ;
			ubp->ub_frame.df_ctl = 0x20;
		}

		QL_PRINT_3(CE_CONT, "(%d): ub_frame.d_id=%xh\n",
		    ha->instance, ubp->ub_frame.d_id);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.s_id=%xh\n",
		    ha->instance, ubp->ub_frame.s_id);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.seq_cnt=%xh\n",
		    ha->instance, ubp->ub_frame.seq_cnt);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.seq_id=%xh\n",
		    ha->instance, ubp->ub_frame.seq_id);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.ro=%xh\n",
		    ha->instance, ubp->ub_frame.ro);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.f_ctl=%xh\n",
		    ha->instance, ubp->ub_frame.f_ctl);
		QL_PRINT_3(CE_CONT, "(%d): ub_bufsize=%xh\n",
		    ha->instance, ubp->ub_bufsize);
		QL_DUMP_3(ubp->ub_buffer, 8,
		    ubp->ub_bufsize < 64 ? ubp->ub_bufsize : 64);

		sp->flags |= SRB_UB_CALLBACK | SRB_UB_ACQUIRED;
		ql_add_link_b(done_q, &sp->cmd);
		rval = QL_SUCCESS;
	} else {
		/* Log exactly which precondition failed. */
		if (sp->handle != index) {
			EL(ha, "Bad index=%xh, expect=%xh\n", index,
			    sp->handle);
		}
		if ((sp->flags & SRB_UB_IN_ISP) == 0) {
			EL(ha, "buffer was already in driver, index=%xh\n",
			    index);
		}
		if ((sp->ub_type == FC_TYPE_IS8802_SNAP) == 0) {
			EL(ha, "buffer was not an IP buffer, index=%xh\n",
			    index);
		}
		if (sp->flags & SRB_UB_ACQUIRED) {
			EL(ha, "buffer was being used by driver, index=%xh\n",
			    index);
		}
	}
	QL_UB_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
9306 
9307 /*
9308  * ql_timer
9309  *	One second timer function.
9310  *
9311  * Input:
9312  *	ql_hba.first = first link in adapter list.
9313  *
9314  * Context:
9315  *	Interrupt context, no mailbox commands allowed.
9316  */
static void
ql_timer(void *arg)
{
	ql_link_t		*link;
	uint32_t		set_flags;
	uint32_t		reset_flags;
	ql_adapter_state_t	*ha = NULL, *vha;

	QL_PRINT_6(CE_CONT, "started\n");

	/* Acquire global state lock. */
	GLOBAL_STATE_LOCK();
	/* Timer was cancelled while this callout was pending; bail out. */
	if (ql_timer_timeout_id == NULL) {
		/* Release global state lock. */
		GLOBAL_STATE_UNLOCK();
		return;
	}

	/* Walk every registered adapter. */
	for (link = ql_hba.first; link != NULL; link = link->next) {
		ha = link->base_address;

		/* Skip adapter if suspended or stalled. */
		ADAPTER_STATE_LOCK(ha);
		if (ha->flags & ADAPTER_SUSPENDED ||
		    ha->task_daemon_flags & DRIVER_STALL) {
			ADAPTER_STATE_UNLOCK(ha);
			continue;
		}
		ha->flags |= ADAPTER_TIMER_BUSY;
		ADAPTER_STATE_UNLOCK(ha);

		/* Skip adapters that are powered down. */
		QL_PM_LOCK(ha);
		if (ha->power_level != PM_LEVEL_D0) {
			QL_PM_UNLOCK(ha);

			ADAPTER_STATE_LOCK(ha);
			ha->flags &= ~ADAPTER_TIMER_BUSY;
			ADAPTER_STATE_UNLOCK(ha);
			continue;
		}
		ha->busy++;
		QL_PM_UNLOCK(ha);

		set_flags = 0;
		reset_flags = 0;

		/* Port retry timer handler. */
		if (LOOP_READY(ha)) {
			ADAPTER_STATE_LOCK(ha);
			if (ha->port_retry_timer != 0) {
				ha->port_retry_timer--;
				if (ha->port_retry_timer == 0) {
					set_flags |= PORT_RETRY_NEEDED;
				}
			}
			ADAPTER_STATE_UNLOCK(ha);
		}

		/* Loop down timer handler. */
		if (LOOP_RECONFIGURE(ha) == 0) {
			if (ha->loop_down_timer > LOOP_DOWN_TIMER_END) {
				ha->loop_down_timer--;
				/*
				 * give the firmware loop down dump flag
				 * a chance to work.
				 */
				if (ha->loop_down_timer == LOOP_DOWN_RESET) {
					if (CFG_IST(ha,
					    CFG_DUMP_LOOP_OFFLINE_TIMEOUT)) {
						(void) ql_binary_fw_dump(ha,
						    TRUE);
					}
					EL(ha, "loop_down_reset, "
					    "isp_abort_needed\n");
					set_flags |= ISP_ABORT_NEEDED;
				}
			}
			if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) {
				/* Command abort time handler. */
				if (ha->loop_down_timer ==
				    ha->loop_down_abort_time) {
					ADAPTER_STATE_LOCK(ha);
					ha->flags |= ABORT_CMDS_LOOP_DOWN_TMO;
					ADAPTER_STATE_UNLOCK(ha);
					set_flags |= ABORT_QUEUES_NEEDED;
					EL(ha, "loop_down_abort_time, "
					    "abort_queues_needed\n");
				}

				/* Watchdog timer handler. */
				if (ha->watchdog_timer == 0) {
					ha->watchdog_timer = WATCHDOG_TIME;
				} else if (LOOP_READY(ha)) {
					ha->watchdog_timer--;
					if (ha->watchdog_timer == 0) {
						/* Run for each vport too. */
						for (vha = ha; vha != NULL;
						    vha = vha->vp_next) {
							ql_watchdog(vha,
							    &set_flags,
							    &reset_flags);
						}
						ha->watchdog_timer =
						    WATCHDOG_TIME;
					}
				}
			}
		}

		/* Idle timer handler. */
		if (!DRIVER_SUSPENDED(ha)) {
			if (++ha->idle_timer >= IDLE_CHECK_TIMER) {
#if defined(QL_DEBUG_LEVEL_6) || !defined(QL_DEBUG_LEVEL_3)
				set_flags |= TASK_DAEMON_IDLE_CHK_FLG;
#endif
				ha->idle_timer = 0;
			}
		}
		/* Hand any accumulated work to the task daemon. */
		if (set_flags != 0 || reset_flags != 0) {
			ql_awaken_task_daemon(ha, NULL, set_flags,
			    reset_flags);
		}

		if (ha->xioctl->ledstate.BeaconState == BEACON_ON) {
			ql_blink_led(ha);
		}

		/* Update the IO stats */
		if (ha->xioctl->IOInputByteCnt >= 0x100000) {
			ha->xioctl->IOInputMByteCnt +=
			    (ha->xioctl->IOInputByteCnt / 0x100000);
			ha->xioctl->IOInputByteCnt %= 0x100000;
		}

		if (ha->xioctl->IOOutputByteCnt >= 0x100000) {
			ha->xioctl->IOOutputMByteCnt +=
			    (ha->xioctl->IOOutputByteCnt / 0x100000);
			ha->xioctl->IOOutputByteCnt %= 0x100000;
		}

		ADAPTER_STATE_LOCK(ha);
		ha->flags &= ~ADAPTER_TIMER_BUSY;
		ADAPTER_STATE_UNLOCK(ha);

		QL_PM_LOCK(ha);
		ha->busy--;
		QL_PM_UNLOCK(ha);
	}

	/* Restart timer, if not being stopped. */
	if (ql_timer_timeout_id != NULL) {
		ql_timer_timeout_id = timeout(ql_timer, arg, ql_timer_ticks);
	}

	/* Release global state lock. */
	GLOBAL_STATE_UNLOCK();

	QL_PRINT_6(CE_CONT, "done\n");
}
9475 
9476 /*
9477  * ql_timeout_insert
9478  *	Function used to insert a command block onto the
9479  *	watchdog timer queue.
9480  *
9481  *	Note: Must insure that pkt_time is not zero
9482  *			before calling ql_timeout_insert.
9483  *
9484  * Input:
9485  *	ha:	adapter state pointer.
9486  *	tq:	target queue pointer.
9487  *	sp:	SRB pointer.
9488  *	DEVICE_QUEUE_LOCK must be already obtained.
9489  *
9490  * Context:
9491  *	Kernel context.
9492  */
9493 /* ARGSUSED */
9494 static void
9495 ql_timeout_insert(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_srb_t *sp)
9496 {
9497 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9498 
9499 	if (sp->pkt->pkt_timeout != 0 && sp->pkt->pkt_timeout < 0x10000) {
9500 		/* Make sure timeout >= 2 * R_A_TOV */
9501 		sp->isp_timeout = (uint16_t)
9502 		    (sp->pkt->pkt_timeout < ha->r_a_tov ? ha->r_a_tov :
9503 		    sp->pkt->pkt_timeout);
9504 
9505 		/*
9506 		 * The WATCHDOG_TIME must be rounded up + 1.  As an example,
9507 		 * consider a 1 second timeout. If the WATCHDOG_TIME is 1, it
9508 		 * will expire in the next watchdog call, which could be in
9509 		 * 1 microsecond.
9510 		 *
9511 		 * Add 6 more to insure watchdog does not timeout at the same
9512 		 * time as ISP RISC code timeout.
9513 		 */
9514 		sp->wdg_q_time = (sp->isp_timeout + WATCHDOG_TIME - 1) /
9515 		    WATCHDOG_TIME;
9516 		sp->wdg_q_time += 6;
9517 
9518 		/* Save initial time for resetting watchdog time. */
9519 		sp->init_wdg_q_time = sp->wdg_q_time;
9520 
9521 		/* Insert command onto watchdog queue. */
9522 		ql_add_link_b(&tq->wdg, &sp->wdg);
9523 
9524 		sp->flags |= SRB_WATCHDOG_ENABLED;
9525 	} else {
9526 		sp->isp_timeout = 0;
9527 		sp->wdg_q_time = 0;
9528 		sp->init_wdg_q_time = 0;
9529 	}
9530 
9531 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9532 }
9533 
9534 /*
9535  * ql_watchdog
9536  *	Timeout handler that runs in interrupt context. The
9537  *	ql_adapter_state_t * argument is the parameter set up when the
9538  *	timeout was initialized (state structure pointer).
9539  *	Function used to update timeout values and if timeout
9540  *	has occurred command will be aborted.
9541  *
9542  * Input:
9543  *	ha:		adapter state pointer.
9544  *	set_flags:	task daemon flags to set.
9545  *	reset_flags:	task daemon flags to reset.
9546  *
9547  * Context:
9548  *	Interrupt context, no mailbox commands allowed.
9549  */
static void
ql_watchdog(ql_adapter_state_t *ha, uint32_t *set_flags, uint32_t *reset_flags)
{
	ql_srb_t	*sp;
	ql_link_t	*link;
	ql_link_t	*next_cmd;
	ql_link_t	*next_device;
	ql_tgt_t	*tq;
	ql_lun_t	*lq;
	uint16_t	index;
	int		q_sane;

	QL_PRINT_6(CE_CONT, "(%d): started\n", ha->instance);

	/* Loop through all targets. */
	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = next_device) {
			tq = link->base_address;

			/*
			 * Try to acquire device queue lock.  We run in
			 * interrupt context and must not block; if the lock
			 * is busy, abandon the rest of this hash chain
			 * (next_device = NULL) until the next watchdog tick.
			 */
			if (TRY_DEVICE_QUEUE_LOCK(tq) == 0) {
				next_device = NULL;
				continue;
			}

			/*
			 * Save the successor now -- "link" is reused below
			 * for the per-target watchdog queue walk.
			 */
			next_device = link->next;

			/*
			 * Skip targets with no port-down retries pending
			 * unless link-down reporting is enabled.
			 */
			if (!(CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) &&
			    (tq->port_down_retry_count == 0)) {
				/* Release device queue lock. */
				DEVICE_QUEUE_UNLOCK(tq);
				continue;
			}

			/* Find out if this device is in a sane state. */
			if (tq->flags & (TQF_RSCN_RCVD |
			    TQF_NEED_AUTHENTICATION | TQF_QUEUE_SUSPENDED)) {
				q_sane = 0;
			} else {
				q_sane = 1;
			}
			/* Loop through commands on watchdog queue. */
			for (link = tq->wdg.first; link != NULL;
			    link = next_cmd) {
				next_cmd = link->next;
				sp = link->base_address;
				lq = sp->lun_queue;

				/*
				 * For SCSI commands, if everything seems to
				 * be going fine and this packet is stuck
				 * because of throttling at LUN or target
				 * level then do not decrement the
				 * sp->wdg_q_time
				 */
				if (ha->task_daemon_flags & STATE_ONLINE &&
				    (sp->flags & SRB_ISP_STARTED) == 0 &&
				    q_sane && sp->flags & SRB_FCP_CMD_PKT &&
				    lq->lun_outcnt >= ha->execution_throttle) {
					continue;
				}

				if (sp->wdg_q_time != 0) {
					sp->wdg_q_time--;

					/* Timeout? */
					if (sp->wdg_q_time != 0) {
						continue;
					}

					/* Expired: take it off the queue. */
					ql_remove_link(&tq->wdg, &sp->wdg);
					sp->flags &= ~SRB_WATCHDOG_ENABLED;

					if (sp->flags & SRB_ISP_STARTED) {
						/*
						 * Command already handed to
						 * the ISP; ql_cmd_timeout
						 * will request an ISP abort,
						 * so terminate the entire
						 * scan (inner, outer and
						 * index loops all end here).
						 */
						ql_cmd_timeout(ha, tq, sp,
						    set_flags, reset_flags);

						DEVICE_QUEUE_UNLOCK(tq);
						tq = NULL;
						next_cmd = NULL;
						next_device = NULL;
						index = DEVICE_HEAD_LIST_SIZE;
					} else {
						ql_cmd_timeout(ha, tq, sp,
						    set_flags, reset_flags);
					}
				}
			}

			/* Release device queue lock (unless done above). */
			if (tq != NULL) {
				DEVICE_QUEUE_UNLOCK(tq);
			}
		}
	}

	QL_PRINT_6(CE_CONT, "(%d): done\n", ha->instance);
}
9649 
9650 /*
9651  * ql_cmd_timeout
9652  *	Command timeout handler.
9653  *
9654  * Input:
9655  *	ha:		adapter state pointer.
9656  *	tq:		target queue pointer.
9657  *	sp:		SRB pointer.
9658  *	set_flags:	task daemon flags to set.
9659  *	reset_flags:	task daemon flags to reset.
9660  *
9661  * Context:
9662  *	Interrupt context, no mailbox commands allowed.
9663  */
/* ARGSUSED */
static void
ql_cmd_timeout(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_srb_t *sp,
    uint32_t *set_flags, uint32_t *reset_flags)
{

	if (!(sp->flags & SRB_ISP_STARTED)) {
		/* Command timed out while still queued inside the driver. */

		EL(ha, "command timed out in driver = %ph\n", (void *)sp);

		REQUEST_RING_LOCK(ha);

		/* if it's on a queue */
		if (sp->cmd.head) {
			/*
			 * The pending_cmds que needs to be
			 * protected by the ring lock
			 */
			ql_remove_link(sp->cmd.head, &sp->cmd);
		}
		sp->flags &= ~SRB_IN_DEVICE_QUEUE;

		/* Release request ring and device queue locks. */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);

		/* Set timeout status */
		sp->pkt->pkt_reason = CS_TIMEOUT;

		/* Ensure no retry */
		sp->flags &= ~SRB_RETRY;

		/* Call done routine to handle completion. */
		ql_done(&sp->cmd);

		/*
		 * Re-acquire the device queue lock dropped above; the
		 * caller expects it held on return.
		 */
		DEVICE_QUEUE_LOCK(tq);
	} else {
		/* Command timed out after being started on the ISP. */
		EL(ha, "command timed out in isp=%ph, osc=%ph, index=%xh, "
		    "isp_abort_needed\n", (void *)sp,
		    (void *)ha->outstanding_cmds[sp->handle & OSC_INDEX_MASK],
		    sp->handle & OSC_INDEX_MASK);

		/* Release device queue lock. */
		DEVICE_QUEUE_UNLOCK(tq);

		INTR_LOCK(ha);
		ha->pha->xioctl->ControllerErrorCount++;
		INTR_UNLOCK(ha);

		/* Set ISP needs to be reset */
		sp->flags |= SRB_COMMAND_TIMEOUT;

		/* Optionally capture a firmware dump for diagnosis. */
		if (CFG_IST(ha, CFG_DUMP_DRIVER_COMMAND_TIMEOUT)) {
			(void) ql_binary_fw_dump(ha, TRUE);
		}

		/* Tell the task daemon to reset the ISP. */
		*set_flags |= ISP_ABORT_NEEDED;

		/* Re-acquire lock for the caller. */
		DEVICE_QUEUE_LOCK(tq);
	}
}
9725 
9726 /*
9727  * ql_rst_aen
9728  *	Processes asynchronous reset.
9729  *
9730  * Input:
9731  *	ha = adapter state pointer.
9732  *
9733  * Context:
9734  *	Kernel context.
9735  */
static void
ql_rst_aen(ql_adapter_state_t *ha)
{
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Issue a marker IOCB for all targets/LUNs so the firmware
	 * re-synchronizes command state after the reset.
	 */
	(void) ql_marker(ha, 0, 0, MK_SYNC_ALL);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
9746 
9747 /*
9748  * ql_cmd_wait
9749  *	Stall driver until all outstanding commands are returned.
9750  *
9751  * Input:
9752  *	ha = adapter state pointer.
9753  *
9754  * Context:
9755  *	Kernel context.
9756  */
9757 void
9758 ql_cmd_wait(ql_adapter_state_t *ha)
9759 {
9760 	uint16_t		index;
9761 	ql_link_t		*link;
9762 	ql_tgt_t		*tq;
9763 	ql_adapter_state_t	*vha;
9764 
9765 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9766 
9767 	/* Wait for all outstanding commands to be returned. */
9768 	(void) ql_wait_outstanding(ha);
9769 
9770 	/*
9771 	 * clear out internally queued commands
9772 	 */
9773 	for (vha = ha; vha != NULL; vha = vha->vp_next) {
9774 		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
9775 			for (link = vha->dev[index].first; link != NULL;
9776 			    link = link->next) {
9777 				tq = link->base_address;
9778 				if (tq &&
9779 				    (!(tq->prli_svc_param_word_3 &
9780 				    PRLI_W3_RETRY))) {
9781 					(void) ql_abort_device(vha, tq, 0);
9782 				}
9783 			}
9784 		}
9785 	}
9786 
9787 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9788 }
9789 
9790 /*
9791  * ql_wait_outstanding
9792  *	Wait for all outstanding commands to complete.
9793  *
9794  * Input:
9795  *	ha = adapter state pointer.
9796  *
9797  * Returns:
9798  *	index - the index for ql_srb into outstanding_cmds.
9799  *
9800  * Context:
9801  *	Kernel context.
9802  */
static uint16_t
ql_wait_outstanding(ql_adapter_state_t *ha)
{
	ql_srb_t	*sp;
	uint16_t	index, count;

	/*
	 * Retry budget: 3000 polls with a 10000-unit ql_delay each
	 * (NOTE(review): presumably microseconds, giving roughly a 30 s
	 * total wait -- confirm against ql_delay's implementation).
	 */
	count = 3000;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		/* Kick the request ring if commands are still pending. */
		if (ha->pha->pending_cmds.first != NULL) {
			ql_start_iocb(ha, NULL);
			index = 1;
		}
		/*
		 * A busy slot (not already marked timed out) stalls us:
		 * delay, then restart the scan from the beginning (index
		 * is set to 0 so the loop increment brings it back to 1).
		 */
		if ((sp = ha->pha->outstanding_cmds[index]) != NULL &&
		    (sp->flags & SRB_COMMAND_TIMEOUT) == 0) {
			if (count-- != 0) {
				ql_delay(ha, 10000);
				index = 0;
			} else {
				/* Budget exhausted; give up on this slot. */
				EL(ha, "failed, sp=%ph\n", (void *)sp);
				break;
			}
		}
	}

	return (index);
}
9829 
9830 /*
9831  * ql_restart_queues
9832  *	Restart device queues.
9833  *
9834  * Input:
9835  *	ha = adapter state pointer.
9836  *	DEVICE_QUEUE_LOCK must be released.
9837  *
9838  * Context:
9839  *	Interrupt or Kernel context, no mailbox commands allowed.
9840  */
static void
ql_restart_queues(ql_adapter_state_t *ha)
{
	ql_link_t		*link, *link2;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;
	uint16_t		index;
	ql_adapter_state_t	*vha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Walk every target on the physical port and all virtual ports. */
	for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
			for (link = vha->dev[index].first; link != NULL;
			    link = link->next) {
				tq = link->base_address;

				/* Acquire device queue lock. */
				DEVICE_QUEUE_LOCK(tq);

				/* Lift any suspension on this target. */
				tq->flags &= ~TQF_QUEUE_SUSPENDED;

				/* Restart I/O on each LUN with queued work. */
				for (link2 = tq->lun_queues.first;
				    link2 != NULL; link2 = link2->next) {
					lq = link2->base_address;

					if (lq->cmd.first != NULL) {
						/*
						 * ql_next evidently releases
						 * the device queue lock (it
						 * is re-acquired right after
						 * before the walk continues).
						 */
						ql_next(vha, lq);
						DEVICE_QUEUE_LOCK(tq);
					}
				}

				/* Release device queue lock. */
				DEVICE_QUEUE_UNLOCK(tq);
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
9881 
9882 
9883 /*
9884  * ql_iidma
9885  *	Setup iiDMA parameters to firmware
9886  *
9887  * Input:
9888  *	ha = adapter state pointer.
9889  *	DEVICE_QUEUE_LOCK must be released.
9890  *
9891  * Context:
9892  *	Interrupt or Kernel context, no mailbox commands allowed.
9893  */
static void
ql_iidma(ql_adapter_state_t *ha)
{
	ql_link_t	*link;
	ql_tgt_t	*tq;
	uint16_t	index;
	char		buf[256];
	uint32_t	data;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* iiDMA is only supported on 24xx/25xx class adapters. */
	if ((CFG_IST(ha, CFG_CTRL_2425)) == 0) {
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
		return;
	}

	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;

			/* Acquire device queue lock. */
			DEVICE_QUEUE_LOCK(tq);

			/* Only touch targets flagged for an iiDMA update. */
			if ((tq->flags & TQF_IIDMA_NEEDED) == 0) {
				DEVICE_QUEUE_UNLOCK(tq);
				continue;
			}

			tq->flags &= ~TQF_IIDMA_NEEDED;

			/* Skip fabric-services handles and undefined rates. */
			if ((tq->loop_id > LAST_N_PORT_HDL) ||
			    (tq->iidma_rate == IIDMA_RATE_NDEF)) {
				DEVICE_QUEUE_UNLOCK(tq);
				continue;
			}

			/* Get the iiDMA persistent data */
			if (tq->iidma_rate == IIDMA_RATE_INIT) {
				/*
				 * Property name is keyed by the target's
				 * 8-byte world-wide port name.
				 */
				(void) sprintf(buf,
				    "iidma-rate-%02x%02x%02x%02x%02x"
				    "%02x%02x%02x", tq->port_name[0],
				    tq->port_name[1], tq->port_name[2],
				    tq->port_name[3], tq->port_name[4],
				    tq->port_name[5], tq->port_name[6],
				    tq->port_name[7]);

				if ((data = ql_get_prop(ha, buf)) ==
				    0xffffffff) {
					/* No persistent setting configured. */
					tq->iidma_rate = IIDMA_RATE_NDEF;
				} else {
					switch (data) {
					case IIDMA_RATE_1GB:
					case IIDMA_RATE_2GB:
					case IIDMA_RATE_4GB:
						tq->iidma_rate = data;
						break;
					case IIDMA_RATE_8GB:
						/*
						 * 8G is only valid on 25xx;
						 * clamp to 4G otherwise.
						 */
						if (CFG_IST(ha,
						    CFG_CTRL_25XX)) {
							tq->iidma_rate = data;
						} else {
							tq->iidma_rate =
							    IIDMA_RATE_4GB;
						}
						break;
					default:
						EL(ha, "invalid data for "
						    "parameter: %s: %xh\n",
						    buf, data);
						tq->iidma_rate =
						    IIDMA_RATE_NDEF;
						break;
					}
				}
			}

			/* Set the firmware's iiDMA rate */
			if (tq->iidma_rate <= IIDMA_RATE_MAX) {
				/*
				 * NOTE(review): ql_iidma_rate appears to be
				 * a mailbox operation ("mbx failed"), issued
				 * with the device queue lock held -- confirm
				 * this is safe in the documented context.
				 */
				data = ql_iidma_rate(ha, tq->loop_id,
				    &tq->iidma_rate, EXT_IIDMA_MODE_SET);
				if (data != QL_SUCCESS) {
					EL(ha, "mbx failed: %xh\n", data);
				}
			}

			/* Release device queue lock. */
			DEVICE_QUEUE_UNLOCK(tq);
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
9987 
9988 /*
9989  * ql_abort_queues
9990  *	Abort all commands on device queues.
9991  *
9992  * Input:
9993  *	ha = adapter state pointer.
9994  *
9995  * Context:
9996  *	Interrupt or Kernel context, no mailbox commands allowed.
9997  */
static void
ql_abort_queues(ql_adapter_state_t *ha)
{
	ql_link_t		*link, *link1, *link2;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;
	ql_srb_t		*sp;
	uint16_t		index;
	ql_adapter_state_t	*vha;

	QL_PRINT_10(CE_CONT, "(%d): started\n", ha->instance);

	/* Return all commands in outstanding command list. */
	INTR_LOCK(ha);

	/* Place all commands in outstanding cmd list on device queue. */
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		/*
		 * Drain the pending queue first; restart the scan from
		 * slot 1 afterwards since the array may have changed
		 * while the interrupt lock was dropped.
		 */
		if (ha->pending_cmds.first != NULL) {
			INTR_UNLOCK(ha);
			ql_start_iocb(ha, NULL);
			/* Delay for system */
			ql_delay(ha, 10000);
			INTR_LOCK(ha);
			index = 1;
		}
		sp = ha->outstanding_cmds[index];
		if (sp != NULL) {
			/* Detach the SRB from the outstanding array. */
			ha->outstanding_cmds[index] = NULL;
			sp->handle = 0;
			sp->flags &= ~SRB_IN_TOKEN_ARRAY;

			/* ql_done must not be called with INTR held. */
			INTR_UNLOCK(ha);

			/* Set ending status. */
			sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;
			sp->flags |= SRB_ISP_COMPLETED;

			/* Call done routine to handle completions. */
			sp->cmd.next = NULL;
			ql_done(&sp->cmd);

			INTR_LOCK(ha);
		}
	}
	INTR_UNLOCK(ha);

	/* Now flush commands still queued at the device/LUN level. */
	for (vha = ha; vha != NULL; vha = vha->vp_next) {
		QL_PRINT_10(CE_CONT, "(%d,%d): abort instance\n",
		    vha->instance, vha->vp_index);
		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
			for (link = vha->dev[index].first; link != NULL;
			    link = link->next) {
				tq = link->base_address;

				/*
				 * Set port unavailable status for
				 * all commands on device queue.
				 */
				DEVICE_QUEUE_LOCK(tq);

				for (link1 = tq->lun_queues.first;
				    link1 != NULL; link1 = link1->next) {
					lq = link1->base_address;

					link2 = lq->cmd.first;
					while (link2 != NULL) {
						sp = link2->base_address;

						/*
						 * Commands already being
						 * aborted are left alone.
						 */
						if (sp->flags & SRB_ABORT) {
							link2 = link2->next;
							continue;
						}

						/* Rem srb from dev cmd q. */
						ql_remove_link(&lq->cmd,
						    &sp->cmd);
						sp->flags &=
						    ~SRB_IN_DEVICE_QUEUE;

						/* Release device queue lock */
						DEVICE_QUEUE_UNLOCK(tq);

						/* Set ending status. */
						sp->pkt->pkt_reason =
						    CS_PORT_UNAVAILABLE;

						/*
						 * Call done routine to handle
						 * completions.
						 */
						ql_done(&sp->cmd);

						/* Delay for system */
						ql_delay(ha, 10000);

						/*
						 * Acquire device queue lock
						 * and restart from the list
						 * head -- the queue may have
						 * changed while unlocked.
						 */
						DEVICE_QUEUE_LOCK(tq);
						link2 = lq->cmd.first;
					}
				}
				/* Release device queue lock. */
				DEVICE_QUEUE_UNLOCK(tq);
			}
		}
	}

	QL_PRINT_10(CE_CONT, "(%d): done\n", ha->instance);
}
10106 
10107 /*
10108  * ql_loop_resync
10109  *	Resync with fibre channel devices.
10110  *
10111  * Input:
10112  *	ha = adapter state pointer.
10113  *	DEVICE_QUEUE_LOCK must be released.
10114  *
10115  * Returns:
10116  *	ql local function return status code.
10117  *
10118  * Context:
10119  *	Kernel context.
10120  */
10121 static int
10122 ql_loop_resync(ql_adapter_state_t *ha)
10123 {
10124 	int rval;
10125 
10126 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10127 
10128 	if (ha->flags & IP_INITIALIZED) {
10129 		(void) ql_shutdown_ip(ha);
10130 	}
10131 
10132 	rval = ql_fw_ready(ha, 10);
10133 
10134 	TASK_DAEMON_LOCK(ha);
10135 	ha->task_daemon_flags &= ~LOOP_RESYNC_ACTIVE;
10136 	TASK_DAEMON_UNLOCK(ha);
10137 
10138 	/* Set loop online, if it really is. */
10139 	if (rval == QL_SUCCESS) {
10140 		ql_loop_online(ha);
10141 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10142 	} else {
10143 		EL(ha, "failed, rval = %xh\n", rval);
10144 	}
10145 
10146 	return (rval);
10147 }
10148 
10149 /*
10150  * ql_loop_online
10151  *	Set loop online status if it really is online.
10152  *
10153  * Input:
10154  *	ha = adapter state pointer.
10155  *	DEVICE_QUEUE_LOCK must be released.
10156  *
10157  * Context:
10158  *	Kernel context.
10159  */
void
ql_loop_online(ql_adapter_state_t *ha)
{
	ql_adapter_state_t	*vha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Inform the FC Transport that the hardware is online. */
	for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
		/* Skip ports still resyncing or still down. */
		if (!(vha->task_daemon_flags &
		    (LOOP_RESYNC_NEEDED | LOOP_DOWN))) {
			/* Restart IP if it was shutdown. */
			if (vha->vp_index == 0 && vha->flags & IP_ENABLED &&
			    !(vha->flags & IP_INITIALIZED)) {
				(void) ql_initialize_ip(vha);
				ql_isp_rcvbuf(vha);
			}

			/*
			 * If the port is not already reported LOOP or
			 * ONLINE, derive the new state from the topology
			 * (keeping the speed bits) and ask the task daemon
			 * to deliver a state-change callback.
			 */
			if (FC_PORT_STATE_MASK(vha->state) != FC_STATE_LOOP &&
			    FC_PORT_STATE_MASK(vha->state) !=
			    FC_STATE_ONLINE) {
				vha->state = FC_PORT_SPEED_MASK(vha->state);
				if (vha->topology & QL_LOOP_CONNECTION) {
					vha->state |= FC_STATE_LOOP;
				} else {
					vha->state |= FC_STATE_ONLINE;
				}
				TASK_DAEMON_LOCK(ha);
				vha->task_daemon_flags |= FC_STATE_CHANGE;
				TASK_DAEMON_UNLOCK(ha);
			}
		}
	}

	ql_awaken_task_daemon(ha, NULL, 0, 0);

	/* Restart device queues that may have been stopped. */
	ql_restart_queues(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
10201 
10202 /*
10203  * ql_fca_handle_to_state
10204  *	Verifies handle to be correct.
10205  *
10206  * Input:
10207  *	fca_handle = pointer to state structure.
10208  *
10209  * Returns:
10210  *	NULL = failure
10211  *
10212  * Context:
10213  *	Kernel context.
10214  */
static ql_adapter_state_t *
ql_fca_handle_to_state(opaque_t fca_handle)
{
#ifdef	QL_DEBUG_ROUTINES
	ql_link_t		*link;
	ql_adapter_state_t	*ha = NULL;
	ql_adapter_state_t	*vha = NULL;

	/*
	 * Debug builds only: walk every registered HBA and each of its
	 * virtual ports to confirm the opaque handle is a known adapter.
	 */
	for (link = ql_hba.first; link != NULL; link = link->next) {
		ha = link->base_address;
		for (vha = ha->vp_next; vha != NULL; vha = vha->vp_next) {
			if ((opaque_t)vha == fca_handle) {
				ha = vha;
				break;
			}
		}
		if ((opaque_t)ha == fca_handle) {
			break;
		} else {
			ha = NULL;
		}
	}

	if (ha == NULL) {
		/*EMPTY*/
		QL_PRINT_2(CE_CONT, "failed\n");
	}

	ASSERT(ha != NULL);
#endif /* QL_DEBUG_ROUTINES */

	/* Non-debug builds do no validation; the handle is simply cast. */
	return ((ql_adapter_state_t *)fca_handle);
}
10248 
10249 /*
10250  * ql_d_id_to_queue
10251  *	Locate device queue that matches destination ID.
10252  *
10253  * Input:
10254  *	ha = adapter state pointer.
10255  *	d_id = destination ID
10256  *
10257  * Returns:
10258  *	NULL = failure
10259  *
10260  * Context:
10261  *	Interrupt or Kernel context, no mailbox commands allowed.
10262  */
10263 ql_tgt_t *
10264 ql_d_id_to_queue(ql_adapter_state_t *ha, port_id_t d_id)
10265 {
10266 	uint16_t	index;
10267 	ql_tgt_t	*tq;
10268 	ql_link_t	*link;
10269 
10270 	/* Get head queue index. */
10271 	index = ql_alpa_to_index[d_id.b.al_pa];
10272 
10273 	for (link = ha->dev[index].first; link != NULL; link = link->next) {
10274 		tq = link->base_address;
10275 		if (tq->d_id.b24 == d_id.b24 &&
10276 		    VALID_DEVICE_ID(ha, tq->loop_id)) {
10277 			return (tq);
10278 		}
10279 	}
10280 
10281 	return (NULL);
10282 }
10283 
10284 /*
10285  * ql_loop_id_to_queue
10286  *	Locate device queue that matches loop ID.
10287  *
10288  * Input:
10289  *	ha:		adapter state pointer.
10290  *	loop_id:	destination ID
10291  *
10292  * Returns:
10293  *	NULL = failure
10294  *
10295  * Context:
10296  *	Interrupt or Kernel context, no mailbox commands allowed.
10297  */
10298 ql_tgt_t *
10299 ql_loop_id_to_queue(ql_adapter_state_t *ha, uint16_t loop_id)
10300 {
10301 	uint16_t	index;
10302 	ql_tgt_t	*tq;
10303 	ql_link_t	*link;
10304 
10305 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10306 		for (link = ha->dev[index].first; link != NULL;
10307 		    link = link->next) {
10308 			tq = link->base_address;
10309 			if (tq->loop_id == loop_id) {
10310 				return (tq);
10311 			}
10312 		}
10313 	}
10314 
10315 	return (NULL);
10316 }
10317 
10318 /*
10319  * ql_kstat_update
10320  *	Updates kernel statistics.
10321  *
10322  * Input:
10323  *	ksp - driver kernel statistics structure pointer.
10324  *	rw - function to perform
10325  *
10326  * Returns:
10327  *	0 or EACCES
10328  *
10329  * Context:
10330  *	Kernel context.
10331  */
10332 /* ARGSUSED */
10333 static int
10334 ql_kstat_update(kstat_t *ksp, int rw)
10335 {
10336 	int			rval;
10337 
10338 	QL_PRINT_3(CE_CONT, "started\n");
10339 
10340 	if (rw == KSTAT_WRITE) {
10341 		rval = EACCES;
10342 	} else {
10343 		rval = 0;
10344 	}
10345 
10346 	if (rval != 0) {
10347 		/*EMPTY*/
10348 		QL_PRINT_2(CE_CONT, "failed, rval = %xh\n", rval);
10349 	} else {
10350 		/*EMPTY*/
10351 		QL_PRINT_3(CE_CONT, "done\n");
10352 	}
10353 	return (rval);
10354 }
10355 
10356 /*
10357  * ql_load_flash
10358  *	Loads flash.
10359  *
10360  * Input:
10361  *	ha:	adapter state pointer.
10362  *	dp:	data pointer.
10363  *	size:	data length.
10364  *
10365  * Returns:
10366  *	ql local function return status code.
10367  *
10368  * Context:
10369  *	Kernel context.
10370  */
int
ql_load_flash(ql_adapter_state_t *ha, uint8_t *dp, uint32_t size)
{
	uint32_t	cnt;
	int		rval;
	uint32_t	size_to_offset;
	uint32_t	size_to_compare;
	int		erase_all;

	/* 24xx/25xx adapters use their own flash programming path. */
	if (CFG_IST(ha, CFG_CTRL_2425)) {
		return (ql_24xx_load_flash(ha, dp, size, 0));
	}

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Default limits: 128 KB image written at offset 0.  SBUS cards
	 * carry FCode and FPGA images in a 512 KB part, so both the size
	 * cap and the destination offset vary below.
	 */
	size_to_compare = 0x20000;
	size_to_offset = 0;
	erase_all = 0;
	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		if (size == 0x80000) {
			/* Request to flash the entire chip. */
			size_to_compare = 0x80000;
			erase_all = 1;
		} else {
			size_to_compare = 0x40000;
			if (ql_flash_sbus_fpga) {
				/* FPGA image lives in the upper 256 KB. */
				size_to_offset = 0x40000;
			}
		}
	}
	if (size > size_to_compare) {
		rval = QL_FUNCTION_PARAMETER_ERROR;
		EL(ha, "failed=%xh\n", rval);
		return (rval);
	}

	GLOBAL_HW_LOCK();

	/* Enable Flash Read/Write. */
	ql_flash_enable(ha);

	/* Erase flash prior to write. */
	rval = ql_erase_flash(ha, erase_all);

	if (rval == QL_SUCCESS) {
		/* Write data to flash. */
		for (cnt = 0; cnt < size; cnt++) {
			/* Allow other system activity. */
			if (cnt % 0x1000 == 0) {
				ql_delay(ha, 10000);
			}
			rval = ql_program_flash_address(ha,
			    cnt + size_to_offset, *dp++);
			if (rval != QL_SUCCESS) {
				break;
			}
		}
	}

	/* Always re-lock the flash, even on failure. */
	ql_flash_disable(ha);

	GLOBAL_HW_UNLOCK();

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
10442 
10443 /*
10444  * ql_program_flash_address
10445  *	Program flash address.
10446  *
10447  * Input:
10448  *	ha = adapter state pointer.
10449  *	addr = flash byte address.
10450  *	data = data to be written to flash.
10451  *
10452  * Returns:
10453  *	ql local function return status code.
10454  *
10455  * Context:
10456  *	Kernel context.
10457  */
10458 static int
10459 ql_program_flash_address(ql_adapter_state_t *ha, uint32_t addr, uint8_t data)
10460 {
10461 	int rval;
10462 
10463 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
10464 		ql_write_flash_byte(ha, 0x5555, 0xa0);
10465 		ql_write_flash_byte(ha, addr, data);
10466 	} else {
10467 		/* Write Program Command Sequence */
10468 		ql_write_flash_byte(ha, 0x5555, 0xaa);
10469 		ql_write_flash_byte(ha, 0x2aaa, 0x55);
10470 		ql_write_flash_byte(ha, 0x5555, 0xa0);
10471 		ql_write_flash_byte(ha, addr, data);
10472 	}
10473 
10474 	/* Wait for write to complete. */
10475 	rval = ql_poll_flash(ha, addr, data);
10476 
10477 	if (rval != QL_SUCCESS) {
10478 		EL(ha, "failed=%xh\n", rval);
10479 	}
10480 	return (rval);
10481 }
10482 
10483 /*
10484  * ql_erase_flash
10485  *	Erases entire flash.
10486  *
10487  * Input:
10488  *	ha = adapter state pointer.
10489  *
10490  * Returns:
10491  *	ql local function return status code.
10492  *
10493  * Context:
10494  *	Kernel context.
10495  */
10496 int
10497 ql_erase_flash(ql_adapter_state_t *ha, int erase_all)
10498 {
10499 	int		rval;
10500 	uint32_t	erase_delay = 2000000;
10501 	uint32_t	sStartAddr;
10502 	uint32_t	ssize;
10503 	uint32_t	cnt;
10504 	uint8_t		*bfp;
10505 	uint8_t		*tmp;
10506 
10507 	if ((CFG_IST(ha, CFG_SBUS_CARD)) && !erase_all) {
10508 
10509 		if (ql_flash_sbus_fpga == 1) {
10510 			ssize = QL_SBUS_FCODE_SIZE;
10511 			sStartAddr = QL_FCODE_OFFSET;
10512 		} else {
10513 			ssize = QL_FPGA_SIZE;
10514 			sStartAddr = QL_FPGA_OFFSET;
10515 		}
10516 
10517 		erase_delay = 20000000;
10518 
10519 		bfp = (uint8_t *)kmem_zalloc(ssize, KM_SLEEP);
10520 
10521 		/* Save the section of flash we're not updating to buffer */
10522 		tmp = bfp;
10523 		for (cnt = sStartAddr; cnt < ssize+sStartAddr; cnt++) {
10524 			/* Allow other system activity. */
10525 			if (cnt % 0x1000 == 0) {
10526 				ql_delay(ha, 10000);
10527 			}
10528 			*tmp++ = (uint8_t)ql_read_flash_byte(ha, cnt);
10529 		}
10530 	}
10531 
10532 	/* Chip Erase Command Sequence */
10533 	ql_write_flash_byte(ha, 0x5555, 0xaa);
10534 	ql_write_flash_byte(ha, 0x2aaa, 0x55);
10535 	ql_write_flash_byte(ha, 0x5555, 0x80);
10536 	ql_write_flash_byte(ha, 0x5555, 0xaa);
10537 	ql_write_flash_byte(ha, 0x2aaa, 0x55);
10538 	ql_write_flash_byte(ha, 0x5555, 0x10);
10539 
10540 	ql_delay(ha, erase_delay);
10541 
10542 	/* Wait for erase to complete. */
10543 	rval = ql_poll_flash(ha, 0, 0x80);
10544 
10545 	if (rval != QL_SUCCESS) {
10546 		EL(ha, "failed=%xh\n", rval);
10547 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
10548 			kmem_free(bfp, ssize);
10549 		}
10550 		return (rval);
10551 	}
10552 
10553 	/* restore the section we saved in the buffer */
10554 	if ((CFG_IST(ha, CFG_SBUS_CARD)) && !erase_all) {
10555 		/* Restore the section we saved off */
10556 		tmp = bfp;
10557 		for (cnt = sStartAddr; cnt < ssize+sStartAddr; cnt++) {
10558 			/* Allow other system activity. */
10559 			if (cnt % 0x1000 == 0) {
10560 				ql_delay(ha, 10000);
10561 			}
10562 			rval = ql_program_flash_address(ha, cnt, *tmp++);
10563 			if (rval != QL_SUCCESS) {
10564 				break;
10565 			}
10566 		}
10567 
10568 		kmem_free(bfp, ssize);
10569 	}
10570 
10571 	if (rval != QL_SUCCESS) {
10572 		EL(ha, "failed=%xh\n", rval);
10573 	}
10574 	return (rval);
10575 }
10576 
10577 /*
10578  * ql_poll_flash
10579  *	Polls flash for completion.
10580  *
10581  * Input:
10582  *	ha = adapter state pointer.
10583  *	addr = flash byte address.
10584  *	data = data to be polled.
10585  *
10586  * Returns:
10587  *	ql local function return status code.
10588  *
10589  * Context:
10590  *	Kernel context.
10591  */
10592 int
10593 ql_poll_flash(ql_adapter_state_t *ha, uint32_t addr, uint8_t poll_data)
10594 {
10595 	uint8_t		flash_data;
10596 	uint32_t	cnt;
10597 	int		rval = QL_FUNCTION_FAILED;
10598 
10599 	poll_data = (uint8_t)(poll_data & BIT_7);
10600 
10601 	/* Wait for 30 seconds for command to finish. */
10602 	for (cnt = 30000000; cnt; cnt--) {
10603 		flash_data = (uint8_t)ql_read_flash_byte(ha, addr);
10604 
10605 		if ((flash_data & BIT_7) == poll_data) {
10606 			rval = QL_SUCCESS;
10607 			break;
10608 		}
10609 		if (flash_data & BIT_5 && cnt > 2) {
10610 			cnt = 2;
10611 		}
10612 		drv_usecwait(1);
10613 	}
10614 
10615 	if (rval != QL_SUCCESS) {
10616 		EL(ha, "failed=%xh\n", rval);
10617 	}
10618 	return (rval);
10619 }
10620 
10621 /*
10622  * ql_flash_enable
10623  *	Setup flash for reading/writing.
10624  *
10625  * Input:
10626  *	ha = adapter state pointer.
10627  *
10628  * Context:
10629  *	Kernel context.
10630  */
void
ql_flash_enable(ql_adapter_state_t *ha)
{
	uint16_t	data;

	/* Enable Flash Read/Write. */
	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/* Set the FPGA's write-enable bit for the flash part. */
		data = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF));
		data = (uint16_t)(data | SBUS_FLASH_WRITE_ENABLE);
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF), data);
		/* Read reset command sequence */
		ql_write_flash_byte(ha, 0xaaa, 0xaa);
		ql_write_flash_byte(ha, 0x555, 0x55);
		ql_write_flash_byte(ha, 0xaaa, 0x20);
		ql_write_flash_byte(ha, 0x555, 0xf0);
	} else {
		/* Turn on the ISP's flash-enable control bit. */
		data = (uint16_t)(RD16_IO_REG(ha, ctrl_status) |
		    ISP_FLASH_ENABLE);
		WRT16_IO_REG(ha, ctrl_status, data);

		/* Read/Reset Command Sequence */
		ql_write_flash_byte(ha, 0x5555, 0xaa);
		ql_write_flash_byte(ha, 0x2aaa, 0x55);
		ql_write_flash_byte(ha, 0x5555, 0xf0);
	}
	/* Dummy read to put the part back into read mode. */
	(void) ql_read_flash_byte(ha, 0);
}
10660 
10661 /*
10662  * ql_flash_disable
10663  *	Disable flash and allow RISC to run.
10664  *
10665  * Input:
10666  *	ha = adapter state pointer.
10667  *
10668  * Context:
10669  *	Kernel context.
10670  */
void
ql_flash_disable(ql_adapter_state_t *ha)
{
	uint16_t	data;

	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/*
		 * Lock the flash back up.
		 */
		ql_write_flash_byte(ha, 0x555, 0x90);
		ql_write_flash_byte(ha, 0x555, 0x0);

		/* Clear the FPGA's flash write-enable bit. */
		data = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF));
		data = (uint16_t)(data & ~SBUS_FLASH_WRITE_ENABLE);
		ddi_put16(ha->sbus_fpga_dev_handle,
		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF), data);
	} else {
		/* Clear the ISP's flash-enable control bit. */
		data = (uint16_t)(RD16_IO_REG(ha, ctrl_status) &
		    ~ISP_FLASH_ENABLE);
		WRT16_IO_REG(ha, ctrl_status, data);
	}
}
10694 
10695 /*
10696  * ql_write_flash_byte
10697  *	Write byte to flash.
10698  *
10699  * Input:
10700  *	ha = adapter state pointer.
10701  *	addr = flash byte address.
10702  *	data = data to be written.
10703  *
10704  * Context:
10705  *	Kernel context.
10706  */
10707 void
10708 ql_write_flash_byte(ql_adapter_state_t *ha, uint32_t addr, uint8_t data)
10709 {
10710 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
10711 		ddi_put16(ha->sbus_fpga_dev_handle,
10712 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_LOADDR),
10713 		    LSW(addr));
10714 		ddi_put16(ha->sbus_fpga_dev_handle,
10715 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_HIADDR),
10716 		    MSW(addr));
10717 		ddi_put16(ha->sbus_fpga_dev_handle,
10718 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_DATA),
10719 		    (uint16_t)data);
10720 	} else {
10721 		uint16_t bank_select;
10722 
10723 		/* Setup bit 16 of flash address. */
10724 		bank_select = (uint16_t)RD16_IO_REG(ha, ctrl_status);
10725 
10726 		if (CFG_IST(ha, CFG_CTRL_6322)) {
10727 			bank_select = (uint16_t)(bank_select & ~0xf0);
10728 			bank_select = (uint16_t)(bank_select |
10729 			    ((addr >> 12 & 0xf0) | ISP_FLASH_64K_BANK));
10730 			WRT16_IO_REG(ha, ctrl_status, bank_select);
10731 		} else {
10732 			if (addr & BIT_16 && !(bank_select &
10733 			    ISP_FLASH_64K_BANK)) {
10734 				bank_select = (uint16_t)(bank_select |
10735 				    ISP_FLASH_64K_BANK);
10736 				WRT16_IO_REG(ha, ctrl_status, bank_select);
10737 			} else if (!(addr & BIT_16) && bank_select &
10738 			    ISP_FLASH_64K_BANK) {
10739 				bank_select = (uint16_t)(bank_select &
10740 				    ~ISP_FLASH_64K_BANK);
10741 				WRT16_IO_REG(ha, ctrl_status, bank_select);
10742 			}
10743 		}
10744 
10745 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
10746 			WRT16_IO_REG(ha, flash_address, (uint16_t)addr);
10747 			WRT16_IO_REG(ha, flash_data, (uint16_t)data);
10748 		} else {
10749 			WRT16_IOMAP_REG(ha, flash_address, addr);
10750 			WRT16_IOMAP_REG(ha, flash_data, data);
10751 		}
10752 	}
10753 }
10754 
10755 /*
10756  * ql_read_flash_byte
10757  *	Reads byte from flash, but must read a word from chip.
10758  *
10759  * Input:
10760  *	ha = adapter state pointer.
10761  *	addr = flash byte address.
10762  *
10763  * Returns:
10764  *	byte from flash.
10765  *
10766  * Context:
10767  *	Kernel context.
10768  */
10769 uint8_t
10770 ql_read_flash_byte(ql_adapter_state_t *ha, uint32_t addr)
10771 {
10772 	uint8_t	data;
10773 
10774 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
10775 		ddi_put16(ha->sbus_fpga_dev_handle,
10776 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_LOADDR),
10777 		    LSW(addr));
10778 		ddi_put16(ha->sbus_fpga_dev_handle,
10779 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_HIADDR),
10780 		    MSW(addr));
10781 		data = (uint8_t)ddi_get16(ha->sbus_fpga_dev_handle,
10782 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_DATA));
10783 	} else {
10784 		uint16_t	bank_select;
10785 
10786 		/* Setup bit 16 of flash address. */
10787 		bank_select = RD16_IO_REG(ha, ctrl_status);
10788 		if (CFG_IST(ha, CFG_CTRL_6322)) {
10789 			bank_select = (uint16_t)(bank_select & ~0xf0);
10790 			bank_select = (uint16_t)(bank_select |
10791 			    ((addr >> 12 & 0xf0) | ISP_FLASH_64K_BANK));
10792 			WRT16_IO_REG(ha, ctrl_status, bank_select);
10793 		} else {
10794 			if (addr & BIT_16 &&
10795 			    !(bank_select & ISP_FLASH_64K_BANK)) {
10796 				bank_select = (uint16_t)(bank_select |
10797 				    ISP_FLASH_64K_BANK);
10798 				WRT16_IO_REG(ha, ctrl_status, bank_select);
10799 			} else if (!(addr & BIT_16) &&
10800 			    bank_select & ISP_FLASH_64K_BANK) {
10801 				bank_select = (uint16_t)(bank_select &
10802 				    ~ISP_FLASH_64K_BANK);
10803 				WRT16_IO_REG(ha, ctrl_status, bank_select);
10804 			}
10805 		}
10806 
10807 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
10808 			WRT16_IO_REG(ha, flash_address, addr);
10809 			data = (uint8_t)RD16_IO_REG(ha, flash_data);
10810 		} else {
10811 			WRT16_IOMAP_REG(ha, flash_address, addr);
10812 			data = (uint8_t)RD16_IOMAP_REG(ha, flash_data);
10813 		}
10814 	}
10815 
10816 	return (data);
10817 }
10818 
10819 /*
10820  * ql_24xx_flash_id
10821  *	Get flash IDs.
10822  *
10823  * Input:
10824  *	ha:		adapter state pointer.
10825  *
10826  * Returns:
10827  *	ql local function return status code.
10828  *
10829  * Context:
10830  *	Kernel context.
10831  */
int
ql_24xx_flash_id(ql_adapter_state_t *vha)
{
	int			rval;
	uint32_t		fdata = 0;
	ql_adapter_state_t	*ha = vha->pha;	/* physical adapter */
	ql_xioctl_t		*xp = ha->xioctl;


	/* First ID read attempt (conf offset 0x3AB). */
	rval = ql_24xx_read_flash(ha, FLASH_CONF_ADDR | 0x3AB, &fdata);

	/*
	 * Retry at an alternate conf offset if the first read failed,
	 * returned nothing, or this is a 25xx chip.  Offset differs
	 * between 2422 (0x39F) and other chips (0x49F).
	 */
	if (rval != QL_SUCCESS || fdata == 0 || CFG_IST(ha, CFG_CTRL_25XX)) {
		fdata = 0;
		rval = ql_24xx_read_flash(ha, FLASH_CONF_ADDR |
		    (CFG_IST(ha, CFG_CTRL_2422) ? 0x39F : 0x49F), &fdata);
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "24xx read_flash failed=%xh\n", rval);
	} else if (fdata != 0) {
		/* Unpack manufacturer/id/length from the returned word. */
		xp->fdesc.flash_manuf = LSB(LSW(fdata));
		xp->fdesc.flash_id = MSB(LSW(fdata));
		xp->fdesc.flash_len = LSB(MSW(fdata));
	} else {
		/* Read succeeded but no ID: assume a 1MB Atmel part. */
		xp->fdesc.flash_manuf = ATMEL_FLASH;
		xp->fdesc.flash_id = ATMEL_FLASHID_1024K;
		xp->fdesc.flash_len = 0;
	}

	return (rval);
}
10863 
10864 /*
10865  * ql_24xx_load_flash
10866  *	Loads flash.
10867  *
10868  * Input:
10869  *	ha = adapter state pointer.
10870  *	dp = data pointer.
10871  *	size = data length.
10872  *	faddr = 32bit word flash address.
10873  *
10874  * Returns:
10875  *	ql local function return status code.
10876  *
10877  * Context:
10878  *	Kernel context.
10879  */
10880 int
10881 ql_24xx_load_flash(ql_adapter_state_t *vha, uint8_t *dp, uint32_t size,
10882     uint32_t faddr)
10883 {
10884 	int			rval;
10885 	uint32_t		cnt, rest_addr, fdata, wc;
10886 	dma_mem_t		dmabuf = {0};
10887 	ql_adapter_state_t	*ha = vha->pha;
10888 	ql_xioctl_t		*xp = ha->xioctl;
10889 
10890 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10891 
10892 	/* start address must be 32 bit word aligned */
10893 	if ((faddr & 0x3) != 0) {
10894 		EL(ha, "incorrect buffer size alignment\n");
10895 		return (QL_FUNCTION_PARAMETER_ERROR);
10896 	}
10897 
10898 	GLOBAL_HW_LOCK();
10899 
10900 	if ((rval = ql_setup_flash(ha)) != QL_SUCCESS) {
10901 		EL(ha, "ql_setup_flash failed=%xh\n", rval);
10902 	} else {
10903 		/* Allocate DMA buffer */
10904 		if (CFG_IST(ha, CFG_CTRL_25XX)) {
10905 			if ((rval = ql_get_dma_mem(ha, &dmabuf, 0xffff,
10906 			    LITTLE_ENDIAN_DMA,
10907 			    QL_DMA_DATA_ALIGN)) != QL_SUCCESS) {
10908 				EL(ha, "dma alloc failed, rval=%xh\n", rval);
10909 				return (rval);
10910 			}
10911 		}
10912 
10913 		/* setup mask of address range within a sector */
10914 		rest_addr = (xp->fdesc.block_size - 1) >> 2;
10915 
10916 		/* Enable flash write */
10917 		ql_24xx_unprotect_flash(ha);
10918 
10919 		faddr = faddr >> 2;	/* flash gets 32 bit words */
10920 
10921 		/*
10922 		 * Write data to flash.
10923 		 */
10924 		cnt = 0;
10925 		size = (size + 3) >> 2;	/* Round up & convert to dwords */
10926 
10927 		while (cnt < size) {
10928 			/* Beginning of a sector? */
10929 			if ((faddr & rest_addr) == 0) {
10930 				fdata = (faddr & ~rest_addr) << 2;
10931 				fdata = (fdata & 0xff00) |
10932 				    (fdata << 16 & 0xff0000) |
10933 				    (fdata >> 16 & 0xff);
10934 
10935 				if (rest_addr == 0x1fff) {
10936 					/* 32kb sector block erase */
10937 					rval = ql_24xx_write_flash(ha,
10938 					    FLASH_CONF_ADDR | 0x0352, fdata);
10939 				} else {
10940 					/* 64kb sector block erase */
10941 					rval = ql_24xx_write_flash(ha,
10942 					    FLASH_CONF_ADDR | 0x03d8, fdata);
10943 				}
10944 				if (rval != QL_SUCCESS) {
10945 					EL(ha, "Unable to flash sector: "
10946 					    "address=%xh\n", faddr);
10947 					break;
10948 				}
10949 			}
10950 
10951 			/* Write data */
10952 			if (CFG_IST(ha, CFG_CTRL_25XX) &&
10953 			    ((faddr & 0x3f) == 0)) {
10954 				/*
10955 				 * Limit write up to sector boundary.
10956 				 */
10957 				wc = ((~faddr & (rest_addr>>1)) + 1);
10958 
10959 				if (size - cnt < wc) {
10960 					wc = size - cnt;
10961 				}
10962 
10963 				ddi_rep_put8(dmabuf.acc_handle, (uint8_t *)dp,
10964 				    (uint8_t *)dmabuf.bp, wc<<2,
10965 				    DDI_DEV_AUTOINCR);
10966 
10967 				rval = ql_wrt_risc_ram(ha, FLASH_DATA_ADDR |
10968 				    faddr, dmabuf.cookie.dmac_laddress, wc);
10969 				if (rval != QL_SUCCESS) {
10970 					EL(ha, "unable to dma to flash "
10971 					    "address=%xh\n", faddr << 2);
10972 					break;
10973 				}
10974 
10975 				cnt += wc;
10976 				faddr += wc;
10977 				dp += wc << 2;
10978 			} else {
10979 				fdata = *dp++;
10980 				fdata |= *dp++ << 8;
10981 				fdata |= *dp++ << 16;
10982 				fdata |= *dp++ << 24;
10983 				rval = ql_24xx_write_flash(ha,
10984 				    FLASH_DATA_ADDR | faddr, fdata);
10985 				if (rval != QL_SUCCESS) {
10986 					EL(ha, "Unable to program flash "
10987 					    "address=%xh data=%xh\n", faddr,
10988 					    *dp);
10989 					break;
10990 				}
10991 				cnt++;
10992 				faddr++;
10993 
10994 				/* Allow other system activity. */
10995 				if (cnt % 0x1000 == 0) {
10996 					ql_delay(ha, 10000);
10997 				}
10998 			}
10999 		}
11000 
11001 		ql_24xx_protect_flash(ha);
11002 
11003 		ql_free_phys(ha, &dmabuf);
11004 	}
11005 
11006 	GLOBAL_HW_UNLOCK();
11007 
11008 	if (rval != QL_SUCCESS) {
11009 		EL(ha, "failed=%xh\n", rval);
11010 	} else {
11011 		/*EMPTY*/
11012 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11013 	}
11014 	return (rval);
11015 }
11016 
11017 /*
11018  * ql_24xx_read_flash
11019  *	Reads a 32bit word from ISP24xx NVRAM/FLASH.
11020  *
11021  * Input:
11022  *	ha:	adapter state pointer.
11023  *	faddr:	NVRAM/FLASH address.
11024  *	bp:	data pointer.
11025  *
11026  * Returns:
11027  *	ql local function return status code.
11028  *
11029  * Context:
11030  *	Kernel context.
11031  */
int
ql_24xx_read_flash(ql_adapter_state_t *vha, uint32_t faddr, uint32_t *bp)
{
	uint32_t		timer;
	int			rval = QL_SUCCESS;
	ql_adapter_state_t	*ha = vha->pha;	/* physical adapter */

	/* Clear access error flag */
	WRT32_IO_REG(ha, ctrl_status,
	    RD32_IO_REG(ha, ctrl_status) | FLASH_NVRAM_ACCESS_ERROR);

	/* Start the read; DATA_FLAG clear selects a read cycle. */
	WRT32_IO_REG(ha, flash_address, faddr & ~FLASH_DATA_FLAG);

	/* Wait for READ cycle to complete. */
	/* Poll up to 300000 * 10us = ~3 seconds. */
	for (timer = 300000; timer; timer--) {
		if (RD32_IO_REG(ha, flash_address) & FLASH_DATA_FLAG) {
			break;
		}
		drv_usecwait(10);
	}

	if (timer == 0) {
		EL(ha, "failed, timeout\n");
		rval = QL_FUNCTION_TIMEOUT;
	} else if (RD32_IO_REG(ha, ctrl_status) & FLASH_NVRAM_ACCESS_ERROR) {
		EL(ha, "failed, access error\n");
		rval = QL_FUNCTION_FAILED;
	}

	/*
	 * Data register is read unconditionally; on failure the caller
	 * gets whatever the register holds along with the error status.
	 */
	*bp = RD32_IO_REG(ha, flash_data);

	return (rval);
}
11065 
11066 /*
11067  * ql_24xx_write_flash
11068  *	Writes a 32bit word to ISP24xx NVRAM/FLASH.
11069  *
11070  * Input:
11071  *	ha:	adapter state pointer.
11072  *	addr:	NVRAM/FLASH address.
11073  *	value:	data.
11074  *
11075  * Returns:
11076  *	ql local function return status code.
11077  *
11078  * Context:
11079  *	Kernel context.
11080  */
int
ql_24xx_write_flash(ql_adapter_state_t *vha, uint32_t addr, uint32_t data)
{
	uint32_t		timer, fdata;
	int			rval = QL_SUCCESS;
	ql_adapter_state_t	*ha = vha->pha;	/* physical adapter */

	/* Clear access error flag */
	WRT32_IO_REG(ha, ctrl_status,
	    RD32_IO_REG(ha, ctrl_status) | FLASH_NVRAM_ACCESS_ERROR);

	/* Latch data first, then address with DATA_FLAG starts the write. */
	WRT32_IO_REG(ha, flash_data, data);
	RD32_IO_REG(ha, flash_data);		/* PCI Posting. */
	WRT32_IO_REG(ha, flash_address, addr | FLASH_DATA_FLAG);

	/* Wait for Write cycle to complete. */
	/* Poll up to 3000000 * 10us = ~30 seconds. */
	for (timer = 3000000; timer; timer--) {
		if ((RD32_IO_REG(ha, flash_address) & FLASH_DATA_FLAG) == 0) {
			/* Check flash write in progress. */
			if ((addr & FLASH_ADDR_MASK) == FLASH_CONF_ADDR) {
				/*
				 * Conf-space command (e.g. erase): also
				 * poll conf 0x005, BIT_0 - presumably the
				 * part's write-in-progress status bit.
				 */
				(void) ql_24xx_read_flash(ha,
				    FLASH_CONF_ADDR | 0x005, &fdata);
				if (!(fdata & BIT_0)) {
					break;
				}
			} else {
				break;
			}
		}
		drv_usecwait(10);
	}
	if (timer == 0) {
		EL(ha, "failed, timeout\n");
		rval = QL_FUNCTION_TIMEOUT;
	} else if (RD32_IO_REG(ha, ctrl_status) & FLASH_NVRAM_ACCESS_ERROR) {
		EL(ha, "access error\n");
		rval = QL_FUNCTION_FAILED;
	}

	return (rval);
}
11122 /*
11123  * ql_24xx_unprotect_flash
11124  *	Enable writes
11125  *
11126  * Input:
11127  *	ha:	adapter state pointer.
11128  *
11129  * Context:
11130  *	Kernel context.
11131  */
void
ql_24xx_unprotect_flash(ql_adapter_state_t *vha)
{
	uint32_t		fdata;
	ql_adapter_state_t	*ha = vha->pha;	/* physical adapter */
	ql_xioctl_t		*xp = ha->xioctl;

	QL_PRINT_3(CE_CONT, "(%d): entered\n", ha->instance);

	/* Enable flash write. */
	WRT32_IO_REG(ha, ctrl_status,
	    RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
	RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */

	/*
	 * Remove block write protection (SST and ST) and
	 * Sector/Block Protection Register Lock (SST, ST, ATMEL).
	 * Unprotect sectors.
	 */
	(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x100 |
	    xp->fdesc.write_statusreg_cmd, xp->fdesc.write_enable_bits);

	if (xp->fdesc.unprotect_sector_cmd != 0) {
		/* Unprotect the first 16 sectors. */
		for (fdata = 0; fdata < 0x10; fdata++) {
			(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR |
			    0x300 | xp->fdesc.unprotect_sector_cmd, fdata);
		}

		/*
		 * Unprotect three specific higher sector addresses -
		 * presumably vendor-mandated boot/VPD regions; mirrors
		 * the same trio used in ql_24xx_protect_flash().
		 */
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
		    xp->fdesc.unprotect_sector_cmd, 0x00400f);
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
		    xp->fdesc.unprotect_sector_cmd, 0x00600f);
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
		    xp->fdesc.unprotect_sector_cmd, 0x00800f);
	}

	QL_PRINT_3(CE_CONT, "(%d): exiting\n", ha->instance);
}
11170 
11171 /*
11172  * ql_24xx_protect_flash
11173  *	Disable writes
11174  *
11175  * Input:
11176  *	ha:	adapter state pointer.
11177  *
11178  * Context:
11179  *	Kernel context.
11180  */
void
ql_24xx_protect_flash(ql_adapter_state_t *vha)
{
	uint32_t		fdata;
	ql_adapter_state_t	*ha = vha->pha;	/* physical adapter */
	ql_xioctl_t		*xp = ha->xioctl;

	QL_PRINT_3(CE_CONT, "(%d): entered\n", ha->instance);

	/* Enable flash write. */
	WRT32_IO_REG(ha, ctrl_status,
	    RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
	RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */

	/*
	 * Protect sectors.
	 * Set block write protection (SST and ST) and
	 * Sector/Block Protection Register Lock (SST, ST, ATMEL).
	 */
	if (xp->fdesc.protect_sector_cmd != 0) {
		/* Protect the first 16 sectors. */
		for (fdata = 0; fdata < 0x10; fdata++) {
			(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR |
			    0x330 | xp->fdesc.protect_sector_cmd, fdata);
		}
		/*
		 * Protect the same three higher sector addresses used by
		 * ql_24xx_unprotect_flash().
		 */
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
		    xp->fdesc.protect_sector_cmd, 0x00400f);
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
		    xp->fdesc.protect_sector_cmd, 0x00600f);
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
		    xp->fdesc.protect_sector_cmd, 0x00800f);

		/* TODO: ??? */
		(void) ql_24xx_write_flash(ha,
		    FLASH_CONF_ADDR | 0x101, 0x80);
	} else {
		/* No per-sector command: write status register directly. */
		(void) ql_24xx_write_flash(ha,
		    FLASH_CONF_ADDR | 0x101, 0x9c);
	}

	/* Disable flash write. */
	WRT32_IO_REG(ha, ctrl_status,
	    RD32_IO_REG(ha, ctrl_status) & ~ISP_FLASH_ENABLE);
	RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */

	QL_PRINT_3(CE_CONT, "(%d): exiting\n", ha->instance);
}
11227 
11228 /*
11229  * ql_dump_firmware
11230  *	Save RISC code state information.
11231  *
11232  * Input:
11233  *	ha = adapter state pointer.
11234  *
11235  * Returns:
11236  *	QL local function return status code.
11237  *
11238  * Context:
11239  *	Kernel context.
11240  */
static int
ql_dump_firmware(ql_adapter_state_t *vha)
{
	int			rval;
	clock_t			timer;
	ql_adapter_state_t	*ha = vha->pha;	/* physical adapter */

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	QL_DUMP_LOCK(ha);

	/*
	 * Nothing to do if a dump is already in progress or a valid
	 * dump is still waiting to be uploaded.
	 */
	if (ha->ql_dump_state & QL_DUMPING ||
	    (ha->ql_dump_state & QL_DUMP_VALID &&
	    !(ha->ql_dump_state & QL_DUMP_UPLOADED))) {
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
		QL_DUMP_UNLOCK(ha);
		return (QL_SUCCESS);
	}

	QL_DUMP_UNLOCK(ha);

	/* Stall the driver so no new I/O is started. */
	ql_awaken_task_daemon(ha, NULL, DRIVER_STALL, 0);

	/*
	 * Wait for all outstanding commands to complete
	 */
	(void) ql_wait_outstanding(ha);

	/* Dump firmware. */
	rval = ql_binary_fw_dump(ha, TRUE);

	/* Do abort to force restart. */
	ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, DRIVER_STALL);
	EL(ha, "restarting, isp_abort_needed\n");

	/* Acquire task daemon lock. */
	TASK_DAEMON_LOCK(ha);

	/* Wait for suspension to end. */
	while (ha->task_daemon_flags & QL_SUSPENDED) {
		ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;

		/* 30 seconds from now */
		timer = ddi_get_lbolt();
		timer += drv_usectohz(30000000);

		if (cv_timedwait(&ha->cv_dr_suspended,
		    &ha->task_daemon_mutex, timer) == -1) {
			/*
			 * The timeout time 'timer' was
			 * reached without the condition
			 * being signaled.
			 */
			break;
		}
	}

	/* Release task daemon lock. */
	TASK_DAEMON_UNLOCK(ha);

	/* QL_DATA_EXISTS means a prior dump is still valid - not an error. */
	if (rval == QL_SUCCESS || rval == QL_DATA_EXISTS) {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	} else {
		EL(ha, "failed, rval = %xh\n", rval);
	}
	return (rval);
}
11309 
11310 /*
11311  * ql_binary_fw_dump
11312  *	Dumps binary data from firmware.
11313  *
11314  * Input:
11315  *	ha = adapter state pointer.
11316  *	lock_needed = mailbox lock needed.
11317  *
11318  * Returns:
11319  *	ql local function return status code.
11320  *
11321  * Context:
11322  *	Interrupt or Kernel context, no mailbox commands allowed.
11323  */
11324 int
11325 ql_binary_fw_dump(ql_adapter_state_t *vha, int lock_needed)
11326 {
11327 	clock_t			timer;
11328 	mbx_cmd_t		mc;
11329 	mbx_cmd_t		*mcp = &mc;
11330 	int			rval = QL_SUCCESS;
11331 	ql_adapter_state_t	*ha = vha->pha;
11332 
11333 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11334 
11335 	QL_DUMP_LOCK(ha);
11336 
11337 	if (ha->ql_dump_state & QL_DUMPING ||
11338 	    (ha->ql_dump_state & QL_DUMP_VALID &&
11339 	    !(ha->ql_dump_state & QL_DUMP_UPLOADED))) {
11340 		EL(ha, "dump already done, qds=%x\n", ha->ql_dump_state);
11341 		QL_DUMP_UNLOCK(ha);
11342 		return (QL_DATA_EXISTS);
11343 	}
11344 
11345 	ha->ql_dump_state &= ~(QL_DUMP_VALID | QL_DUMP_UPLOADED);
11346 	ha->ql_dump_state |= QL_DUMPING;
11347 
11348 	QL_DUMP_UNLOCK(ha);
11349 
11350 	if (ha->cfg_flags & CFG_ENABLE_FWEXTTRACE) {
11351 
11352 		/* Insert Time Stamp */
11353 		rval = ql_fw_etrace(ha, &ha->fwexttracebuf,
11354 		    FTO_INSERT_TIME_STAMP);
11355 		if (rval != QL_SUCCESS) {
11356 			EL(ha, "f/w extended trace insert"
11357 			    "time stamp failed: %xh\n", rval);
11358 		}
11359 	}
11360 
11361 	if (lock_needed == TRUE) {
11362 		/* Acquire mailbox register lock. */
11363 		MBX_REGISTER_LOCK(ha);
11364 
11365 		/* Check for mailbox available, if not wait for signal. */
11366 		while (ha->mailbox_flags & MBX_BUSY_FLG) {
11367 			ha->mailbox_flags = (uint8_t)
11368 			    (ha->mailbox_flags | MBX_WANT_FLG);
11369 
11370 			/* 30 seconds from now */
11371 			timer = ddi_get_lbolt();
11372 			timer += (ha->mcp->timeout + 2) *
11373 			    drv_usectohz(1000000);
11374 			if (cv_timedwait(&ha->cv_mbx_wait, &ha->mbx_mutex,
11375 			    timer) == -1) {
11376 				/*
11377 				 * The timeout time 'timer' was
11378 				 * reached without the condition
11379 				 * being signaled.
11380 				 */
11381 
11382 				/* Release mailbox register lock. */
11383 				MBX_REGISTER_UNLOCK(ha);
11384 
11385 				EL(ha, "failed, rval = %xh\n",
11386 				    QL_FUNCTION_TIMEOUT);
11387 				return (QL_FUNCTION_TIMEOUT);
11388 			}
11389 		}
11390 
11391 		/* Set busy flag. */
11392 		ha->mailbox_flags = (uint8_t)
11393 		    (ha->mailbox_flags | MBX_BUSY_FLG);
11394 		mcp->timeout = 120;
11395 		ha->mcp = mcp;
11396 
11397 		/* Release mailbox register lock. */
11398 		MBX_REGISTER_UNLOCK(ha);
11399 	}
11400 
11401 	/* Free previous dump buffer. */
11402 	if (ha->ql_dump_ptr != NULL) {
11403 		kmem_free(ha->ql_dump_ptr, ha->ql_dump_size);
11404 		ha->ql_dump_ptr = NULL;
11405 	}
11406 
11407 	if (CFG_IST(ha, CFG_CTRL_2422)) {
11408 		ha->ql_dump_size = (uint32_t)(sizeof (ql_24xx_fw_dump_t) +
11409 		    ha->fw_ext_memory_size);
11410 	} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
11411 		ha->ql_dump_size = (uint32_t)(sizeof (ql_25xx_fw_dump_t) +
11412 		    ha->fw_ext_memory_size);
11413 	} else {
11414 		ha->ql_dump_size = sizeof (ql_fw_dump_t);
11415 	}
11416 
11417 	if ((ha->ql_dump_ptr = kmem_zalloc(ha->ql_dump_size, KM_NOSLEEP)) ==
11418 	    NULL) {
11419 		rval = QL_MEMORY_ALLOC_FAILED;
11420 	} else {
11421 		if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
11422 			rval = ql_2300_binary_fw_dump(ha, ha->ql_dump_ptr);
11423 		} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
11424 			rval = ql_25xx_binary_fw_dump(ha, ha->ql_dump_ptr);
11425 		} else if (CFG_IST(ha, CFG_CTRL_2422)) {
11426 			rval = ql_24xx_binary_fw_dump(ha, ha->ql_dump_ptr);
11427 		} else {
11428 			rval = ql_2200_binary_fw_dump(ha, ha->ql_dump_ptr);
11429 		}
11430 	}
11431 
11432 	/* Reset ISP chip. */
11433 	ql_reset_chip(ha);
11434 
11435 	QL_DUMP_LOCK(ha);
11436 
11437 	if (rval != QL_SUCCESS) {
11438 		if (ha->ql_dump_ptr != NULL) {
11439 			kmem_free(ha->ql_dump_ptr, ha->ql_dump_size);
11440 			ha->ql_dump_ptr = NULL;
11441 		}
11442 		ha->ql_dump_state &= ~(QL_DUMPING | QL_DUMP_VALID |
11443 		    QL_DUMP_UPLOADED);
11444 		EL(ha, "failed, rval = %xh\n", rval);
11445 	} else {
11446 		ha->ql_dump_state &= ~(QL_DUMPING | QL_DUMP_UPLOADED);
11447 		ha->ql_dump_state |= QL_DUMP_VALID;
11448 		EL(ha, "done\n");
11449 	}
11450 
11451 	QL_DUMP_UNLOCK(ha);
11452 
11453 	return (rval);
11454 }
11455 
11456 /*
11457  * ql_ascii_fw_dump
11458  *	Converts firmware binary dump to ascii.
11459  *
11460  * Input:
11461  *	ha = adapter state pointer.
11462  *	bptr = buffer pointer.
11463  *
11464  * Returns:
11465  *	Amount of data buffer used.
11466  *
11467  * Context:
11468  *	Kernel context.
11469  */
11470 size_t
11471 ql_ascii_fw_dump(ql_adapter_state_t *vha, caddr_t bufp)
11472 {
11473 	uint32_t		cnt;
11474 	caddr_t			bp;
11475 	int			mbox_cnt;
11476 	ql_adapter_state_t	*ha = vha->pha;
11477 	ql_fw_dump_t		*fw = ha->ql_dump_ptr;
11478 
11479 	if (CFG_IST(ha, CFG_CTRL_2422)) {
11480 		return (ql_24xx_ascii_fw_dump(ha, bufp));
11481 	} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
11482 		return (ql_25xx_ascii_fw_dump(ha, bufp));
11483 	}
11484 
11485 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11486 
11487 	if (CFG_IST(ha, CFG_CTRL_2300)) {
11488 		(void) sprintf(bufp, "\nISP 2300IP ");
11489 	} else if (CFG_IST(ha, CFG_CTRL_6322)) {
11490 		(void) sprintf(bufp, "\nISP 6322FLX ");
11491 	} else {
11492 		(void) sprintf(bufp, "\nISP 2200IP ");
11493 	}
11494 
11495 	bp = bufp + strlen(bufp);
11496 	(void) sprintf(bp, "Firmware Version %d.%d.%d\n",
11497 	    ha->fw_major_version, ha->fw_minor_version,
11498 	    ha->fw_subminor_version);
11499 
11500 	(void) strcat(bufp, "\nPBIU Registers:");
11501 	bp = bufp + strlen(bufp);
11502 	for (cnt = 0; cnt < sizeof (fw->pbiu_reg) / 2; cnt++) {
11503 		if (cnt % 8 == 0) {
11504 			*bp++ = '\n';
11505 		}
11506 		(void) sprintf(bp, "%04x  ", fw->pbiu_reg[cnt]);
11507 		bp = bp + 6;
11508 	}
11509 
11510 	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
11511 		(void) strcat(bufp, "\n\nReqQ-RspQ-Risc2Host Status "
11512 		    "registers:");
11513 		bp = bufp + strlen(bufp);
11514 		for (cnt = 0; cnt < sizeof (fw->risc_host_reg) / 2; cnt++) {
11515 			if (cnt % 8 == 0) {
11516 				*bp++ = '\n';
11517 			}
11518 			(void) sprintf(bp, "%04x  ", fw->risc_host_reg[cnt]);
11519 			bp = bp + 6;
11520 		}
11521 	}
11522 
11523 	(void) strcat(bp, "\n\nMailbox Registers:");
11524 	bp = bufp + strlen(bufp);
11525 	mbox_cnt = (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) ? 16 : 8;
11526 	for (cnt = 0; cnt < mbox_cnt; cnt++) {
11527 		if (cnt % 8 == 0) {
11528 			*bp++ = '\n';
11529 		}
11530 		(void) sprintf(bp, "%04x  ", fw->mailbox_reg[cnt]);
11531 		bp = bp + 6;
11532 	}
11533 
11534 	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
11535 		(void) strcat(bp, "\n\nAuto Request Response DMA Registers:");
11536 		bp = bufp + strlen(bufp);
11537 		for (cnt = 0; cnt < sizeof (fw->resp_dma_reg) / 2; cnt++) {
11538 			if (cnt % 8 == 0) {
11539 				*bp++ = '\n';
11540 			}
11541 			(void) sprintf(bp, "%04x  ", fw->resp_dma_reg[cnt]);
11542 			bp = bp + 6;
11543 		}
11544 	}
11545 
11546 	(void) strcat(bp, "\n\nDMA Registers:");
11547 	bp = bufp + strlen(bufp);
11548 	for (cnt = 0; cnt < sizeof (fw->dma_reg) / 2; cnt++) {
11549 		if (cnt % 8 == 0) {
11550 			*bp++ = '\n';
11551 		}
11552 		(void) sprintf(bp, "%04x  ", fw->dma_reg[cnt]);
11553 		bp = bp + 6;
11554 	}
11555 
11556 	(void) strcat(bp, "\n\nRISC Hardware Registers:");
11557 	bp = bufp + strlen(bufp);
11558 	for (cnt = 0; cnt < sizeof (fw->risc_hdw_reg) / 2; cnt++) {
11559 		if (cnt % 8 == 0) {
11560 			*bp++ = '\n';
11561 		}
11562 		(void) sprintf(bp, "%04x  ", fw->risc_hdw_reg[cnt]);
11563 		bp = bp + 6;
11564 	}
11565 
11566 	(void) strcat(bp, "\n\nRISC GP0 Registers:");
11567 	bp = bufp + strlen(bufp);
11568 	for (cnt = 0; cnt < sizeof (fw->risc_gp0_reg) / 2; cnt++) {
11569 		if (cnt % 8 == 0) {
11570 			*bp++ = '\n';
11571 		}
11572 		(void) sprintf(bp, "%04x  ", fw->risc_gp0_reg[cnt]);
11573 		bp = bp + 6;
11574 	}
11575 
11576 	(void) strcat(bp, "\n\nRISC GP1 Registers:");
11577 	bp = bufp + strlen(bufp);
11578 	for (cnt = 0; cnt < sizeof (fw->risc_gp1_reg) / 2; cnt++) {
11579 		if (cnt % 8 == 0) {
11580 			*bp++ = '\n';
11581 		}
11582 		(void) sprintf(bp, "%04x  ", fw->risc_gp1_reg[cnt]);
11583 		bp = bp + 6;
11584 	}
11585 
11586 	(void) strcat(bp, "\n\nRISC GP2 Registers:");
11587 	bp = bufp + strlen(bufp);
11588 	for (cnt = 0; cnt < sizeof (fw->risc_gp2_reg) / 2; cnt++) {
11589 		if (cnt % 8 == 0) {
11590 			*bp++ = '\n';
11591 		}
11592 		(void) sprintf(bp, "%04x  ", fw->risc_gp2_reg[cnt]);
11593 		bp = bp + 6;
11594 	}
11595 
11596 	(void) strcat(bp, "\n\nRISC GP3 Registers:");
11597 	bp = bufp + strlen(bufp);
11598 	for (cnt = 0; cnt < sizeof (fw->risc_gp3_reg) / 2; cnt++) {
11599 		if (cnt % 8 == 0) {
11600 			*bp++ = '\n';
11601 		}
11602 		(void) sprintf(bp, "%04x  ", fw->risc_gp3_reg[cnt]);
11603 		bp = bp + 6;
11604 	}
11605 
11606 	(void) strcat(bp, "\n\nRISC GP4 Registers:");
11607 	bp = bufp + strlen(bufp);
11608 	for (cnt = 0; cnt < sizeof (fw->risc_gp4_reg) / 2; cnt++) {
11609 		if (cnt % 8 == 0) {
11610 			*bp++ = '\n';
11611 		}
11612 		(void) sprintf(bp, "%04x  ", fw->risc_gp4_reg[cnt]);
11613 		bp = bp + 6;
11614 	}
11615 
11616 	(void) strcat(bp, "\n\nRISC GP5 Registers:");
11617 	bp = bufp + strlen(bufp);
11618 	for (cnt = 0; cnt < sizeof (fw->risc_gp5_reg) / 2; cnt++) {
11619 		if (cnt % 8 == 0) {
11620 			*bp++ = '\n';
11621 		}
11622 		(void) sprintf(bp, "%04x  ", fw->risc_gp5_reg[cnt]);
11623 		bp = bp + 6;
11624 	}
11625 
11626 	(void) strcat(bp, "\n\nRISC GP6 Registers:");
11627 	bp = bufp + strlen(bufp);
11628 	for (cnt = 0; cnt < sizeof (fw->risc_gp6_reg) / 2; cnt++) {
11629 		if (cnt % 8 == 0) {
11630 			*bp++ = '\n';
11631 		}
11632 		(void) sprintf(bp, "%04x  ", fw->risc_gp6_reg[cnt]);
11633 		bp = bp + 6;
11634 	}
11635 
11636 	(void) strcat(bp, "\n\nRISC GP7 Registers:");
11637 	bp = bufp + strlen(bufp);
11638 	for (cnt = 0; cnt < sizeof (fw->risc_gp7_reg) / 2; cnt++) {
11639 		if (cnt % 8 == 0) {
11640 			*bp++ = '\n';
11641 		}
11642 		(void) sprintf(bp, "%04x  ", fw->risc_gp7_reg[cnt]);
11643 		bp = bp + 6;
11644 	}
11645 
11646 	(void) strcat(bp, "\n\nFrame Buffer Hardware Registers:");
11647 	bp = bufp + strlen(bufp);
11648 	for (cnt = 0; cnt < sizeof (fw->frame_buf_hdw_reg) / 2; cnt++) {
11649 		if ((cnt == 16) && ((CFG_IST(ha, (CFG_CTRL_2300 |
11650 		    CFG_CTRL_6322)) == 0))) {
11651 			break;
11652 		}
11653 		if (cnt % 8 == 0) {
11654 			*bp++ = '\n';
11655 		}
11656 		(void) sprintf(bp, "%04x  ", fw->frame_buf_hdw_reg[cnt]);
11657 		bp = bp + 6;
11658 	}
11659 
11660 	(void) strcat(bp, "\n\nFPM B0 Registers:");
11661 	bp = bufp + strlen(bufp);
11662 	for (cnt = 0; cnt < sizeof (fw->fpm_b0_reg) / 2; cnt++) {
11663 		if (cnt % 8 == 0) {
11664 			*bp++ = '\n';
11665 		}
11666 		(void) sprintf(bp, "%04x  ", fw->fpm_b0_reg[cnt]);
11667 		bp = bp + 6;
11668 	}
11669 
11670 	(void) strcat(bp, "\n\nFPM B1 Registers:");
11671 	bp = bufp + strlen(bufp);
11672 	for (cnt = 0; cnt < sizeof (fw->fpm_b1_reg) / 2; cnt++) {
11673 		if (cnt % 8 == 0) {
11674 			*bp++ = '\n';
11675 		}
11676 		(void) sprintf(bp, "%04x  ", fw->fpm_b1_reg[cnt]);
11677 		bp = bp + 6;
11678 	}
11679 
11680 	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
11681 		(void) strcat(bp, "\n\nCode RAM Dump:");
11682 		bp = bufp + strlen(bufp);
11683 		for (cnt = 0; cnt < sizeof (fw->risc_ram) / 2; cnt++) {
11684 			if (cnt % 8 == 0) {
11685 				(void) sprintf(bp, "\n%05x: ", cnt + 0x0800);
11686 				bp = bp + 8;
11687 			}
11688 			(void) sprintf(bp, "%04x  ", fw->risc_ram[cnt]);
11689 			bp = bp + 6;
11690 		}
11691 
11692 		(void) strcat(bp, "\n\nStack RAM Dump:");
11693 		bp = bufp + strlen(bufp);
11694 		for (cnt = 0; cnt < sizeof (fw->stack_ram) / 2; cnt++) {
11695 			if (cnt % 8 == 0) {
11696 				(void) sprintf(bp, "\n%05x: ", cnt + 0x010000);
11697 				bp = bp + 8;
11698 			}
11699 			(void) sprintf(bp, "%04x  ", fw->stack_ram[cnt]);
11700 			bp = bp + 6;
11701 		}
11702 
11703 		(void) strcat(bp, "\n\nData RAM Dump:");
11704 		bp = bufp + strlen(bufp);
11705 		for (cnt = 0; cnt < sizeof (fw->data_ram) / 2; cnt++) {
11706 			if (cnt % 8 == 0) {
11707 				(void) sprintf(bp, "\n%05x: ", cnt + 0x010800);
11708 				bp = bp + 8;
11709 			}
11710 			(void) sprintf(bp, "%04x  ", fw->data_ram[cnt]);
11711 			bp = bp + 6;
11712 		}
11713 	} else {
11714 		(void) strcat(bp, "\n\nRISC SRAM:");
11715 		bp = bufp + strlen(bufp);
11716 		for (cnt = 0; cnt < 0xf000; cnt++) {
11717 			if (cnt % 8 == 0) {
11718 				(void) sprintf(bp, "\n%04x: ", cnt + 0x1000);
11719 				bp = bp + 7;
11720 			}
11721 			(void) sprintf(bp, "%04x  ", fw->risc_ram[cnt]);
11722 			bp = bp + 6;
11723 		}
11724 	}
11725 
11726 	(void) strcat(bp, "\n\n[<==END] ISP Debug Dump.");
11727 	bp += strlen(bp);
11728 
11729 	(void) sprintf(bp, "\n\nRequest Queue");
11730 	bp += strlen(bp);
11731 	for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
11732 		if (cnt % 8 == 0) {
11733 			(void) sprintf(bp, "\n%08x: ", cnt);
11734 			bp += strlen(bp);
11735 		}
11736 		(void) sprintf(bp, "%08x ", fw->req_q[cnt]);
11737 		bp += strlen(bp);
11738 	}
11739 
11740 	(void) sprintf(bp, "\n\nResponse Queue");
11741 	bp += strlen(bp);
11742 	for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
11743 		if (cnt % 8 == 0) {
11744 			(void) sprintf(bp, "\n%08x: ", cnt);
11745 			bp += strlen(bp);
11746 		}
11747 		(void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
11748 		bp += strlen(bp);
11749 	}
11750 
11751 	(void) sprintf(bp, "\n");
11752 
11753 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11754 
11755 	return (strlen(bufp));
11756 }
11757 
11758 /*
11759  * ql_24xx_ascii_fw_dump
11760  *	Converts ISP24xx firmware binary dump to ascii.
11761  *
11762  * Input:
11763  *	ha = adapter state pointer.
11764  *	bptr = buffer pointer.
11765  *
11766  * Returns:
11767  *	Amount of data buffer used.
11768  *
11769  * Context:
11770  *	Kernel context.
11771  */
11772 static size_t
11773 ql_24xx_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
11774 {
11775 	uint32_t		cnt;
11776 	caddr_t			bp = bufp;
11777 	ql_24xx_fw_dump_t	*fw = ha->ql_dump_ptr;
11778 
11779 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11780 
11781 	(void) sprintf(bp, "ISP FW Version %d.%02d.%02d Attributes %X\n",
11782 	    ha->fw_major_version, ha->fw_minor_version,
11783 	    ha->fw_subminor_version, ha->fw_attributes);
11784 	bp += strlen(bp);
11785 
11786 	(void) sprintf(bp, "\nHCCR Register\n%08x\n", fw->hccr);
11787 
11788 	(void) strcat(bp, "\nHost Interface Registers");
11789 	bp += strlen(bp);
11790 	for (cnt = 0; cnt < sizeof (fw->host_reg) / 4; cnt++) {
11791 		if (cnt % 8 == 0) {
11792 			(void) sprintf(bp++, "\n");
11793 		}
11794 
11795 		(void) sprintf(bp, "%08x ", fw->host_reg[cnt]);
11796 		bp += 9;
11797 	}
11798 
11799 	(void) sprintf(bp, "\n\nMailbox Registers");
11800 	bp += strlen(bp);
11801 	for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
11802 		if (cnt % 16 == 0) {
11803 			(void) sprintf(bp++, "\n");
11804 		}
11805 
11806 		(void) sprintf(bp, "%04x ", fw->mailbox_reg[cnt]);
11807 		bp += 5;
11808 	}
11809 
11810 	(void) sprintf(bp, "\n\nXSEQ GP Registers");
11811 	bp += strlen(bp);
11812 	for (cnt = 0; cnt < sizeof (fw->xseq_gp_reg) / 4; cnt++) {
11813 		if (cnt % 8 == 0) {
11814 			(void) sprintf(bp++, "\n");
11815 		}
11816 
11817 		(void) sprintf(bp, "%08x ", fw->xseq_gp_reg[cnt]);
11818 		bp += 9;
11819 	}
11820 
11821 	(void) sprintf(bp, "\n\nXSEQ-0 Registers");
11822 	bp += strlen(bp);
11823 	for (cnt = 0; cnt < sizeof (fw->xseq_0_reg) / 4; cnt++) {
11824 		if (cnt % 8 == 0) {
11825 			(void) sprintf(bp++, "\n");
11826 		}
11827 
11828 		(void) sprintf(bp, "%08x ", fw->xseq_0_reg[cnt]);
11829 		bp += 9;
11830 	}
11831 
11832 	(void) sprintf(bp, "\n\nXSEQ-1 Registers");
11833 	bp += strlen(bp);
11834 	for (cnt = 0; cnt < sizeof (fw->xseq_1_reg) / 4; cnt++) {
11835 		if (cnt % 8 == 0) {
11836 			(void) sprintf(bp++, "\n");
11837 		}
11838 
11839 		(void) sprintf(bp, "%08x ", fw->xseq_1_reg[cnt]);
11840 		bp += 9;
11841 	}
11842 
11843 	(void) sprintf(bp, "\n\nRSEQ GP Registers");
11844 	bp += strlen(bp);
11845 	for (cnt = 0; cnt < sizeof (fw->rseq_gp_reg) / 4; cnt++) {
11846 		if (cnt % 8 == 0) {
11847 			(void) sprintf(bp++, "\n");
11848 		}
11849 
11850 		(void) sprintf(bp, "%08x ", fw->rseq_gp_reg[cnt]);
11851 		bp += 9;
11852 	}
11853 
11854 	(void) sprintf(bp, "\n\nRSEQ-0 Registers");
11855 	bp += strlen(bp);
11856 	for (cnt = 0; cnt < sizeof (fw->rseq_0_reg) / 4; cnt++) {
11857 		if (cnt % 8 == 0) {
11858 			(void) sprintf(bp++, "\n");
11859 		}
11860 
11861 		(void) sprintf(bp, "%08x ", fw->rseq_0_reg[cnt]);
11862 		bp += 9;
11863 	}
11864 
11865 	(void) sprintf(bp, "\n\nRSEQ-1 Registers");
11866 	bp += strlen(bp);
11867 	for (cnt = 0; cnt < sizeof (fw->rseq_1_reg) / 4; cnt++) {
11868 		if (cnt % 8 == 0) {
11869 			(void) sprintf(bp++, "\n");
11870 		}
11871 
11872 		(void) sprintf(bp, "%08x ", fw->rseq_1_reg[cnt]);
11873 		bp += 9;
11874 	}
11875 
11876 	(void) sprintf(bp, "\n\nRSEQ-2 Registers");
11877 	bp += strlen(bp);
11878 	for (cnt = 0; cnt < sizeof (fw->rseq_2_reg) / 4; cnt++) {
11879 		if (cnt % 8 == 0) {
11880 			(void) sprintf(bp++, "\n");
11881 		}
11882 
11883 		(void) sprintf(bp, "%08x ", fw->rseq_2_reg[cnt]);
11884 		bp += 9;
11885 	}
11886 
11887 	(void) sprintf(bp, "\n\nCommand DMA Registers");
11888 	bp += strlen(bp);
11889 	for (cnt = 0; cnt < sizeof (fw->cmd_dma_reg) / 4; cnt++) {
11890 		if (cnt % 8 == 0) {
11891 			(void) sprintf(bp++, "\n");
11892 		}
11893 
11894 		(void) sprintf(bp, "%08x ", fw->cmd_dma_reg[cnt]);
11895 		bp += 9;
11896 	}
11897 
11898 	(void) sprintf(bp, "\n\nRequest0 Queue DMA Channel Registers");
11899 	bp += strlen(bp);
11900 	for (cnt = 0; cnt < sizeof (fw->req0_dma_reg) / 4; cnt++) {
11901 		if (cnt % 8 == 0) {
11902 			(void) sprintf(bp++, "\n");
11903 		}
11904 
11905 		(void) sprintf(bp, "%08x ", fw->req0_dma_reg[cnt]);
11906 		bp += 9;
11907 	}
11908 
11909 	(void) sprintf(bp, "\n\nResponse0 Queue DMA Channel Registers");
11910 	bp += strlen(bp);
11911 	for (cnt = 0; cnt < sizeof (fw->resp0_dma_reg) / 4; cnt++) {
11912 		if (cnt % 8 == 0) {
11913 			(void) sprintf(bp++, "\n");
11914 		}
11915 
11916 		(void) sprintf(bp, "%08x ", fw->resp0_dma_reg[cnt]);
11917 		bp += 9;
11918 	}
11919 
11920 	(void) sprintf(bp, "\n\nRequest1 Queue DMA Channel Registers");
11921 	bp += strlen(bp);
11922 	for (cnt = 0; cnt < sizeof (fw->req1_dma_reg) / 4; cnt++) {
11923 		if (cnt % 8 == 0) {
11924 			(void) sprintf(bp++, "\n");
11925 		}
11926 
11927 		(void) sprintf(bp, "%08x ", fw->req1_dma_reg[cnt]);
11928 		bp += 9;
11929 	}
11930 
11931 	(void) sprintf(bp, "\n\nXMT0 Data DMA Registers");
11932 	bp += strlen(bp);
11933 	for (cnt = 0; cnt < sizeof (fw->xmt0_dma_reg) / 4; cnt++) {
11934 		if (cnt % 8 == 0) {
11935 			(void) sprintf(bp++, "\n");
11936 		}
11937 
11938 		(void) sprintf(bp, "%08x ", fw->xmt0_dma_reg[cnt]);
11939 		bp += 9;
11940 	}
11941 
11942 	(void) sprintf(bp, "\n\nXMT1 Data DMA Registers");
11943 	bp += strlen(bp);
11944 	for (cnt = 0; cnt < sizeof (fw->xmt1_dma_reg) / 4; cnt++) {
11945 		if (cnt % 8 == 0) {
11946 			(void) sprintf(bp++, "\n");
11947 		}
11948 
11949 		(void) sprintf(bp, "%08x ", fw->xmt1_dma_reg[cnt]);
11950 		bp += 9;
11951 	}
11952 
11953 	(void) sprintf(bp, "\n\nXMT2 Data DMA Registers");
11954 	bp += strlen(bp);
11955 	for (cnt = 0; cnt < sizeof (fw->xmt2_dma_reg) / 4; cnt++) {
11956 		if (cnt % 8 == 0) {
11957 			(void) sprintf(bp++, "\n");
11958 		}
11959 
11960 		(void) sprintf(bp, "%08x ", fw->xmt2_dma_reg[cnt]);
11961 		bp += 9;
11962 	}
11963 
11964 	(void) sprintf(bp, "\n\nXMT3 Data DMA Registers");
11965 	bp += strlen(bp);
11966 	for (cnt = 0; cnt < sizeof (fw->xmt3_dma_reg) / 4; cnt++) {
11967 		if (cnt % 8 == 0) {
11968 			(void) sprintf(bp++, "\n");
11969 		}
11970 
11971 		(void) sprintf(bp, "%08x ", fw->xmt3_dma_reg[cnt]);
11972 		bp += 9;
11973 	}
11974 
11975 	(void) sprintf(bp, "\n\nXMT4 Data DMA Registers");
11976 	bp += strlen(bp);
11977 	for (cnt = 0; cnt < sizeof (fw->xmt4_dma_reg) / 4; cnt++) {
11978 		if (cnt % 8 == 0) {
11979 			(void) sprintf(bp++, "\n");
11980 		}
11981 
11982 		(void) sprintf(bp, "%08x ", fw->xmt4_dma_reg[cnt]);
11983 		bp += 9;
11984 	}
11985 
11986 	(void) sprintf(bp, "\n\nXMT Data DMA Common Registers");
11987 	bp += strlen(bp);
11988 	for (cnt = 0; cnt < sizeof (fw->xmt_data_dma_reg) / 4; cnt++) {
11989 		if (cnt % 8 == 0) {
11990 			(void) sprintf(bp++, "\n");
11991 		}
11992 
11993 		(void) sprintf(bp, "%08x ", fw->xmt_data_dma_reg[cnt]);
11994 		bp += 9;
11995 	}
11996 
11997 	(void) sprintf(bp, "\n\nRCV Thread 0 Data DMA Registers");
11998 	bp += strlen(bp);
11999 	for (cnt = 0; cnt < sizeof (fw->rcvt0_data_dma_reg) / 4; cnt++) {
12000 		if (cnt % 8 == 0) {
12001 			(void) sprintf(bp++, "\n");
12002 		}
12003 
12004 		(void) sprintf(bp, "%08x ", fw->rcvt0_data_dma_reg[cnt]);
12005 		bp += 9;
12006 	}
12007 
12008 	(void) sprintf(bp, "\n\nRCV Thread 1 Data DMA Registers");
12009 	bp += strlen(bp);
12010 	for (cnt = 0; cnt < sizeof (fw->rcvt1_data_dma_reg) / 4; cnt++) {
12011 		if (cnt % 8 == 0) {
12012 			(void) sprintf(bp++, "\n");
12013 		}
12014 
12015 		(void) sprintf(bp, "%08x ", fw->rcvt1_data_dma_reg[cnt]);
12016 		bp += 9;
12017 	}
12018 
12019 	(void) sprintf(bp, "\n\nRISC GP Registers");
12020 	bp += strlen(bp);
12021 	for (cnt = 0; cnt < sizeof (fw->risc_gp_reg) / 4; cnt++) {
12022 		if (cnt % 8 == 0) {
12023 			(void) sprintf(bp++, "\n");
12024 		}
12025 
12026 		(void) sprintf(bp, "%08x ", fw->risc_gp_reg[cnt]);
12027 		bp += 9;
12028 	}
12029 
12030 	(void) sprintf(bufp + strlen(bufp), "\n\nShadow Registers");
12031 	bp += strlen(bp);
12032 	for (cnt = 0; cnt < sizeof (fw->shadow_reg) / 4; cnt++) {
12033 		if (cnt % 8 == 0) {
12034 			(void) sprintf(bp++, "\n");
12035 		}
12036 
12037 		(void) sprintf(bp, "%08x ", fw->shadow_reg[cnt]);
12038 		bp += 9;
12039 	}
12040 
12041 	(void) sprintf(bp, "\n\nLMC Registers");
12042 	bp += strlen(bp);
12043 	for (cnt = 0; cnt < sizeof (fw->lmc_reg) / 4; cnt++) {
12044 		if (cnt % 8 == 0) {
12045 			(void) sprintf(bp++, "\n");
12046 		}
12047 
12048 		(void) sprintf(bp, "%08x ", fw->lmc_reg[cnt]);
12049 		bp += 9;
12050 	}
12051 
12052 	(void) sprintf(bp, "\n\nFPM Hardware Registers");
12053 	bp += strlen(bp);
12054 	for (cnt = 0; cnt < sizeof (fw->fpm_hdw_reg) / 4; cnt++) {
12055 		if (cnt % 8 == 0) {
12056 			(void) sprintf(bp++, "\n");
12057 		}
12058 
12059 		(void) sprintf(bp, "%08x ", fw->fpm_hdw_reg[cnt]);
12060 		bp += 9;
12061 	}
12062 
12063 	(void) sprintf(bp, "\n\nFB Hardware Registers");
12064 	bp += strlen(bp);
12065 	for (cnt = 0; cnt < sizeof (fw->fb_hdw_reg) / 4; cnt++) {
12066 		if (cnt % 8 == 0) {
12067 			(void) sprintf(bp++, "\n");
12068 		}
12069 
12070 		(void) sprintf(bp, "%08x ", fw->fb_hdw_reg[cnt]);
12071 		bp += 9;
12072 	}
12073 
12074 	(void) sprintf(bp, "\n\nCode RAM");
12075 	bp += strlen(bp);
12076 	for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
12077 		if (cnt % 8 == 0) {
12078 			(void) sprintf(bp, "\n%08x: ", cnt + 0x20000);
12079 			bp += 11;
12080 		}
12081 
12082 		(void) sprintf(bp, "%08x ", fw->code_ram[cnt]);
12083 		bp += 9;
12084 	}
12085 
12086 	(void) sprintf(bp, "\n\nExternal Memory");
12087 	bp += strlen(bp);
12088 	for (cnt = 0; cnt < ha->fw_ext_memory_size / 4; cnt++) {
12089 		if (cnt % 8 == 0) {
12090 			(void) sprintf(bp, "\n%08x: ", cnt + 0x100000);
12091 			bp += 11;
12092 		}
12093 		(void) sprintf(bp, "%08x ", fw->ext_mem[cnt]);
12094 		bp += 9;
12095 	}
12096 
12097 	(void) sprintf(bp, "\n[<==END] ISP Debug Dump");
12098 	bp += strlen(bp);
12099 
12100 	(void) sprintf(bp, "\n\nRequest Queue");
12101 	bp += strlen(bp);
12102 	for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
12103 		if (cnt % 8 == 0) {
12104 			(void) sprintf(bp, "\n%08x: ", cnt);
12105 			bp += strlen(bp);
12106 		}
12107 		(void) sprintf(bp, "%08x ", fw->req_q[cnt]);
12108 		bp += strlen(bp);
12109 	}
12110 
12111 	(void) sprintf(bp, "\n\nResponse Queue");
12112 	bp += strlen(bp);
12113 	for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
12114 		if (cnt % 8 == 0) {
12115 			(void) sprintf(bp, "\n%08x: ", cnt);
12116 			bp += strlen(bp);
12117 		}
12118 		(void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
12119 		bp += strlen(bp);
12120 	}
12121 
12122 
12123 
12124 	if ((ha->cfg_flags & CFG_ENABLE_FWEXTTRACE) &&
12125 	    (ha->fwexttracebuf.bp != NULL)) {
12126 		uint32_t cnt_b = 0;
12127 		uint64_t w64 = (uintptr_t)ha->fwexttracebuf.bp;
12128 
12129 		(void) sprintf(bp, "\n\nExtended Trace Buffer Memory");
12130 		bp += strlen(bp);
12131 		/* show data address as a byte address, data as long words */
12132 		for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
12133 			cnt_b = cnt * 4;
12134 			if (cnt_b % 32 == 0) {
12135 				(void) sprintf(bp, "\n%08x: ",
12136 				    (int)(w64 + cnt_b));
12137 				bp += 11;
12138 			}
12139 			(void) sprintf(bp, "%08x ", fw->ext_trace_buf[cnt]);
12140 			bp += 9;
12141 		}
12142 	}
12143 
12144 	if ((ha->cfg_flags & CFG_ENABLE_FWFCETRACE) &&
12145 	    (ha->fwfcetracebuf.bp != NULL)) {
12146 		uint32_t cnt_b = 0;
12147 		uint64_t w64 = (uintptr_t)ha->fwfcetracebuf.bp;
12148 
12149 		(void) sprintf(bp, "\n\nFC Event Trace Buffer Memory");
12150 		bp += strlen(bp);
12151 		/* show data address as a byte address, data as long words */
12152 		for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
12153 			cnt_b = cnt * 4;
12154 			if (cnt_b % 32 == 0) {
12155 				(void) sprintf(bp, "\n%08x: ",
12156 				    (int)(w64 + cnt_b));
12157 				bp += 11;
12158 			}
12159 			(void) sprintf(bp, "%08x ", fw->fce_trace_buf[cnt]);
12160 			bp += 9;
12161 		}
12162 	}
12163 
12164 	(void) sprintf(bp, "\n\n");
12165 	bp += strlen(bp);
12166 
12167 	cnt = (uintptr_t)bp - (uintptr_t)bufp;
12168 
12169 	QL_PRINT_3(CE_CONT, "(%d): done=%xh\n", ha->instance, cnt);
12170 
12171 	return (cnt);
12172 }
12173 
12174 /*
12175  * ql_25xx_ascii_fw_dump
12176  *	Converts ISP25xx firmware binary dump to ascii.
12177  *
12178  * Input:
12179  *	ha = adapter state pointer.
 *	bufp = buffer pointer.
12181  *
12182  * Returns:
12183  *	Amount of data buffer used.
12184  *
12185  * Context:
12186  *	Kernel context.
12187  */
12188 static size_t
12189 ql_25xx_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
12190 {
12191 	uint32_t		cnt;
12192 	caddr_t			bp = bufp;
12193 	ql_25xx_fw_dump_t	*fw = ha->ql_dump_ptr;
12194 
12195 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
12196 
12197 	(void) sprintf(bp, "\nISP FW Version %d.%02d.%02d Attributes %X\n",
12198 	    ha->fw_major_version, ha->fw_minor_version,
12199 	    ha->fw_subminor_version, ha->fw_attributes);
12200 	bp += strlen(bp);
12201 
12202 	(void) sprintf(bp, "\nR2H Status Register\n%08x\n", fw->r2h_status);
12203 	bp += strlen(bp);
12204 
12205 	(void) sprintf(bp, "\nHostRisc Registers");
12206 	bp += strlen(bp);
12207 	for (cnt = 0; cnt < sizeof (fw->hostrisc_reg) / 4; cnt++) {
12208 		if (cnt % 8 == 0) {
12209 			(void) sprintf(bp++, "\n");
12210 		}
12211 		(void) sprintf(bp, "%08x ", fw->hostrisc_reg[cnt]);
12212 		bp += 9;
12213 	}
12214 
12215 	(void) sprintf(bp, "\n\nPCIe Registers");
12216 	bp += strlen(bp);
12217 	for (cnt = 0; cnt < sizeof (fw->pcie_reg) / 4; cnt++) {
12218 		if (cnt % 8 == 0) {
12219 			(void) sprintf(bp++, "\n");
12220 		}
12221 		(void) sprintf(bp, "%08x ", fw->pcie_reg[cnt]);
12222 		bp += 9;
12223 	}
12224 
12225 	(void) strcat(bp, "\n\nHost Interface Registers");
12226 	bp += strlen(bp);
12227 	for (cnt = 0; cnt < sizeof (fw->host_reg) / 4; cnt++) {
12228 		if (cnt % 8 == 0) {
12229 			(void) sprintf(bp++, "\n");
12230 		}
12231 		(void) sprintf(bp, "%08x ", fw->host_reg[cnt]);
12232 		bp += 9;
12233 	}
12234 
12235 	(void) sprintf(bufp + strlen(bufp), "\n\nShadow Registers");
12236 	bp += strlen(bp);
12237 	for (cnt = 0; cnt < sizeof (fw->shadow_reg) / 4; cnt++) {
12238 		if (cnt % 8 == 0) {
12239 			(void) sprintf(bp++, "\n");
12240 		}
12241 		(void) sprintf(bp, "%08x ", fw->shadow_reg[cnt]);
12242 		bp += 9;
12243 	}
12244 
12245 	(void) sprintf(bufp + strlen(bufp), "\n\nRISC IO Register\n%08x",
12246 	    fw->risc_io);
12247 	bp += strlen(bp);
12248 
12249 	(void) sprintf(bp, "\n\nMailbox Registers");
12250 	bp += strlen(bp);
12251 	for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
12252 		if (cnt % 16 == 0) {
12253 			(void) sprintf(bp++, "\n");
12254 		}
12255 		(void) sprintf(bp, "%04x ", fw->mailbox_reg[cnt]);
12256 		bp += 5;
12257 	}
12258 
12259 	(void) sprintf(bp, "\n\nXSEQ GP Registers");
12260 	bp += strlen(bp);
12261 	for (cnt = 0; cnt < sizeof (fw->xseq_gp_reg) / 4; cnt++) {
12262 		if (cnt % 8 == 0) {
12263 			(void) sprintf(bp++, "\n");
12264 		}
12265 		(void) sprintf(bp, "%08x ", fw->xseq_gp_reg[cnt]);
12266 		bp += 9;
12267 	}
12268 
12269 	(void) sprintf(bp, "\n\nXSEQ-0 Registers");
12270 	bp += strlen(bp);
12271 	for (cnt = 0; cnt < sizeof (fw->xseq_0_reg) / 4; cnt++) {
12272 		if (cnt % 8 == 0) {
12273 			(void) sprintf(bp++, "\n");
12274 		}
12275 		(void) sprintf(bp, "%08x ", fw->xseq_0_reg[cnt]);
12276 		bp += 9;
12277 	}
12278 
12279 	(void) sprintf(bp, "\n\nXSEQ-1 Registers");
12280 	bp += strlen(bp);
12281 	for (cnt = 0; cnt < sizeof (fw->xseq_1_reg) / 4; cnt++) {
12282 		if (cnt % 8 == 0) {
12283 			(void) sprintf(bp++, "\n");
12284 		}
12285 		(void) sprintf(bp, "%08x ", fw->xseq_1_reg[cnt]);
12286 		bp += 9;
12287 	}
12288 
12289 	(void) sprintf(bp, "\n\nRSEQ GP Registers");
12290 	bp += strlen(bp);
12291 	for (cnt = 0; cnt < sizeof (fw->rseq_gp_reg) / 4; cnt++) {
12292 		if (cnt % 8 == 0) {
12293 			(void) sprintf(bp++, "\n");
12294 		}
12295 		(void) sprintf(bp, "%08x ", fw->rseq_gp_reg[cnt]);
12296 		bp += 9;
12297 	}
12298 
12299 	(void) sprintf(bp, "\n\nRSEQ-0 Registers");
12300 	bp += strlen(bp);
12301 	for (cnt = 0; cnt < sizeof (fw->rseq_0_reg) / 4; cnt++) {
12302 		if (cnt % 8 == 0) {
12303 			(void) sprintf(bp++, "\n");
12304 		}
12305 		(void) sprintf(bp, "%08x ", fw->rseq_0_reg[cnt]);
12306 		bp += 9;
12307 	}
12308 
12309 	(void) sprintf(bp, "\n\nRSEQ-1 Registers");
12310 	bp += strlen(bp);
12311 	for (cnt = 0; cnt < sizeof (fw->rseq_1_reg) / 4; cnt++) {
12312 		if (cnt % 8 == 0) {
12313 			(void) sprintf(bp++, "\n");
12314 		}
12315 		(void) sprintf(bp, "%08x ", fw->rseq_1_reg[cnt]);
12316 		bp += 9;
12317 	}
12318 
12319 	(void) sprintf(bp, "\n\nRSEQ-2 Registers");
12320 	bp += strlen(bp);
12321 	for (cnt = 0; cnt < sizeof (fw->rseq_2_reg) / 4; cnt++) {
12322 		if (cnt % 8 == 0) {
12323 			(void) sprintf(bp++, "\n");
12324 		}
12325 		(void) sprintf(bp, "%08x ", fw->rseq_2_reg[cnt]);
12326 		bp += 9;
12327 	}
12328 
12329 	(void) sprintf(bp, "\n\nASEQ GP Registers");
12330 	bp += strlen(bp);
12331 	for (cnt = 0; cnt < sizeof (fw->aseq_gp_reg) / 4; cnt++) {
12332 		if (cnt % 8 == 0) {
12333 			(void) sprintf(bp++, "\n");
12334 		}
12335 		(void) sprintf(bp, "%08x ", fw->aseq_gp_reg[cnt]);
12336 		bp += 9;
12337 	}
12338 
12339 	(void) sprintf(bp, "\n\nASEQ-0 GP Registers");
12340 	bp += strlen(bp);
12341 	for (cnt = 0; cnt < sizeof (fw->aseq_0_reg) / 4; cnt++) {
12342 		if (cnt % 8 == 0) {
12343 			(void) sprintf(bp++, "\n");
12344 		}
12345 		(void) sprintf(bp, "%08x ", fw->aseq_0_reg[cnt]);
12346 		bp += 9;
12347 	}
12348 
12349 	(void) sprintf(bp, "\n\nASEQ-1 GP Registers");
12350 	bp += strlen(bp);
12351 	for (cnt = 0; cnt < sizeof (fw->aseq_1_reg) / 4; cnt++) {
12352 		if (cnt % 8 == 0) {
12353 			(void) sprintf(bp++, "\n");
12354 		}
12355 		(void) sprintf(bp, "%08x ", fw->aseq_1_reg[cnt]);
12356 		bp += 9;
12357 	}
12358 
12359 	(void) sprintf(bp, "\n\nASEQ-2 GP Registers");
12360 	bp += strlen(bp);
12361 	for (cnt = 0; cnt < sizeof (fw->aseq_2_reg) / 4; cnt++) {
12362 		if (cnt % 8 == 0) {
12363 			(void) sprintf(bp++, "\n");
12364 		}
12365 		(void) sprintf(bp, "%08x ", fw->aseq_2_reg[cnt]);
12366 		bp += 9;
12367 	}
12368 
12369 	(void) sprintf(bp, "\n\nCommand DMA Registers");
12370 	bp += strlen(bp);
12371 	for (cnt = 0; cnt < sizeof (fw->cmd_dma_reg) / 4; cnt++) {
12372 		if (cnt % 8 == 0) {
12373 			(void) sprintf(bp++, "\n");
12374 		}
12375 		(void)  sprintf(bp, "%08x ", fw->cmd_dma_reg[cnt]);
12376 		bp += 9;
12377 	}
12378 
12379 	(void) sprintf(bp, "\n\nRequest0 Queue DMA Channel Registers");
12380 	bp += strlen(bp);
12381 	for (cnt = 0; cnt < sizeof (fw->req0_dma_reg) / 4; cnt++) {
12382 		if (cnt % 8 == 0) {
12383 			(void) sprintf(bp++, "\n");
12384 		}
12385 		(void) sprintf(bp, "%08x ", fw->req0_dma_reg[cnt]);
12386 		bp += 9;
12387 	}
12388 
12389 	(void) sprintf(bp, "\n\nResponse0 Queue DMA Channel Registers");
12390 	bp += strlen(bp);
12391 	for (cnt = 0; cnt < sizeof (fw->resp0_dma_reg) / 4; cnt++) {
12392 		if (cnt % 8 == 0) {
12393 			(void) sprintf(bp++, "\n");
12394 		}
12395 		(void) sprintf(bp, "%08x ", fw->resp0_dma_reg[cnt]);
12396 		bp += 9;
12397 	}
12398 
12399 	(void) sprintf(bp, "\n\nRequest1 Queue DMA Channel Registers");
12400 	bp += strlen(bp);
12401 	for (cnt = 0; cnt < sizeof (fw->req1_dma_reg) / 4; cnt++) {
12402 		if (cnt % 8 == 0) {
12403 			(void) sprintf(bp++, "\n");
12404 		}
12405 		(void) sprintf(bp, "%08x ", fw->req1_dma_reg[cnt]);
12406 		bp += 9;
12407 	}
12408 
12409 	(void) sprintf(bp, "\n\nXMT0 Data DMA Registers");
12410 	bp += strlen(bp);
12411 	for (cnt = 0; cnt < sizeof (fw->xmt0_dma_reg) / 4; cnt++) {
12412 		if (cnt % 8 == 0) {
12413 			(void) sprintf(bp++, "\n");
12414 		}
12415 		(void) sprintf(bp, "%08x ", fw->xmt0_dma_reg[cnt]);
12416 		bp += 9;
12417 	}
12418 
12419 	(void) sprintf(bp, "\n\nXMT1 Data DMA Registers");
12420 	bp += strlen(bp);
12421 	for (cnt = 0; cnt < sizeof (fw->xmt1_dma_reg) / 4; cnt++) {
12422 		if (cnt % 8 == 0) {
12423 			(void) sprintf(bp++, "\n");
12424 		}
12425 		(void) sprintf(bp, "%08x ", fw->xmt1_dma_reg[cnt]);
12426 		bp += 9;
12427 	}
12428 
12429 	(void) sprintf(bp, "\n\nXMT2 Data DMA Registers");
12430 	bp += strlen(bp);
12431 	for (cnt = 0; cnt < sizeof (fw->xmt2_dma_reg) / 4; cnt++) {
12432 		if (cnt % 8 == 0) {
12433 			(void) sprintf(bp++, "\n");
12434 		}
12435 		(void) sprintf(bp, "%08x ", fw->xmt2_dma_reg[cnt]);
12436 		bp += 9;
12437 	}
12438 
12439 	(void) sprintf(bp, "\n\nXMT3 Data DMA Registers");
12440 	bp += strlen(bp);
12441 	for (cnt = 0; cnt < sizeof (fw->xmt3_dma_reg) / 4; cnt++) {
12442 		if (cnt % 8 == 0) {
12443 			(void) sprintf(bp++, "\n");
12444 		}
12445 		(void) sprintf(bp, "%08x ", fw->xmt3_dma_reg[cnt]);
12446 		bp += 9;
12447 	}
12448 
12449 	(void) sprintf(bp, "\n\nXMT4 Data DMA Registers");
12450 	bp += strlen(bp);
12451 	for (cnt = 0; cnt < sizeof (fw->xmt4_dma_reg) / 4; cnt++) {
12452 		if (cnt % 8 == 0) {
12453 			(void) sprintf(bp++, "\n");
12454 		}
12455 		(void) sprintf(bp, "%08x ", fw->xmt4_dma_reg[cnt]);
12456 		bp += 9;
12457 	}
12458 
12459 	(void) sprintf(bp, "\n\nXMT Data DMA Common Registers");
12460 	bp += strlen(bp);
12461 	for (cnt = 0; cnt < sizeof (fw->xmt_data_dma_reg) / 4; cnt++) {
12462 		if (cnt % 8 == 0) {
12463 			(void) sprintf(bp++, "\n");
12464 		}
12465 		(void) sprintf(bp, "%08x ", fw->xmt_data_dma_reg[cnt]);
12466 		bp += 9;
12467 	}
12468 
12469 	(void) sprintf(bp, "\n\nRCV Thread 0 Data DMA Registers");
12470 	bp += strlen(bp);
12471 	for (cnt = 0; cnt < sizeof (fw->rcvt0_data_dma_reg) / 4; cnt++) {
12472 		if (cnt % 8 == 0) {
12473 			(void) sprintf(bp++, "\n");
12474 		}
12475 		(void) sprintf(bp, "%08x ", fw->rcvt0_data_dma_reg[cnt]);
12476 		bp += 9;
12477 	}
12478 
12479 	(void) sprintf(bp, "\n\nRCV Thread 1 Data DMA Registers");
12480 	bp += strlen(bp);
12481 	for (cnt = 0; cnt < sizeof (fw->rcvt1_data_dma_reg) / 4; cnt++) {
12482 		if (cnt % 8 == 0) {
12483 			(void) sprintf(bp++, "\n");
12484 		}
12485 		(void) sprintf(bp, "%08x ", fw->rcvt1_data_dma_reg[cnt]);
12486 		bp += 9;
12487 	}
12488 
12489 	(void) sprintf(bp, "\n\nRISC GP Registers");
12490 	bp += strlen(bp);
12491 	for (cnt = 0; cnt < sizeof (fw->risc_gp_reg) / 4; cnt++) {
12492 		if (cnt % 8 == 0) {
12493 			(void) sprintf(bp++, "\n");
12494 		}
12495 		(void) sprintf(bp, "%08x ", fw->risc_gp_reg[cnt]);
12496 		bp += 9;
12497 	}
12498 
12499 	(void) sprintf(bp, "\n\nLMC Registers");
12500 	bp += strlen(bp);
12501 	for (cnt = 0; cnt < sizeof (fw->lmc_reg) / 4; cnt++) {
12502 		if (cnt % 8 == 0) {
12503 			(void) sprintf(bp++, "\n");
12504 		}
12505 		(void) sprintf(bp, "%08x ", fw->lmc_reg[cnt]);
12506 		bp += 9;
12507 	}
12508 
12509 	(void) sprintf(bp, "\n\nFPM Hardware Registers");
12510 	bp += strlen(bp);
12511 	for (cnt = 0; cnt < sizeof (fw->fpm_hdw_reg) / 4; cnt++) {
12512 		if (cnt % 8 == 0) {
12513 			(void) sprintf(bp++, "\n");
12514 		}
12515 		(void) sprintf(bp, "%08x ", fw->fpm_hdw_reg[cnt]);
12516 		bp += 9;
12517 	}
12518 
12519 	(void) sprintf(bp, "\n\nFB Hardware Registers");
12520 	bp += strlen(bp);
12521 	for (cnt = 0; cnt < sizeof (fw->fb_hdw_reg) / 4; cnt++) {
12522 		if (cnt % 8 == 0) {
12523 			(void) sprintf(bp++, "\n");
12524 		}
12525 		(void) sprintf(bp, "%08x ", fw->fb_hdw_reg[cnt]);
12526 		bp += 9;
12527 	}
12528 
12529 	(void) sprintf(bp, "\n\nCode RAM");
12530 	bp += strlen(bp);
12531 	for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
12532 		if (cnt % 8 == 0) {
12533 			(void) sprintf(bp, "\n%08x: ", cnt + 0x20000);
12534 			bp += 11;
12535 		}
12536 		(void) sprintf(bp, "%08x ", fw->code_ram[cnt]);
12537 		bp += 9;
12538 	}
12539 
12540 	(void) sprintf(bp, "\n\nExternal Memory");
12541 	bp += strlen(bp);
12542 	for (cnt = 0; cnt < ha->fw_ext_memory_size / 4; cnt++) {
12543 		if (cnt % 8 == 0) {
12544 			(void) sprintf(bp, "\n%08x: ", cnt + 0x100000);
12545 			bp += 11;
12546 		}
12547 		(void) sprintf(bp, "%08x ", fw->ext_mem[cnt]);
12548 		bp += 9;
12549 	}
12550 
12551 	(void) sprintf(bp, "\n[<==END] ISP Debug Dump");
12552 	bp += strlen(bp);
12553 
12554 	(void) sprintf(bp, "\n\nRequest Queue");
12555 	bp += strlen(bp);
12556 	for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
12557 		if (cnt % 8 == 0) {
12558 			(void) sprintf(bp, "\n%08x: ", cnt);
12559 			bp += strlen(bp);
12560 		}
12561 		(void) sprintf(bp, "%08x ", fw->req_q[cnt]);
12562 		bp += strlen(bp);
12563 	}
12564 
12565 	(void) sprintf(bp, "\n\nResponse Queue");
12566 	bp += strlen(bp);
12567 	for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
12568 		if (cnt % 8 == 0) {
12569 			(void) sprintf(bp, "\n%08x: ", cnt);
12570 			bp += strlen(bp);
12571 		}
12572 		(void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
12573 		bp += strlen(bp);
12574 	}
12575 
12576 	if ((ha->cfg_flags & CFG_ENABLE_FWEXTTRACE) &&
12577 	    (ha->fwexttracebuf.bp != NULL)) {
12578 		uint32_t cnt_b = 0;
12579 		uint64_t w64 = (uintptr_t)ha->fwexttracebuf.bp;
12580 
12581 		(void) sprintf(bp, "\n\nExtended Trace Buffer Memory");
12582 		bp += strlen(bp);
12583 		/* show data address as a byte address, data as long words */
12584 		for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
12585 			cnt_b = cnt * 4;
12586 			if (cnt_b % 32 == 0) {
12587 				(void) sprintf(bp, "\n%08x: ",
12588 				    (int)(w64 + cnt_b));
12589 				bp += 11;
12590 			}
12591 			(void) sprintf(bp, "%08x ", fw->ext_trace_buf[cnt]);
12592 			bp += 9;
12593 		}
12594 	}
12595 
12596 	if ((ha->cfg_flags & CFG_ENABLE_FWFCETRACE) &&
12597 	    (ha->fwfcetracebuf.bp != NULL)) {
12598 		uint32_t cnt_b = 0;
12599 		uint64_t w64 = (uintptr_t)ha->fwfcetracebuf.bp;
12600 
12601 		(void) sprintf(bp, "\n\nFC Event Trace Buffer Memory");
12602 		bp += strlen(bp);
12603 		/* show data address as a byte address, data as long words */
12604 		for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
12605 			cnt_b = cnt * 4;
12606 			if (cnt_b % 32 == 0) {
12607 				(void) sprintf(bp, "\n%08x: ",
12608 				    (int)(w64 + cnt_b));
12609 				bp += 11;
12610 			}
12611 			(void) sprintf(bp, "%08x ", fw->fce_trace_buf[cnt]);
12612 			bp += 9;
12613 		}
12614 	}
12615 
12616 	(void) sprintf(bp, "\n\n");
12617 	bp += strlen(bp);
12618 
12619 	cnt = (uintptr_t)bp - (uintptr_t)bufp;
12620 
12621 	QL_PRINT_3(CE_CONT, "(%d): done=%xh\n", ha->instance, cnt);
12622 
12623 	return (cnt);
12624 }
12625 
12626 /*
12627  * ql_2200_binary_fw_dump
12628  *
12629  * Input:
12630  *	ha:	adapter state pointer.
12631  *	fw:	firmware dump context pointer.
12632  *
12633  * Returns:
12634  *	ql local function return status code.
12635  *
12636  * Context:
12637  *	Interrupt or Kernel context, no mailbox commands allowed.
12638  */
static int
ql_2200_binary_fw_dump(ql_adapter_state_t *ha, ql_fw_dump_t *fw)
{
	uint32_t	cnt;
	uint16_t	risc_address;
	clock_t		timer;
	/* Scratch mailbox context; only mb[0] (command status) is used. */
	mbx_cmd_t	mc;
	mbx_cmd_t	*mcp = &mc;
	int		rval = QL_SUCCESS;

	/* Disable ISP interrupts. */
	WRT16_IO_REG(ha, ictrl, 0);
	ADAPTER_STATE_LOCK(ha);
	ha->flags &= ~INTERRUPTS_ENABLED;
	ADAPTER_STATE_UNLOCK(ha);

	/* Release mailbox registers. */
	WRT16_IO_REG(ha, semaphore, 0);

	/* Pause RISC. */
	WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
	/* Poll for pause-acknowledge: 30000 x 1ms = ~30s budget. */
	timer = 30000;
	while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
		if (timer-- != 0) {
			drv_usecwait(MILLISEC);
		} else {
			rval = QL_FUNCTION_TIMEOUT;
			break;
		}
	}

	if (rval == QL_SUCCESS) {
		/*
		 * Snapshot the register banks.  Each ql_read_regs() call
		 * copies 16-bit registers from the mapped I/O window; the
		 * ctrl_status/pcr writes below select which internal bank
		 * appears in the window, so the write/read ordering is
		 * significant.
		 */
		(void) ql_read_regs(ha, fw->pbiu_reg, ha->iobase,
		    sizeof (fw->pbiu_reg) / 2, 16);

		/* In 2200 we only read 8 mailboxes */
		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x10,
		    8, 16);

		(void) ql_read_regs(ha, fw->dma_reg, ha->iobase + 0x20,
		    sizeof (fw->dma_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0);
		(void) ql_read_regs(ha, fw->risc_hdw_reg, ha->iobase + 0xA0,
		    sizeof (fw->risc_hdw_reg) / 2, 16);

		/* RISC GP register banks 0-7, selected via pcr. */
		WRT16_IO_REG(ha, pcr, 0x2000);
		(void) ql_read_regs(ha, fw->risc_gp0_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp0_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2100);
		(void) ql_read_regs(ha, fw->risc_gp1_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp1_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2200);
		(void) ql_read_regs(ha, fw->risc_gp2_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp2_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2300);
		(void) ql_read_regs(ha, fw->risc_gp3_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp3_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2400);
		(void) ql_read_regs(ha, fw->risc_gp4_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp4_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2500);
		(void) ql_read_regs(ha, fw->risc_gp5_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp5_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2600);
		(void) ql_read_regs(ha, fw->risc_gp6_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp6_reg) / 2, 16);

		WRT16_IO_REG(ha, pcr, 0x2700);
		(void) ql_read_regs(ha, fw->risc_gp7_reg, ha->iobase + 0x80,
		    sizeof (fw->risc_gp7_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x10);
		/* 2200 has only 16 registers */
		(void) ql_read_regs(ha, fw->frame_buf_hdw_reg,
		    ha->iobase + 0x80, 16, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x20);
		(void) ql_read_regs(ha, fw->fpm_b0_reg, ha->iobase + 0x80,
		    sizeof (fw->fpm_b0_reg) / 2, 16);

		WRT16_IO_REG(ha, ctrl_status, 0x30);
		(void) ql_read_regs(ha, fw->fpm_b1_reg, ha->iobase + 0x80,
		    sizeof (fw->fpm_b1_reg) / 2, 16);

		/* Select FPM registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x20);

		/* FPM Soft Reset. */
		WRT16_IO_REG(ha, fpm_diag_config, 0x100);

		/* Select frame buffer registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x10);

		/* Reset frame buffer FIFOs. */
		WRT16_IO_REG(ha, fb_cmd, 0xa000);

		/* Select RISC module registers. */
		WRT16_IO_REG(ha, ctrl_status, 0);

		/* Reset RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RESET_RISC);

		/* Reset ISP semaphore. */
		WRT16_IO_REG(ha, semaphore, 0);

		/* Release RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);

		/* Wait for RISC to recover from reset. */
		timer = 30000;
		while (RD16_IO_REG(ha, mailbox[0]) == MBS_BUSY) {
			if (timer-- != 0) {
				drv_usecwait(MILLISEC);
			} else {
				rval = QL_FUNCTION_TIMEOUT;
				break;
			}
		}

		/* Disable RISC pause on FPM parity error. */
		WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);
	}

	if (rval == QL_SUCCESS) {
		/* Pause RISC. */
		WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
		timer = 30000;
		while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
			if (timer-- != 0) {
				drv_usecwait(MILLISEC);
			} else {
				rval = QL_FUNCTION_TIMEOUT;
				break;
			}
		}
	}

	if (rval == QL_SUCCESS) {
		/* Set memory configuration and timing. */
		WRT16_IO_REG(ha, mctr, 0xf2);

		/* Release RISC. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);

		/* Get RISC SRAM. */
		/*
		 * Read 0xf000 words starting at 0x1000 one at a time via
		 * the READ_RAM_WORD mailbox command: post the address in
		 * mb[1], raise the host interrupt, then poll for the RISC
		 * interrupt + semaphore handshake; the word arrives in
		 * mb[2] and the command status in mb[0].
		 */
		risc_address = 0x1000;
		WRT16_IO_REG(ha, mailbox[0], MBC_READ_RAM_WORD);
		for (cnt = 0; cnt < 0xf000; cnt++) {
			WRT16_IO_REG(ha, mailbox[1], risc_address++);
			WRT16_IO_REG(ha, hccr, HC_SET_HOST_INT);
			/* Per-word budget: 6000000 x 5us = ~30s. */
			for (timer = 6000000; timer != 0; timer--) {
				/* Check for pending interrupts. */
				if (RD16_IO_REG(ha, istatus) & RISC_INT) {
					if (RD16_IO_REG(ha, semaphore) &
					    BIT_0) {
						WRT16_IO_REG(ha, hccr,
						    HC_CLR_RISC_INT);
						mcp->mb[0] = RD16_IO_REG(ha,
						    mailbox[0]);
						fw->risc_ram[cnt] =
						    RD16_IO_REG(ha,
						    mailbox[2]);
						WRT16_IO_REG(ha,
						    semaphore, 0);
						break;
					}
					/* Spurious interrupt; clear it. */
					WRT16_IO_REG(ha, hccr,
					    HC_CLR_RISC_INT);
				}
				drv_usecwait(5);
			}

			/*
			 * timer != 0 implies the handshake completed, so
			 * mcp->mb[0] holds a valid mailbox status here.
			 */
			if (timer == 0) {
				rval = QL_FUNCTION_TIMEOUT;
			} else {
				rval = mcp->mb[0];
			}

			if (rval != QL_SUCCESS) {
				break;
			}
		}
	}

	return (rval);
}
12832 
12833 /*
12834  * ql_2300_binary_fw_dump
12835  *
12836  * Input:
12837  *	ha:	adapter state pointer.
12838  *	fw:	firmware dump context pointer.
12839  *
12840  * Returns:
12841  *	ql local function return status code.
12842  *
12843  * Context:
12844  *	Interrupt or Kernel context, no mailbox commands allowed.
12845  */
12846 static int
12847 ql_2300_binary_fw_dump(ql_adapter_state_t *ha, ql_fw_dump_t *fw)
12848 {
12849 	clock_t	timer;
12850 	int	rval = QL_SUCCESS;
12851 
12852 	/* Disable ISP interrupts. */
12853 	WRT16_IO_REG(ha, ictrl, 0);
12854 	ADAPTER_STATE_LOCK(ha);
12855 	ha->flags &= ~INTERRUPTS_ENABLED;
12856 	ADAPTER_STATE_UNLOCK(ha);
12857 
12858 	/* Release mailbox registers. */
12859 	WRT16_IO_REG(ha, semaphore, 0);
12860 
12861 	/* Pause RISC. */
12862 	WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
12863 	timer = 30000;
12864 	while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
12865 		if (timer-- != 0) {
12866 			drv_usecwait(MILLISEC);
12867 		} else {
12868 			rval = QL_FUNCTION_TIMEOUT;
12869 			break;
12870 		}
12871 	}
12872 
12873 	if (rval == QL_SUCCESS) {
12874 		(void) ql_read_regs(ha, fw->pbiu_reg, ha->iobase,
12875 		    sizeof (fw->pbiu_reg) / 2, 16);
12876 
12877 		(void) ql_read_regs(ha, fw->risc_host_reg, ha->iobase + 0x10,
12878 		    sizeof (fw->risc_host_reg) / 2, 16);
12879 
12880 		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x40,
12881 		    sizeof (fw->mailbox_reg) / 2, 16);
12882 
12883 		WRT16_IO_REG(ha, ctrl_status, 0x40);
12884 		(void) ql_read_regs(ha, fw->resp_dma_reg, ha->iobase + 0x80,
12885 		    sizeof (fw->resp_dma_reg) / 2, 16);
12886 
12887 		WRT16_IO_REG(ha, ctrl_status, 0x50);
12888 		(void) ql_read_regs(ha, fw->dma_reg, ha->iobase + 0x80,
12889 		    sizeof (fw->dma_reg) / 2, 16);
12890 
12891 		WRT16_IO_REG(ha, ctrl_status, 0);
12892 		(void) ql_read_regs(ha, fw->risc_hdw_reg, ha->iobase + 0xA0,
12893 		    sizeof (fw->risc_hdw_reg) / 2, 16);
12894 
12895 		WRT16_IO_REG(ha, pcr, 0x2000);
12896 		(void) ql_read_regs(ha, fw->risc_gp0_reg, ha->iobase + 0x80,
12897 		    sizeof (fw->risc_gp0_reg) / 2, 16);
12898 
12899 		WRT16_IO_REG(ha, pcr, 0x2200);
12900 		(void) ql_read_regs(ha, fw->risc_gp1_reg, ha->iobase + 0x80,
12901 		    sizeof (fw->risc_gp1_reg) / 2, 16);
12902 
12903 		WRT16_IO_REG(ha, pcr, 0x2400);
12904 		(void) ql_read_regs(ha, fw->risc_gp2_reg, ha->iobase + 0x80,
12905 		    sizeof (fw->risc_gp2_reg) / 2, 16);
12906 
12907 		WRT16_IO_REG(ha, pcr, 0x2600);
12908 		(void) ql_read_regs(ha, fw->risc_gp3_reg, ha->iobase + 0x80,
12909 		    sizeof (fw->risc_gp3_reg) / 2, 16);
12910 
12911 		WRT16_IO_REG(ha, pcr, 0x2800);
12912 		(void) ql_read_regs(ha, fw->risc_gp4_reg, ha->iobase + 0x80,
12913 		    sizeof (fw->risc_gp4_reg) / 2, 16);
12914 
12915 		WRT16_IO_REG(ha, pcr, 0x2A00);
12916 		(void) ql_read_regs(ha, fw->risc_gp5_reg, ha->iobase + 0x80,
12917 		    sizeof (fw->risc_gp5_reg) / 2, 16);
12918 
12919 		WRT16_IO_REG(ha, pcr, 0x2C00);
12920 		(void) ql_read_regs(ha, fw->risc_gp6_reg, ha->iobase + 0x80,
12921 		    sizeof (fw->risc_gp6_reg) / 2, 16);
12922 
12923 		WRT16_IO_REG(ha, pcr, 0x2E00);
12924 		(void) ql_read_regs(ha, fw->risc_gp7_reg, ha->iobase + 0x80,
12925 		    sizeof (fw->risc_gp7_reg) / 2, 16);
12926 
12927 		WRT16_IO_REG(ha, ctrl_status, 0x10);
12928 		(void) ql_read_regs(ha, fw->frame_buf_hdw_reg,
12929 		    ha->iobase + 0x80, sizeof (fw->frame_buf_hdw_reg) / 2, 16);
12930 
12931 		WRT16_IO_REG(ha, ctrl_status, 0x20);
12932 		(void) ql_read_regs(ha, fw->fpm_b0_reg, ha->iobase + 0x80,
12933 		    sizeof (fw->fpm_b0_reg) / 2, 16);
12934 
12935 		WRT16_IO_REG(ha, ctrl_status, 0x30);
12936 		(void) ql_read_regs(ha, fw->fpm_b1_reg, ha->iobase + 0x80,
12937 		    sizeof (fw->fpm_b1_reg) / 2, 16);
12938 
12939 		/* Select FPM registers. */
12940 		WRT16_IO_REG(ha, ctrl_status, 0x20);
12941 
12942 		/* FPM Soft Reset. */
12943 		WRT16_IO_REG(ha, fpm_diag_config, 0x100);
12944 
12945 		/* Select frame buffer registers. */
12946 		WRT16_IO_REG(ha, ctrl_status, 0x10);
12947 
12948 		/* Reset frame buffer FIFOs. */
12949 		WRT16_IO_REG(ha, fb_cmd, 0xa000);
12950 
12951 		/* Select RISC module registers. */
12952 		WRT16_IO_REG(ha, ctrl_status, 0);
12953 
12954 		/* Reset RISC module. */
12955 		WRT16_IO_REG(ha, hccr, HC_RESET_RISC);
12956 
12957 		/* Reset ISP semaphore. */
12958 		WRT16_IO_REG(ha, semaphore, 0);
12959 
12960 		/* Release RISC module. */
12961 		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
12962 
12963 		/* Wait for RISC to recover from reset. */
12964 		timer = 30000;
12965 		while (RD16_IO_REG(ha, mailbox[0]) == MBS_BUSY) {
12966 			if (timer-- != 0) {
12967 				drv_usecwait(MILLISEC);
12968 			} else {
12969 				rval = QL_FUNCTION_TIMEOUT;
12970 				break;
12971 			}
12972 		}
12973 
12974 		/* Disable RISC pause on FPM parity error. */
12975 		WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);
12976 	}
12977 
12978 	/* Get RISC SRAM. */
12979 	if (rval == QL_SUCCESS) {
12980 		rval = ql_read_risc_ram(ha, 0x800, 0xf800, fw->risc_ram);
12981 	}
12982 	/* Get STACK SRAM. */
12983 	if (rval == QL_SUCCESS) {
12984 		rval = ql_read_risc_ram(ha, 0x10000, 0x800, fw->stack_ram);
12985 	}
12986 	/* Get DATA SRAM. */
12987 	if (rval == QL_SUCCESS) {
12988 		rval = ql_read_risc_ram(ha, 0x10800, 0xf800, fw->data_ram);
12989 	}
12990 
12991 	return (rval);
12992 }
12993 
12994 /*
12995  * ql_24xx_binary_fw_dump
12996  *
12997  * Input:
12998  *	ha:	adapter state pointer.
12999  *	fw:	firmware dump context pointer.
13000  *
13001  * Returns:
13002  *	ql local function return status code.
13003  *
13004  * Context:
13005  *	Interrupt or Kernel context, no mailbox commands allowed.
13006  */
13007 static int
13008 ql_24xx_binary_fw_dump(ql_adapter_state_t *ha, ql_24xx_fw_dump_t *fw)
13009 {
13010 	uint32_t	*reg32;
13011 	void		*bp;
13012 	clock_t		timer;
13013 	int		rval = QL_SUCCESS;
13014 
13015 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
13016 
13017 	fw->hccr = RD32_IO_REG(ha, hccr);
13018 
13019 	/* Pause RISC. */
13020 	if ((RD32_IO_REG(ha, intr_info_lo) & RH_RISC_PAUSED) == 0) {
13021 		/* Disable ISP interrupts. */
13022 		WRT16_IO_REG(ha, ictrl, 0);
13023 
13024 		WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
13025 		for (timer = 30000;
13026 		    (RD32_IO_REG(ha, intr_info_lo) & RH_RISC_PAUSED) == 0 &&
13027 		    rval == QL_SUCCESS; timer--) {
13028 			if (timer) {
13029 				drv_usecwait(100);
13030 			} else {
13031 				rval = QL_FUNCTION_TIMEOUT;
13032 			}
13033 		}
13034 	}
13035 
13036 	if (rval == QL_SUCCESS) {
13037 		/* Host interface registers. */
13038 		(void) ql_read_regs(ha, fw->host_reg, ha->iobase,
13039 		    sizeof (fw->host_reg) / 4, 32);
13040 
13041 		/* Disable ISP interrupts. */
13042 		WRT32_IO_REG(ha, ictrl, 0);
13043 		RD32_IO_REG(ha, ictrl);
13044 		ADAPTER_STATE_LOCK(ha);
13045 		ha->flags &= ~INTERRUPTS_ENABLED;
13046 		ADAPTER_STATE_UNLOCK(ha);
13047 
13048 		/* Shadow registers. */
13049 
13050 		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
13051 		RD32_IO_REG(ha, io_base_addr);
13052 
13053 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13054 		WRT_REG_DWORD(ha, reg32, 0xB0000000);
13055 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13056 		fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);
13057 
13058 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13059 		WRT_REG_DWORD(ha, reg32, 0xB0100000);
13060 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13061 		fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);
13062 
13063 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13064 		WRT_REG_DWORD(ha, reg32, 0xB0200000);
13065 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13066 		fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);
13067 
13068 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13069 		WRT_REG_DWORD(ha, reg32, 0xB0300000);
13070 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13071 		fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);
13072 
13073 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13074 		WRT_REG_DWORD(ha, reg32, 0xB0400000);
13075 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13076 		fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);
13077 
13078 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13079 		WRT_REG_DWORD(ha, reg32, 0xB0500000);
13080 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13081 		fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);
13082 
13083 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13084 		WRT_REG_DWORD(ha, reg32, 0xB0600000);
13085 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13086 		fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);
13087 
13088 		/* Mailbox registers. */
13089 		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
13090 		    sizeof (fw->mailbox_reg) / 2, 16);
13091 
13092 		/* Transfer sequence registers. */
13093 
13094 		/* XSEQ GP */
13095 		WRT32_IO_REG(ha, io_base_addr, 0xBF00);
13096 		bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
13097 		    16, 32);
13098 		WRT32_IO_REG(ha, io_base_addr, 0xBF10);
13099 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13100 		WRT32_IO_REG(ha, io_base_addr, 0xBF20);
13101 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13102 		WRT32_IO_REG(ha, io_base_addr, 0xBF30);
13103 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13104 		WRT32_IO_REG(ha, io_base_addr, 0xBF40);
13105 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13106 		WRT32_IO_REG(ha, io_base_addr, 0xBF50);
13107 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13108 		WRT32_IO_REG(ha, io_base_addr, 0xBF60);
13109 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13110 		WRT32_IO_REG(ha, io_base_addr, 0xBF70);
13111 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13112 
13113 		/* XSEQ-0 */
13114 		WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
13115 		(void) ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
13116 		    sizeof (fw->xseq_0_reg) / 4, 32);
13117 
13118 		/* XSEQ-1 */
13119 		WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
13120 		(void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
13121 		    sizeof (fw->xseq_1_reg) / 4, 32);
13122 
13123 		/* Receive sequence registers. */
13124 
13125 		/* RSEQ GP */
13126 		WRT32_IO_REG(ha, io_base_addr, 0xFF00);
13127 		bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
13128 		    16, 32);
13129 		WRT32_IO_REG(ha, io_base_addr, 0xFF10);
13130 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13131 		WRT32_IO_REG(ha, io_base_addr, 0xFF20);
13132 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13133 		WRT32_IO_REG(ha, io_base_addr, 0xFF30);
13134 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13135 		WRT32_IO_REG(ha, io_base_addr, 0xFF40);
13136 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13137 		WRT32_IO_REG(ha, io_base_addr, 0xFF50);
13138 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13139 		WRT32_IO_REG(ha, io_base_addr, 0xFF60);
13140 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13141 		WRT32_IO_REG(ha, io_base_addr, 0xFF70);
13142 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13143 
13144 		/* RSEQ-0 */
13145 		WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
13146 		(void) ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
13147 		    sizeof (fw->rseq_0_reg) / 4, 32);
13148 
13149 		/* RSEQ-1 */
13150 		WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
13151 		(void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
13152 		    sizeof (fw->rseq_1_reg) / 4, 32);
13153 
13154 		/* RSEQ-2 */
13155 		WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
13156 		(void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
13157 		    sizeof (fw->rseq_2_reg) / 4, 32);
13158 
13159 		/* Command DMA registers. */
13160 
13161 		WRT32_IO_REG(ha, io_base_addr, 0x7100);
13162 		(void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
13163 		    sizeof (fw->cmd_dma_reg) / 4, 32);
13164 
13165 		/* Queues. */
13166 
13167 		/* RequestQ0 */
13168 		WRT32_IO_REG(ha, io_base_addr, 0x7200);
13169 		bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
13170 		    8, 32);
13171 		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
13172 
13173 		/* ResponseQ0 */
13174 		WRT32_IO_REG(ha, io_base_addr, 0x7300);
13175 		bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
13176 		    8, 32);
13177 		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
13178 
13179 		/* RequestQ1 */
13180 		WRT32_IO_REG(ha, io_base_addr, 0x7400);
13181 		bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
13182 		    8, 32);
13183 		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
13184 
13185 		/* Transmit DMA registers. */
13186 
13187 		/* XMT0 */
13188 		WRT32_IO_REG(ha, io_base_addr, 0x7600);
13189 		bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
13190 		    16, 32);
13191 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13192 		WRT32_IO_REG(ha, io_base_addr, 0x7610);
13193 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13194 
13195 		/* XMT1 */
13196 		WRT32_IO_REG(ha, io_base_addr, 0x7620);
13197 		bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
13198 		    16, 32);
13199 		WRT32_IO_REG(ha, io_base_addr, 0x7630);
13200 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13201 
13202 		/* XMT2 */
13203 		WRT32_IO_REG(ha, io_base_addr, 0x7640);
13204 		bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
13205 		    16, 32);
13206 		WRT32_IO_REG(ha, io_base_addr, 0x7650);
13207 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13208 
13209 		/* XMT3 */
13210 		WRT32_IO_REG(ha, io_base_addr, 0x7660);
13211 		bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
13212 		    16, 32);
13213 		WRT32_IO_REG(ha, io_base_addr, 0x7670);
13214 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13215 
13216 		/* XMT4 */
13217 		WRT32_IO_REG(ha, io_base_addr, 0x7680);
13218 		bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
13219 		    16, 32);
13220 		WRT32_IO_REG(ha, io_base_addr, 0x7690);
13221 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13222 
13223 		/* XMT Common */
13224 		WRT32_IO_REG(ha, io_base_addr, 0x76A0);
13225 		(void) ql_read_regs(ha, fw->xmt_data_dma_reg,
13226 		    ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);
13227 
13228 		/* Receive DMA registers. */
13229 
13230 		/* RCVThread0 */
13231 		WRT32_IO_REG(ha, io_base_addr, 0x7700);
13232 		bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
13233 		    ha->iobase + 0xC0, 16, 32);
13234 		WRT32_IO_REG(ha, io_base_addr, 0x7710);
13235 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13236 
13237 		/* RCVThread1 */
13238 		WRT32_IO_REG(ha, io_base_addr, 0x7720);
13239 		bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
13240 		    ha->iobase + 0xC0, 16, 32);
13241 		WRT32_IO_REG(ha, io_base_addr, 0x7730);
13242 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13243 
13244 		/* RISC registers. */
13245 
13246 		/* RISC GP */
13247 		WRT32_IO_REG(ha, io_base_addr, 0x0F00);
13248 		bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
13249 		    16, 32);
13250 		WRT32_IO_REG(ha, io_base_addr, 0x0F10);
13251 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13252 		WRT32_IO_REG(ha, io_base_addr, 0x0F20);
13253 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13254 		WRT32_IO_REG(ha, io_base_addr, 0x0F30);
13255 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13256 		WRT32_IO_REG(ha, io_base_addr, 0x0F40);
13257 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13258 		WRT32_IO_REG(ha, io_base_addr, 0x0F50);
13259 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13260 		WRT32_IO_REG(ha, io_base_addr, 0x0F60);
13261 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13262 		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
13263 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13264 
13265 		/* Local memory controller registers. */
13266 
13267 		/* LMC */
13268 		WRT32_IO_REG(ha, io_base_addr, 0x3000);
13269 		bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
13270 		    16, 32);
13271 		WRT32_IO_REG(ha, io_base_addr, 0x3010);
13272 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13273 		WRT32_IO_REG(ha, io_base_addr, 0x3020);
13274 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13275 		WRT32_IO_REG(ha, io_base_addr, 0x3030);
13276 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13277 		WRT32_IO_REG(ha, io_base_addr, 0x3040);
13278 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13279 		WRT32_IO_REG(ha, io_base_addr, 0x3050);
13280 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13281 		WRT32_IO_REG(ha, io_base_addr, 0x3060);
13282 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13283 
13284 		/* Fibre Protocol Module registers. */
13285 
13286 		/* FPM hardware */
13287 		WRT32_IO_REG(ha, io_base_addr, 0x4000);
13288 		bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
13289 		    16, 32);
13290 		WRT32_IO_REG(ha, io_base_addr, 0x4010);
13291 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13292 		WRT32_IO_REG(ha, io_base_addr, 0x4020);
13293 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13294 		WRT32_IO_REG(ha, io_base_addr, 0x4030);
13295 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13296 		WRT32_IO_REG(ha, io_base_addr, 0x4040);
13297 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13298 		WRT32_IO_REG(ha, io_base_addr, 0x4050);
13299 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13300 		WRT32_IO_REG(ha, io_base_addr, 0x4060);
13301 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13302 		WRT32_IO_REG(ha, io_base_addr, 0x4070);
13303 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13304 		WRT32_IO_REG(ha, io_base_addr, 0x4080);
13305 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13306 		WRT32_IO_REG(ha, io_base_addr, 0x4090);
13307 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13308 		WRT32_IO_REG(ha, io_base_addr, 0x40A0);
13309 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13310 		WRT32_IO_REG(ha, io_base_addr, 0x40B0);
13311 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13312 
13313 		/* Frame Buffer registers. */
13314 
13315 		/* FB hardware */
13316 		WRT32_IO_REG(ha, io_base_addr, 0x6000);
13317 		bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
13318 		    16, 32);
13319 		WRT32_IO_REG(ha, io_base_addr, 0x6010);
13320 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13321 		WRT32_IO_REG(ha, io_base_addr, 0x6020);
13322 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13323 		WRT32_IO_REG(ha, io_base_addr, 0x6030);
13324 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13325 		WRT32_IO_REG(ha, io_base_addr, 0x6040);
13326 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13327 		WRT32_IO_REG(ha, io_base_addr, 0x6100);
13328 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13329 		WRT32_IO_REG(ha, io_base_addr, 0x6130);
13330 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13331 		WRT32_IO_REG(ha, io_base_addr, 0x6150);
13332 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13333 		WRT32_IO_REG(ha, io_base_addr, 0x6170);
13334 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13335 		WRT32_IO_REG(ha, io_base_addr, 0x6190);
13336 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13337 		WRT32_IO_REG(ha, io_base_addr, 0x61B0);
13338 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13339 	}
13340 
13341 	/* Get the request queue */
13342 	if (rval == QL_SUCCESS) {
13343 		uint32_t	cnt;
13344 		uint32_t	*w32 = (uint32_t *)ha->request_ring_bp;
13345 
13346 		/* Sync DMA buffer. */
13347 		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
13348 		    REQUEST_Q_BUFFER_OFFSET, sizeof (fw->req_q),
13349 		    DDI_DMA_SYNC_FORKERNEL);
13350 
13351 		for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
13352 			fw->req_q[cnt] = *w32++;
13353 			LITTLE_ENDIAN_32(&fw->req_q[cnt]);
13354 		}
13355 	}
13356 
13357 	/* Get the respons queue */
13358 	if (rval == QL_SUCCESS) {
13359 		uint32_t	cnt;
13360 		uint32_t	*w32 = (uint32_t *)ha->response_ring_bp;
13361 
13362 		/* Sync DMA buffer. */
13363 		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
13364 		    RESPONSE_Q_BUFFER_OFFSET, sizeof (fw->rsp_q),
13365 		    DDI_DMA_SYNC_FORKERNEL);
13366 
13367 		for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
13368 			fw->rsp_q[cnt] = *w32++;
13369 			LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
13370 		}
13371 	}
13372 
13373 	/* Reset RISC. */
13374 	ql_reset_chip(ha);
13375 
13376 	/* Memory. */
13377 	if (rval == QL_SUCCESS) {
13378 		/* Code RAM. */
13379 		rval = ql_read_risc_ram(ha, 0x20000,
13380 		    sizeof (fw->code_ram) / 4, fw->code_ram);
13381 	}
13382 	if (rval == QL_SUCCESS) {
13383 		/* External Memory. */
13384 		rval = ql_read_risc_ram(ha, 0x100000,
13385 		    ha->fw_ext_memory_size / 4, fw->ext_mem);
13386 	}
13387 
13388 	/* Get the extended trace buffer */
13389 	if (rval == QL_SUCCESS) {
13390 		if ((ha->cfg_flags & CFG_ENABLE_FWEXTTRACE) &&
13391 		    (ha->fwexttracebuf.bp != NULL)) {
13392 			uint32_t	cnt;
13393 			uint32_t	*w32 = ha->fwexttracebuf.bp;
13394 
13395 			/* Sync DMA buffer. */
13396 			(void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
13397 			    FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);
13398 
13399 			for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
13400 				fw->ext_trace_buf[cnt] = *w32++;
13401 			}
13402 		}
13403 	}
13404 
13405 	/* Get the FC event trace buffer */
13406 	if (rval == QL_SUCCESS) {
13407 		if ((ha->cfg_flags & CFG_ENABLE_FWFCETRACE) &&
13408 		    (ha->fwfcetracebuf.bp != NULL)) {
13409 			uint32_t	cnt;
13410 			uint32_t	*w32 = ha->fwfcetracebuf.bp;
13411 
13412 			/* Sync DMA buffer. */
13413 			(void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
13414 			    FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);
13415 
13416 			for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
13417 				fw->fce_trace_buf[cnt] = *w32++;
13418 			}
13419 		}
13420 	}
13421 
13422 	if (rval != QL_SUCCESS) {
13423 		EL(ha, "failed=%xh\n", rval);
13424 	} else {
13425 		/*EMPTY*/
13426 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
13427 	}
13428 
13429 	return (rval);
13430 }
13431 
13432 /*
13433  * ql_25xx_binary_fw_dump
13434  *
13435  * Input:
13436  *	ha:	adapter state pointer.
13437  *	fw:	firmware dump context pointer.
13438  *
13439  * Returns:
13440  *	ql local function return status code.
13441  *
13442  * Context:
13443  *	Interrupt or Kernel context, no mailbox commands allowed.
13444  */
13445 static int
13446 ql_25xx_binary_fw_dump(ql_adapter_state_t *ha, ql_25xx_fw_dump_t *fw)
13447 {
13448 	uint32_t	*reg32;
13449 	void		*bp;
13450 	clock_t		timer;
13451 	int		rval = QL_SUCCESS;
13452 
13453 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
13454 	EL(ha, " started\n");
13455 
13456 	fw->r2h_status = RD32_IO_REG(ha, intr_info_lo);
13457 
13458 	/* Pause RISC. */
13459 	if ((RD32_IO_REG(ha, intr_info_lo) & RH_RISC_PAUSED) == 0) {
13460 		/* Disable ISP interrupts. */
13461 		WRT16_IO_REG(ha, ictrl, 0);
13462 
13463 		WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
13464 		for (timer = 30000;
13465 		    (RD32_IO_REG(ha, intr_info_lo) & RH_RISC_PAUSED) == 0 &&
13466 		    rval == QL_SUCCESS; timer--) {
13467 			if (timer) {
13468 				drv_usecwait(100);
13469 				if (timer % 10000 == 0) {
13470 					EL(ha, "risc pause %d\n", timer);
13471 				}
13472 			} else {
13473 				EL(ha, "risc pause timeout\n");
13474 				rval = QL_FUNCTION_TIMEOUT;
13475 			}
13476 		}
13477 	}
13478 
13479 	if (rval == QL_SUCCESS) {
13480 
13481 		/* Host Interface registers */
13482 
13483 		/* HostRisc registers. */
13484 		WRT32_IO_REG(ha, io_base_addr, 0x7000);
13485 		bp = ql_read_regs(ha, fw->hostrisc_reg, ha->iobase + 0xC0,
13486 		    16, 32);
13487 		WRT32_IO_REG(ha, io_base_addr, 0x7010);
13488 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13489 
13490 		/* PCIe registers. */
13491 		WRT32_IO_REG(ha, io_base_addr, 0x7c00);
13492 		WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x1);
13493 		bp = ql_read_regs(ha, fw->pcie_reg, ha->iobase + 0xC4,
13494 		    3, 32);
13495 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 1, 32);
13496 		WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x0);
13497 
13498 		/* Host interface registers. */
13499 		(void) ql_read_regs(ha, fw->host_reg, ha->iobase,
13500 		    sizeof (fw->host_reg) / 4, 32);
13501 
13502 		/* Disable ISP interrupts. */
13503 
13504 		WRT32_IO_REG(ha, ictrl, 0);
13505 		RD32_IO_REG(ha, ictrl);
13506 		ADAPTER_STATE_LOCK(ha);
13507 		ha->flags &= ~INTERRUPTS_ENABLED;
13508 		ADAPTER_STATE_UNLOCK(ha);
13509 
13510 		/* Shadow registers. */
13511 
13512 		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
13513 		RD32_IO_REG(ha, io_base_addr);
13514 
13515 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13516 		WRT_REG_DWORD(ha, reg32, 0xB0000000);
13517 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13518 		fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);
13519 
13520 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13521 		WRT_REG_DWORD(ha, reg32, 0xB0100000);
13522 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13523 		fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);
13524 
13525 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13526 		WRT_REG_DWORD(ha, reg32, 0xB0200000);
13527 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13528 		fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);
13529 
13530 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13531 		WRT_REG_DWORD(ha, reg32, 0xB0300000);
13532 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13533 		fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);
13534 
13535 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13536 		WRT_REG_DWORD(ha, reg32, 0xB0400000);
13537 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13538 		fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);
13539 
13540 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13541 		WRT_REG_DWORD(ha, reg32, 0xB0500000);
13542 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13543 		fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);
13544 
13545 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13546 		WRT_REG_DWORD(ha, reg32, 0xB0600000);
13547 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13548 		fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);
13549 
13550 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13551 		WRT_REG_DWORD(ha, reg32, 0xB0700000);
13552 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13553 		fw->shadow_reg[7] = RD_REG_DWORD(ha, reg32);
13554 
13555 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13556 		WRT_REG_DWORD(ha, reg32, 0xB0800000);
13557 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13558 		fw->shadow_reg[8] = RD_REG_DWORD(ha, reg32);
13559 
13560 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13561 		WRT_REG_DWORD(ha, reg32, 0xB0900000);
13562 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13563 		fw->shadow_reg[9] = RD_REG_DWORD(ha, reg32);
13564 
13565 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13566 		WRT_REG_DWORD(ha, reg32, 0xB0A00000);
13567 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13568 		fw->shadow_reg[0xa] = RD_REG_DWORD(ha, reg32);
13569 
13570 		/* RISC I/O register. */
13571 
13572 		WRT32_IO_REG(ha, io_base_addr, 0x0010);
13573 		(void) ql_read_regs(ha, &fw->risc_io, ha->iobase + 0xC0,
13574 		    1, 32);
13575 
13576 		/* Mailbox registers. */
13577 
13578 		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
13579 		    sizeof (fw->mailbox_reg) / 2, 16);
13580 
13581 		/* Transfer sequence registers. */
13582 
13583 		/* XSEQ GP */
13584 		WRT32_IO_REG(ha, io_base_addr, 0xBF00);
13585 		bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
13586 		    16, 32);
13587 		WRT32_IO_REG(ha, io_base_addr, 0xBF10);
13588 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13589 		WRT32_IO_REG(ha, io_base_addr, 0xBF20);
13590 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13591 		WRT32_IO_REG(ha, io_base_addr, 0xBF30);
13592 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13593 		WRT32_IO_REG(ha, io_base_addr, 0xBF40);
13594 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13595 		WRT32_IO_REG(ha, io_base_addr, 0xBF50);
13596 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13597 		WRT32_IO_REG(ha, io_base_addr, 0xBF60);
13598 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13599 		WRT32_IO_REG(ha, io_base_addr, 0xBF70);
13600 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13601 
13602 		/* XSEQ-0 */
13603 		WRT32_IO_REG(ha, io_base_addr, 0xBFC0);
13604 		bp = ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
13605 		    16, 32);
13606 		WRT32_IO_REG(ha, io_base_addr, 0xBFD0);
13607 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13608 		WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
13609 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13610 
13611 		/* XSEQ-1 */
13612 		WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
13613 		(void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
13614 		    16, 32);
13615 
13616 		/* Receive sequence registers. */
13617 
13618 		/* RSEQ GP */
13619 		WRT32_IO_REG(ha, io_base_addr, 0xFF00);
13620 		bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
13621 		    16, 32);
13622 		WRT32_IO_REG(ha, io_base_addr, 0xFF10);
13623 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13624 		WRT32_IO_REG(ha, io_base_addr, 0xFF20);
13625 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13626 		WRT32_IO_REG(ha, io_base_addr, 0xFF30);
13627 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13628 		WRT32_IO_REG(ha, io_base_addr, 0xFF40);
13629 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13630 		WRT32_IO_REG(ha, io_base_addr, 0xFF50);
13631 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13632 		WRT32_IO_REG(ha, io_base_addr, 0xFF60);
13633 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13634 		WRT32_IO_REG(ha, io_base_addr, 0xFF70);
13635 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13636 
13637 		/* RSEQ-0 */
13638 		WRT32_IO_REG(ha, io_base_addr, 0xFFC0);
13639 		bp = ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
13640 		    16, 32);
13641 		WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
13642 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13643 
13644 		/* RSEQ-1 */
13645 		WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
13646 		(void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
13647 		    sizeof (fw->rseq_1_reg) / 4, 32);
13648 
13649 		/* RSEQ-2 */
13650 		WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
13651 		(void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
13652 		    sizeof (fw->rseq_2_reg) / 4, 32);
13653 
13654 		/* Auxiliary sequencer registers. */
13655 
13656 		/* ASEQ GP */
13657 		WRT32_IO_REG(ha, io_base_addr, 0xB000);
13658 		bp = ql_read_regs(ha, fw->aseq_gp_reg, ha->iobase + 0xC0,
13659 		    16, 32);
13660 		WRT32_IO_REG(ha, io_base_addr, 0xB010);
13661 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13662 		WRT32_IO_REG(ha, io_base_addr, 0xB020);
13663 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13664 		WRT32_IO_REG(ha, io_base_addr, 0xB030);
13665 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13666 		WRT32_IO_REG(ha, io_base_addr, 0xB040);
13667 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13668 		WRT32_IO_REG(ha, io_base_addr, 0xB050);
13669 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13670 		WRT32_IO_REG(ha, io_base_addr, 0xB060);
13671 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13672 		WRT32_IO_REG(ha, io_base_addr, 0xB070);
13673 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13674 
13675 		/* ASEQ-0 */
13676 		WRT32_IO_REG(ha, io_base_addr, 0xB0C0);
13677 		bp = ql_read_regs(ha, fw->aseq_0_reg, ha->iobase + 0xC0,
13678 		    16, 32);
13679 		WRT32_IO_REG(ha, io_base_addr, 0xB0D0);
13680 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13681 
13682 		/* ASEQ-1 */
13683 		WRT32_IO_REG(ha, io_base_addr, 0xB0E0);
13684 		(void) ql_read_regs(ha, fw->aseq_1_reg, ha->iobase + 0xC0,
13685 		    16, 32);
13686 
13687 		/* ASEQ-2 */
13688 		WRT32_IO_REG(ha, io_base_addr, 0xB0F0);
13689 		(void) ql_read_regs(ha, fw->aseq_2_reg, ha->iobase + 0xC0,
13690 		    16, 32);
13691 
13692 		/* Command DMA registers. */
13693 
13694 		WRT32_IO_REG(ha, io_base_addr, 0x7100);
13695 		(void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
13696 		    sizeof (fw->cmd_dma_reg) / 4, 32);
13697 
13698 		/* Queues. */
13699 
13700 		/* RequestQ0 */
13701 		WRT32_IO_REG(ha, io_base_addr, 0x7200);
13702 		bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
13703 		    8, 32);
13704 		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
13705 
13706 		/* ResponseQ0 */
13707 		WRT32_IO_REG(ha, io_base_addr, 0x7300);
13708 		bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
13709 		    8, 32);
13710 		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
13711 
13712 		/* RequestQ1 */
13713 		WRT32_IO_REG(ha, io_base_addr, 0x7400);
13714 		bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
13715 		    8, 32);
13716 		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
13717 
13718 		/* Transmit DMA registers. */
13719 
13720 		/* XMT0 */
13721 		WRT32_IO_REG(ha, io_base_addr, 0x7600);
13722 		bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
13723 		    16, 32);
13724 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13725 		WRT32_IO_REG(ha, io_base_addr, 0x7610);
13726 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13727 
13728 		/* XMT1 */
13729 		WRT32_IO_REG(ha, io_base_addr, 0x7620);
13730 		bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
13731 		    16, 32);
13732 		WRT32_IO_REG(ha, io_base_addr, 0x7630);
13733 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13734 
13735 		/* XMT2 */
13736 		WRT32_IO_REG(ha, io_base_addr, 0x7640);
13737 		bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
13738 		    16, 32);
13739 		WRT32_IO_REG(ha, io_base_addr, 0x7650);
13740 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13741 
13742 		/* XMT3 */
13743 		WRT32_IO_REG(ha, io_base_addr, 0x7660);
13744 		bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
13745 		    16, 32);
13746 		WRT32_IO_REG(ha, io_base_addr, 0x7670);
13747 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13748 
13749 		/* XMT4 */
13750 		WRT32_IO_REG(ha, io_base_addr, 0x7680);
13751 		bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
13752 		    16, 32);
13753 		WRT32_IO_REG(ha, io_base_addr, 0x7690);
13754 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13755 
13756 		/* XMT Common */
13757 		WRT32_IO_REG(ha, io_base_addr, 0x76A0);
13758 		(void) ql_read_regs(ha, fw->xmt_data_dma_reg,
13759 		    ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);
13760 
13761 		/* Receive DMA registers. */
13762 
13763 		/* RCVThread0 */
13764 		WRT32_IO_REG(ha, io_base_addr, 0x7700);
13765 		bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
13766 		    ha->iobase + 0xC0, 16, 32);
13767 		WRT32_IO_REG(ha, io_base_addr, 0x7710);
13768 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13769 
13770 		/* RCVThread1 */
13771 		WRT32_IO_REG(ha, io_base_addr, 0x7720);
13772 		bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
13773 		    ha->iobase + 0xC0, 16, 32);
13774 		WRT32_IO_REG(ha, io_base_addr, 0x7730);
13775 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13776 
13777 		/* RISC registers. */
13778 
13779 		/* RISC GP */
13780 		WRT32_IO_REG(ha, io_base_addr, 0x0F00);
13781 		bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
13782 		    16, 32);
13783 		WRT32_IO_REG(ha, io_base_addr, 0x0F10);
13784 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13785 		WRT32_IO_REG(ha, io_base_addr, 0x0F20);
13786 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13787 		WRT32_IO_REG(ha, io_base_addr, 0x0F30);
13788 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13789 		WRT32_IO_REG(ha, io_base_addr, 0x0F40);
13790 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13791 		WRT32_IO_REG(ha, io_base_addr, 0x0F50);
13792 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13793 		WRT32_IO_REG(ha, io_base_addr, 0x0F60);
13794 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13795 		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
13796 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13797 
13798 		/* Local memory controller (LMC) registers. */
13799 
13800 		/* LMC */
13801 		WRT32_IO_REG(ha, io_base_addr, 0x3000);
13802 		bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
13803 		    16, 32);
13804 		WRT32_IO_REG(ha, io_base_addr, 0x3010);
13805 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13806 		WRT32_IO_REG(ha, io_base_addr, 0x3020);
13807 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13808 		WRT32_IO_REG(ha, io_base_addr, 0x3030);
13809 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13810 		WRT32_IO_REG(ha, io_base_addr, 0x3040);
13811 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13812 		WRT32_IO_REG(ha, io_base_addr, 0x3050);
13813 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13814 		WRT32_IO_REG(ha, io_base_addr, 0x3060);
13815 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13816 		WRT32_IO_REG(ha, io_base_addr, 0x3070);
13817 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13818 
13819 		/* Fibre Protocol Module registers. */
13820 
13821 		/* FPM hardware */
13822 		WRT32_IO_REG(ha, io_base_addr, 0x4000);
13823 		bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
13824 		    16, 32);
13825 		WRT32_IO_REG(ha, io_base_addr, 0x4010);
13826 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13827 		WRT32_IO_REG(ha, io_base_addr, 0x4020);
13828 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13829 		WRT32_IO_REG(ha, io_base_addr, 0x4030);
13830 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13831 		WRT32_IO_REG(ha, io_base_addr, 0x4040);
13832 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13833 		WRT32_IO_REG(ha, io_base_addr, 0x4050);
13834 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13835 		WRT32_IO_REG(ha, io_base_addr, 0x4060);
13836 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13837 		WRT32_IO_REG(ha, io_base_addr, 0x4070);
13838 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13839 		WRT32_IO_REG(ha, io_base_addr, 0x4080);
13840 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13841 		WRT32_IO_REG(ha, io_base_addr, 0x4090);
13842 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13843 		WRT32_IO_REG(ha, io_base_addr, 0x40A0);
13844 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13845 		WRT32_IO_REG(ha, io_base_addr, 0x40B0);
13846 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13847 
13848 		/* Frame Buffer registers. */
13849 
13850 		/* FB hardware */
13851 		WRT32_IO_REG(ha, io_base_addr, 0x6000);
13852 		bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
13853 		    16, 32);
13854 		WRT32_IO_REG(ha, io_base_addr, 0x6010);
13855 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13856 		WRT32_IO_REG(ha, io_base_addr, 0x6020);
13857 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13858 		WRT32_IO_REG(ha, io_base_addr, 0x6030);
13859 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13860 		WRT32_IO_REG(ha, io_base_addr, 0x6040);
13861 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13862 		WRT32_IO_REG(ha, io_base_addr, 0x6100);
13863 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13864 		WRT32_IO_REG(ha, io_base_addr, 0x6130);
13865 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13866 		WRT32_IO_REG(ha, io_base_addr, 0x6150);
13867 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13868 		WRT32_IO_REG(ha, io_base_addr, 0x6170);
13869 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13870 		WRT32_IO_REG(ha, io_base_addr, 0x6190);
13871 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13872 		WRT32_IO_REG(ha, io_base_addr, 0x61B0);
13873 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13874 		WRT32_IO_REG(ha, io_base_addr, 0x6F00);
13875 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13876 	}
13877 
13878 	/* Get the request queue */
13879 	if (rval == QL_SUCCESS) {
13880 		uint32_t	cnt;
13881 		uint32_t	*w32 = (uint32_t *)ha->request_ring_bp;
13882 
13883 		/* Sync DMA buffer. */
13884 		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
13885 		    REQUEST_Q_BUFFER_OFFSET, sizeof (fw->req_q),
13886 		    DDI_DMA_SYNC_FORKERNEL);
13887 
13888 		for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
13889 			fw->req_q[cnt] = *w32++;
13890 			LITTLE_ENDIAN_32(&fw->req_q[cnt]);
13891 		}
13892 	}
13893 
13894 	/* Get the respons queue */
13895 	if (rval == QL_SUCCESS) {
13896 		uint32_t	cnt;
13897 		uint32_t	*w32 = (uint32_t *)ha->response_ring_bp;
13898 
13899 		/* Sync DMA buffer. */
13900 		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
13901 		    RESPONSE_Q_BUFFER_OFFSET, sizeof (fw->rsp_q),
13902 		    DDI_DMA_SYNC_FORKERNEL);
13903 
13904 		for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
13905 			fw->rsp_q[cnt] = *w32++;
13906 			LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
13907 		}
13908 	}
13909 
13910 	/* Reset RISC. */
13911 
13912 	ql_reset_chip(ha);
13913 
13914 	/* Memory. */
13915 
13916 	if (rval == QL_SUCCESS) {
13917 		/* Code RAM. */
13918 		rval = ql_read_risc_ram(ha, 0x20000,
13919 		    sizeof (fw->code_ram) / 4, fw->code_ram);
13920 	}
13921 	if (rval == QL_SUCCESS) {
13922 		/* External Memory. */
13923 		rval = ql_read_risc_ram(ha, 0x100000,
13924 		    ha->fw_ext_memory_size / 4, fw->ext_mem);
13925 	}
13926 
13927 	/* Get the FC event trace buffer */
13928 	if (rval == QL_SUCCESS) {
13929 		if ((ha->cfg_flags & CFG_ENABLE_FWFCETRACE) &&
13930 		    (ha->fwfcetracebuf.bp != NULL)) {
13931 			uint32_t	cnt;
13932 			uint32_t	*w32 = ha->fwfcetracebuf.bp;
13933 
13934 			/* Sync DMA buffer. */
13935 			(void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
13936 			    FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);
13937 
13938 			for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
13939 				fw->fce_trace_buf[cnt] = *w32++;
13940 			}
13941 		}
13942 	}
13943 
13944 	/* Get the extended trace buffer */
13945 	if (rval == QL_SUCCESS) {
13946 		if ((ha->cfg_flags & CFG_ENABLE_FWEXTTRACE) &&
13947 		    (ha->fwexttracebuf.bp != NULL)) {
13948 			uint32_t	cnt;
13949 			uint32_t	*w32 = ha->fwexttracebuf.bp;
13950 
13951 			/* Sync DMA buffer. */
13952 			(void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
13953 			    FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);
13954 
13955 			for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
13956 				fw->ext_trace_buf[cnt] = *w32++;
13957 			}
13958 		}
13959 	}
13960 
13961 	if (rval != QL_SUCCESS) {
13962 		EL(ha, "failed=%xh\n", rval);
13963 	} else {
13964 		/*EMPTY*/
13965 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
13966 	}
13967 
13968 	return (rval);
13969 }
13970 
/*
 * ql_read_risc_ram
 *	Reads RISC RAM one word at a time by driving the
 *	MBC_READ_RAM_EXTENDED mailbox command by hand: the mailbox
 *	registers are written directly and completion is detected by
 *	polling the interrupt status register, so no mailbox command
 *	infrastructure (and no RISC interrupt delivery) is required.
 *	Risc interrupts must be disabled when this routine is called.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	risc_address:	RISC code start address.
 *	len:		Number of words.
 *	buf:		buffer pointer; receives 32-bit words on 24xx/25xx
 *			adapters, 16-bit words on earlier adapters.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
static int
ql_read_risc_ram(ql_adapter_state_t *ha, uint32_t risc_address, uint32_t len,
    void *buf)
{
	uint32_t	cnt;
	uint16_t	stat;
	clock_t		timer;
	uint16_t	*buf16 = (uint16_t *)buf;
	uint32_t	*buf32 = (uint32_t *)buf;
	int		rval = QL_SUCCESS;

	for (cnt = 0; cnt < len; cnt++, risc_address++) {
		/* Issue one READ_RAM_EXTENDED for the current address. */
		WRT16_IO_REG(ha, mailbox[0], MBC_READ_RAM_EXTENDED);
		WRT16_IO_REG(ha, mailbox[1], LSW(risc_address));
		WRT16_IO_REG(ha, mailbox[8], MSW(risc_address));
		CFG_IST(ha, CFG_CTRL_2425) ?
		    WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT) :
		    WRT16_IO_REG(ha, hccr, HC_SET_HOST_INT);
		/* Poll for completion: 6,000,000 * 5us = ~30 seconds. */
		for (timer = 6000000; timer && rval == QL_SUCCESS; timer--) {
			if (RD16_IO_REG(ha, istatus) & RISC_INT) {
				stat = (uint16_t)
				    (RD16_IO_REG(ha, intr_info_lo) & 0xff);
				if ((stat == 1) || (stat == 0x10)) {
					/* Command complete; grab the data. */
					if (CFG_IST(ha, CFG_CTRL_2425)) {
						buf32[cnt] = SHORT_TO_LONG(
						    RD16_IO_REG(ha,
						    mailbox[2]),
						    RD16_IO_REG(ha,
						    mailbox[3]));
					} else {
						buf16[cnt] =
						    RD16_IO_REG(ha, mailbox[2]);
					}

					break;
				} else if ((stat == 2) || (stat == 0x11)) {
					/* Command error; mailbox 0 = status */
					rval = RD16_IO_REG(ha, mailbox[0]);
					break;
				}
				/* Unrelated interrupt; dismiss and re-poll. */
				if (CFG_IST(ha, CFG_CTRL_2425)) {
					WRT32_IO_REG(ha, hccr,
					    HC24_CLR_RISC_INT);
					RD32_IO_REG(ha, hccr);
				} else {
					WRT16_IO_REG(ha, hccr,
					    HC_CLR_RISC_INT);
				}
			}
			drv_usecwait(5);
		}
		/* Always dismiss the RISC interrupt for this word. */
		if (CFG_IST(ha, CFG_CTRL_2425)) {
			WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
			RD32_IO_REG(ha, hccr);
		} else {
			WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
			WRT16_IO_REG(ha, semaphore, 0);
		}

		if (timer == 0) {
			rval = QL_FUNCTION_TIMEOUT;
		}
	}

	return (rval);
}
14053 
14054 /*
14055  * ql_read_regs
14056  *	Reads adapter registers to buffer.
14057  *
14058  * Input:
14059  *	ha:	adapter state pointer.
14060  *	buf:	buffer pointer.
14061  *	reg:	start address.
14062  *	count:	number of registers.
14063  *	wds:	register size.
14064  *
14065  * Context:
14066  *	Interrupt or Kernel context, no mailbox commands allowed.
14067  */
14068 static void *
14069 ql_read_regs(ql_adapter_state_t *ha, void *buf, void *reg, uint32_t count,
14070     uint8_t wds)
14071 {
14072 	uint32_t	*bp32, *reg32;
14073 	uint16_t	*bp16, *reg16;
14074 	uint8_t		*bp8, *reg8;
14075 
14076 	switch (wds) {
14077 	case 32:
14078 		bp32 = buf;
14079 		reg32 = reg;
14080 		while (count--) {
14081 			*bp32++ = RD_REG_DWORD(ha, reg32++);
14082 		}
14083 		return (bp32);
14084 	case 16:
14085 		bp16 = buf;
14086 		reg16 = reg;
14087 		while (count--) {
14088 			*bp16++ = RD_REG_WORD(ha, reg16++);
14089 		}
14090 		return (bp16);
14091 	case 8:
14092 		bp8 = buf;
14093 		reg8 = reg;
14094 		while (count--) {
14095 			*bp8++ = RD_REG_BYTE(ha, reg8++);
14096 		}
14097 		return (bp8);
14098 	default:
14099 		EL(ha, "Unknown word size=%d\n", wds);
14100 		return (buf);
14101 	}
14102 }
14103 
14104 static int
14105 ql_save_config_regs(dev_info_t *dip)
14106 {
14107 	ql_adapter_state_t	*ha;
14108 	int			ret;
14109 	ql_config_space_t	chs;
14110 	caddr_t			prop = "ql-config-space";
14111 
14112 	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
14113 	ASSERT(ha != NULL);
14114 	if (ha == NULL) {
14115 		QL_PRINT_2(CE_CONT, "no adapter ptr\n");
14116 		return (DDI_FAILURE);
14117 	}
14118 
14119 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
14120 
14121 	/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
14122 	if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, prop) ==
14123 	    1) {
14124 		QL_PRINT_2(CE_CONT, "(%d): no prop exit\n", ha->instance);
14125 		return (DDI_SUCCESS);
14126 	}
14127 
14128 	chs.chs_command = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_COMM);
14129 	chs.chs_header_type = (uint8_t)ql_pci_config_get8(ha,
14130 	    PCI_CONF_HEADER);
14131 	if ((chs.chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
14132 		chs.chs_bridge_control = (uint8_t)ql_pci_config_get8(ha,
14133 		    PCI_BCNF_BCNTRL);
14134 	}
14135 
14136 	chs.chs_cache_line_size = (uint8_t)ql_pci_config_get8(ha,
14137 	    PCI_CONF_CACHE_LINESZ);
14138 
14139 	chs.chs_latency_timer = (uint8_t)ql_pci_config_get8(ha,
14140 	    PCI_CONF_LATENCY_TIMER);
14141 
14142 	if ((chs.chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
14143 		chs.chs_sec_latency_timer = (uint8_t)ql_pci_config_get8(ha,
14144 		    PCI_BCNF_LATENCY_TIMER);
14145 	}
14146 
14147 	chs.chs_base0 = ql_pci_config_get32(ha, PCI_CONF_BASE0);
14148 	chs.chs_base1 = ql_pci_config_get32(ha, PCI_CONF_BASE1);
14149 	chs.chs_base2 = ql_pci_config_get32(ha, PCI_CONF_BASE2);
14150 	chs.chs_base3 = ql_pci_config_get32(ha, PCI_CONF_BASE3);
14151 	chs.chs_base4 = ql_pci_config_get32(ha, PCI_CONF_BASE4);
14152 	chs.chs_base5 = ql_pci_config_get32(ha, PCI_CONF_BASE5);
14153 
14154 	/*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
14155 	ret = ndi_prop_update_byte_array(DDI_DEV_T_NONE, dip, prop,
14156 	    (uchar_t *)&chs, sizeof (ql_config_space_t));
14157 
14158 	if (ret != DDI_PROP_SUCCESS) {
14159 		cmn_err(CE_WARN, "!Qlogic %s(%d) can't update prop %s",
14160 		    QL_NAME, ddi_get_instance(dip), prop);
14161 		return (DDI_FAILURE);
14162 	}
14163 
14164 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
14165 
14166 	return (DDI_SUCCESS);
14167 }
14168 
/*
 * ql_restore_config_regs
 *	Restores the adapter's PCI configuration space registers from
 *	the "ql-config-space" devinfo property created by
 *	ql_save_config_regs(), then removes the property.
 *
 * Input:
 *	dip:	device information pointer.
 *
 * Returns:
 *	DDI_SUCCESS, or DDI_FAILURE when no adapter state or no saved
 *	property exists.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_restore_config_regs(dev_info_t *dip)
{
	ql_adapter_state_t	*ha;
	uint_t			elements;
	ql_config_space_t	*chs_p;
	caddr_t			prop = "ql-config-space";

	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
	ASSERT(ha != NULL);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "no adapter ptr\n");
		return (DDI_FAILURE);
	}

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Look up the saved register image; fail if it was never saved. */
	/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, prop,
	    (uchar_t **)&chs_p, &elements) != DDI_PROP_SUCCESS) {
		QL_PRINT_2(CE_CONT, "(%d): no prop exit\n", ha->instance);
		return (DDI_FAILURE);
	}

	ql_pci_config_put16(ha, PCI_CONF_COMM, chs_p->chs_command);

	if ((chs_p->chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
		/*
		 * NOTE(review): the bridge control value was saved with an
		 * 8 bit read but is restored with a 16 bit write, zeroing
		 * the upper byte of the register; confirm this is intended.
		 */
		ql_pci_config_put16(ha, PCI_BCNF_BCNTRL,
		    chs_p->chs_bridge_control);
	}

	ql_pci_config_put8(ha, PCI_CONF_CACHE_LINESZ,
	    chs_p->chs_cache_line_size);

	ql_pci_config_put8(ha, PCI_CONF_LATENCY_TIMER,
	    chs_p->chs_latency_timer);

	if ((chs_p->chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
		ql_pci_config_put8(ha, PCI_BCNF_LATENCY_TIMER,
		    chs_p->chs_sec_latency_timer);
	}

	ql_pci_config_put32(ha, PCI_CONF_BASE0, chs_p->chs_base0);
	ql_pci_config_put32(ha, PCI_CONF_BASE1, chs_p->chs_base1);
	ql_pci_config_put32(ha, PCI_CONF_BASE2, chs_p->chs_base2);
	ql_pci_config_put32(ha, PCI_CONF_BASE3, chs_p->chs_base3);
	ql_pci_config_put32(ha, PCI_CONF_BASE4, chs_p->chs_base4);
	ql_pci_config_put32(ha, PCI_CONF_BASE5, chs_p->chs_base5);

	ddi_prop_free(chs_p);

	/* One-shot: remove the property so a stale image is never reused. */
	/*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
	if (ndi_prop_remove(DDI_DEV_T_NONE, dip, prop) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "!Qlogic %s(%d): can't remove prop %s",
		    QL_NAME, ddi_get_instance(dip), prop);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
14231 
14232 uint8_t
14233 ql_pci_config_get8(ql_adapter_state_t *ha, off_t off)
14234 {
14235 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
14236 		return (ddi_get8(ha->sbus_config_handle,
14237 		    (uint8_t *)(ha->sbus_config_base + off)));
14238 	}
14239 
14240 #ifdef KERNEL_32
14241 	return (pci_config_getb(ha->pci_handle, off));
14242 #else
14243 	return (pci_config_get8(ha->pci_handle, off));
14244 #endif
14245 }
14246 
14247 uint16_t
14248 ql_pci_config_get16(ql_adapter_state_t *ha, off_t off)
14249 {
14250 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
14251 		return (ddi_get16(ha->sbus_config_handle,
14252 		    (uint16_t *)(ha->sbus_config_base + off)));
14253 	}
14254 
14255 #ifdef KERNEL_32
14256 	return (pci_config_getw(ha->pci_handle, off));
14257 #else
14258 	return (pci_config_get16(ha->pci_handle, off));
14259 #endif
14260 }
14261 
14262 uint32_t
14263 ql_pci_config_get32(ql_adapter_state_t *ha, off_t off)
14264 {
14265 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
14266 		return (ddi_get32(ha->sbus_config_handle,
14267 		    (uint32_t *)(ha->sbus_config_base + off)));
14268 	}
14269 
14270 #ifdef KERNEL_32
14271 	return (pci_config_getl(ha->pci_handle, off));
14272 #else
14273 	return (pci_config_get32(ha->pci_handle, off));
14274 #endif
14275 }
14276 
14277 void
14278 ql_pci_config_put8(ql_adapter_state_t *ha, off_t off, uint8_t val)
14279 {
14280 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
14281 		ddi_put8(ha->sbus_config_handle,
14282 		    (uint8_t *)(ha->sbus_config_base + off), val);
14283 	} else {
14284 #ifdef KERNEL_32
14285 		pci_config_putb(ha->pci_handle, off, val);
14286 #else
14287 		pci_config_put8(ha->pci_handle, off, val);
14288 #endif
14289 	}
14290 }
14291 
14292 void
14293 ql_pci_config_put16(ql_adapter_state_t *ha, off_t off, uint16_t val)
14294 {
14295 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
14296 		ddi_put16(ha->sbus_config_handle,
14297 		    (uint16_t *)(ha->sbus_config_base + off), val);
14298 	} else {
14299 #ifdef KERNEL_32
14300 		pci_config_putw(ha->pci_handle, off, val);
14301 #else
14302 		pci_config_put16(ha->pci_handle, off, val);
14303 #endif
14304 	}
14305 }
14306 
14307 void
14308 ql_pci_config_put32(ql_adapter_state_t *ha, off_t off, uint32_t val)
14309 {
14310 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
14311 		ddi_put32(ha->sbus_config_handle,
14312 		    (uint32_t *)(ha->sbus_config_base + off), val);
14313 	} else {
14314 #ifdef KERNEL_32
14315 		pci_config_putl(ha->pci_handle, off, val);
14316 #else
14317 		pci_config_put32(ha->pci_handle, off, val);
14318 #endif
14319 	}
14320 }
14321 
/*
 * ql_halt
 *	Waits for commands that are running to finish and
 *	if they do not, commands are aborted.
 *	Finally the adapter is reset (when powering down to D3).
 *
 * Input:
 *	ha:	adapter state pointer.
 *	pwr:	power state the adapter is transitioning to (PM_LEVEL_*).
 *
 * Context:
 *	Kernel context.
 */
static void
ql_halt(ql_adapter_state_t *ha, int pwr)
{
	uint32_t	cnt;
	ql_tgt_t	*tq;
	ql_srb_t	*sp;
	uint16_t	index;
	ql_link_t	*link;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Wait for all commands running to finish. */
	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;
			(void) ql_abort_device(ha, tq, 0);

			/* Wait for 30 seconds for commands to finish. */
			/* (3000 iterations x 10ms delay per iteration.) */
			for (cnt = 3000; cnt != 0; cnt--) {
				/* Acquire device queue lock. */
				DEVICE_QUEUE_LOCK(tq);
				if (tq->outcnt == 0) {
					/* Release device queue lock. */
					DEVICE_QUEUE_UNLOCK(tq);
					break;
				} else {
					/* Release device queue lock. */
					DEVICE_QUEUE_UNLOCK(tq);
					ql_delay(ha, 10000);
				}
			}

			/* Finish any commands waiting for more status. */
			if (ha->status_srb != NULL) {
				sp = ha->status_srb;
				ha->status_srb = NULL;
				sp->cmd.next = NULL;
				ql_done(&sp->cmd);
			}

			/* Abort commands that did not finish. */
			/* cnt == 0 means the 30 second wait above expired. */
			if (cnt == 0) {
				/* cnt is reused as the outstanding-cmd index */
				for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS;
				    cnt++) {
					/* Restart scan if new cmds queued. */
					if (ha->pending_cmds.first != NULL) {
						ql_start_iocb(ha, NULL);
						cnt = 1;
					}
					sp = ha->outstanding_cmds[cnt];
					/* Only abort cmds for this target. */
					if (sp != NULL &&
					    sp->lun_queue->target_queue ==
					    tq) {
						(void) ql_abort((opaque_t)ha,
						    sp->pkt, 0);
					}
				}
			}
		}
	}

	/* Shutdown IP. */
	if (ha->flags & IP_INITIALIZED) {
		(void) ql_shutdown_ip(ha);
	}

	/* Stop all timers. */
	ADAPTER_STATE_LOCK(ha);
	ha->port_retry_timer = 0;
	ha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
	ha->watchdog_timer = 0;
	ADAPTER_STATE_UNLOCK(ha);

	/* Only take the adapter offline/reset for a full power-off. */
	if (pwr == PM_LEVEL_D3) {
		ADAPTER_STATE_LOCK(ha);
		ha->flags &= ~ONLINE;
		ADAPTER_STATE_UNLOCK(ha);

		/* Reset ISP chip. */
		ql_reset_chip(ha);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
14419 
14420 /*
14421  * ql_get_dma_mem
14422  *	Function used to allocate dma memory.
14423  *
14424  * Input:
14425  *	ha:			adapter state pointer.
14426  *	mem:			pointer to dma memory object.
14427  *	size:			size of the request in bytes
14428  *
14429  * Returns:
14430  *	qn local function return status code.
14431  *
14432  * Context:
14433  *	Kernel context.
14434  */
14435 int
14436 ql_get_dma_mem(ql_adapter_state_t *ha, dma_mem_t *mem, uint32_t size,
14437     mem_alloc_type_t allocation_type, mem_alignment_t alignment)
14438 {
14439 	int	rval;
14440 
14441 	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);
14442 
14443 	mem->size = size;
14444 	mem->type = allocation_type;
14445 	mem->cookie_count = 1;
14446 
14447 	switch (alignment) {
14448 	case QL_DMA_DATA_ALIGN:
14449 		mem->alignment = QL_DMA_ALIGN_8_BYTE_BOUNDARY;
14450 		break;
14451 	case QL_DMA_RING_ALIGN:
14452 		mem->alignment = QL_DMA_ALIGN_64_BYTE_BOUNDARY;
14453 		break;
14454 	default:
14455 		EL(ha, "failed, unknown alignment type %x\n", alignment);
14456 		break;
14457 	}
14458 
14459 	if ((rval = ql_alloc_phys(ha, mem, KM_SLEEP)) != QL_SUCCESS) {
14460 		ql_free_phys(ha, mem);
14461 		EL(ha, "failed, alloc_phys=%xh\n", rval);
14462 	}
14463 
14464 	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);
14465 
14466 	return (rval);
14467 }
14468 
14469 /*
14470  * ql_alloc_phys
14471  *	Function used to allocate memory and zero it.
14472  *	Memory is below 4 GB.
14473  *
14474  * Input:
14475  *	ha:			adapter state pointer.
14476  *	mem:			pointer to dma memory object.
14477  *	sleep:			KM_SLEEP/KM_NOSLEEP flag.
14478  *	mem->cookie_count	number of segments allowed.
14479  *	mem->type		memory allocation type.
14480  *	mem->size		memory size.
14481  *	mem->alignment		memory alignment.
14482  *
14483  * Returns:
14484  *	qn local function return status code.
14485  *
14486  * Context:
14487  *	Kernel context.
14488  */
14489 int
14490 ql_alloc_phys(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
14491 {
14492 	size_t			rlen;
14493 	ddi_dma_attr_t		dma_attr;
14494 	ddi_device_acc_attr_t	acc_attr = ql_dev_acc_attr;
14495 
14496 	QL_PRINT_3(CE_CONT, "(%d): entered\n", ha->instance);
14497 
14498 	dma_attr = CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) ?
14499 	    ql_64bit_io_dma_attr : ql_32bit_io_dma_attr;
14500 
14501 	dma_attr.dma_attr_align = mem->alignment; /* DMA address alignment */
14502 	dma_attr.dma_attr_sgllen = (int)mem->cookie_count;
14503 
14504 	/*
14505 	 * Workaround for SUN XMITS buffer must end and start on 8 byte
14506 	 * boundary. Else, hardware will overrun the buffer. Simple fix is
14507 	 * to make sure buffer has enough room for overrun.
14508 	 */
14509 	if (mem->size & 7) {
14510 		mem->size += 8 - (mem->size & 7);
14511 	}
14512 
14513 	mem->flags = DDI_DMA_CONSISTENT;
14514 
14515 	/*
14516 	 * Allocate DMA memory for command.
14517 	 */
14518 	if (ddi_dma_alloc_handle(ha->dip, &dma_attr, (sleep == KM_SLEEP) ?
14519 	    DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->dma_handle) !=
14520 	    DDI_SUCCESS) {
14521 		EL(ha, "failed, ddi_dma_alloc_handle\n");
14522 		mem->dma_handle = NULL;
14523 		return (QL_MEMORY_ALLOC_FAILED);
14524 	}
14525 
14526 	switch (mem->type) {
14527 	case KERNEL_MEM:
14528 		mem->bp = kmem_zalloc(mem->size, sleep);
14529 		break;
14530 	case BIG_ENDIAN_DMA:
14531 	case LITTLE_ENDIAN_DMA:
14532 	case NO_SWAP_DMA:
14533 		if (mem->type == BIG_ENDIAN_DMA) {
14534 			acc_attr.devacc_attr_endian_flags =
14535 			    DDI_STRUCTURE_BE_ACC;
14536 		} else if (mem->type == NO_SWAP_DMA) {
14537 			acc_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
14538 		}
14539 		if (ddi_dma_mem_alloc(mem->dma_handle, mem->size, &acc_attr,
14540 		    mem->flags, (sleep == KM_SLEEP) ? DDI_DMA_SLEEP :
14541 		    DDI_DMA_DONTWAIT, NULL, (caddr_t *)&mem->bp, &rlen,
14542 		    &mem->acc_handle) == DDI_SUCCESS) {
14543 			bzero(mem->bp, mem->size);
14544 			/* ensure we got what we asked for (32bit) */
14545 			if (dma_attr.dma_attr_addr_hi == NULL) {
14546 				if (mem->cookie.dmac_notused != NULL) {
14547 					EL(ha, "failed, ddi_dma_mem_alloc "
14548 					    "returned 64 bit DMA address\n");
14549 					ql_free_phys(ha, mem);
14550 					return (QL_MEMORY_ALLOC_FAILED);
14551 				}
14552 			}
14553 		} else {
14554 			mem->acc_handle = NULL;
14555 			mem->bp = NULL;
14556 		}
14557 		break;
14558 	default:
14559 		EL(ha, "failed, unknown type=%xh\n", mem->type);
14560 		mem->acc_handle = NULL;
14561 		mem->bp = NULL;
14562 		break;
14563 	}
14564 
14565 	if (mem->bp == NULL) {
14566 		EL(ha, "failed, ddi_dma_mem_alloc\n");
14567 		ddi_dma_free_handle(&mem->dma_handle);
14568 		mem->dma_handle = NULL;
14569 		return (QL_MEMORY_ALLOC_FAILED);
14570 	}
14571 
14572 	mem->flags |= DDI_DMA_RDWR;
14573 
14574 	if (ql_bind_dma_buffer(ha, mem, sleep) != DDI_DMA_MAPPED) {
14575 		EL(ha, "failed, ddi_dma_addr_bind_handle\n");
14576 		ql_free_phys(ha, mem);
14577 		return (QL_MEMORY_ALLOC_FAILED);
14578 	}
14579 
14580 	QL_PRINT_3(CE_CONT, "(%d): exiting\n", ha->instance);
14581 
14582 	return (QL_SUCCESS);
14583 }
14584 
14585 /*
14586  * ql_free_phys
14587  *	Function used to free physical memory.
14588  *
14589  * Input:
14590  *	ha:	adapter state pointer.
14591  *	mem:	pointer to dma memory object.
14592  *
14593  * Context:
14594  *	Kernel context.
14595  */
14596 void
14597 ql_free_phys(ql_adapter_state_t *ha, dma_mem_t *mem)
14598 {
14599 	QL_PRINT_3(CE_CONT, "(%d): entered\n", ha->instance);
14600 
14601 	if (mem != NULL && mem->dma_handle != NULL) {
14602 		ql_unbind_dma_buffer(ha, mem);
14603 		switch (mem->type) {
14604 		case KERNEL_MEM:
14605 			if (mem->bp != NULL) {
14606 				kmem_free(mem->bp, mem->size);
14607 			}
14608 			break;
14609 		case LITTLE_ENDIAN_DMA:
14610 		case BIG_ENDIAN_DMA:
14611 		case NO_SWAP_DMA:
14612 			if (mem->acc_handle != NULL) {
14613 				ddi_dma_mem_free(&mem->acc_handle);
14614 				mem->acc_handle = NULL;
14615 			}
14616 			break;
14617 		default:
14618 			break;
14619 		}
14620 		mem->bp = NULL;
14621 		ddi_dma_free_handle(&mem->dma_handle);
14622 		mem->dma_handle = NULL;
14623 	}
14624 
14625 	QL_PRINT_3(CE_CONT, "(%d): exiting\n", ha->instance);
14626 }
14627 
14628 /*
14629  * ql_alloc_dma_resouce.
14630  *	Allocates DMA resource for buffer.
14631  *
14632  * Input:
14633  *	ha:			adapter state pointer.
14634  *	mem:			pointer to dma memory object.
14635  *	sleep:			KM_SLEEP/KM_NOSLEEP flag.
14636  *	mem->cookie_count	number of segments allowed.
14637  *	mem->type		memory allocation type.
14638  *	mem->size		memory size.
14639  *	mem->bp			pointer to memory or struct buf
14640  *
14641  * Returns:
 *	ql local function return status code.
14643  *
14644  * Context:
14645  *	Kernel context.
14646  */
14647 int
14648 ql_alloc_dma_resouce(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
14649 {
14650 	ddi_dma_attr_t	dma_attr;
14651 
14652 	QL_PRINT_3(CE_CONT, "(%d): entered\n", ha->instance);
14653 
14654 	dma_attr = CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) ?
14655 	    ql_64bit_io_dma_attr : ql_32bit_io_dma_attr;
14656 	dma_attr.dma_attr_sgllen = (int)mem->cookie_count;
14657 
14658 	/*
14659 	 * Allocate DMA handle for command.
14660 	 */
14661 	if (ddi_dma_alloc_handle(ha->dip, &dma_attr, (sleep == KM_SLEEP) ?
14662 	    DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->dma_handle) !=
14663 	    DDI_SUCCESS) {
14664 		EL(ha, "failed, ddi_dma_alloc_handle\n");
14665 		mem->dma_handle = NULL;
14666 		return (QL_MEMORY_ALLOC_FAILED);
14667 	}
14668 
14669 	mem->flags = DDI_DMA_RDWR | DDI_DMA_CONSISTENT;
14670 
14671 	if (ql_bind_dma_buffer(ha, mem, sleep) != DDI_DMA_MAPPED) {
14672 		EL(ha, "failed, bind_dma_buffer\n");
14673 		ddi_dma_free_handle(&mem->dma_handle);
14674 		mem->dma_handle = NULL;
14675 		return (QL_MEMORY_ALLOC_FAILED);
14676 	}
14677 
14678 	QL_PRINT_3(CE_CONT, "(%d): exiting\n", ha->instance);
14679 
14680 	return (QL_SUCCESS);
14681 }
14682 
14683 /*
14684  * ql_free_dma_resource
14685  *	Frees DMA resources.
14686  *
14687  * Input:
14688  *	ha:		adapter state pointer.
14689  *	mem:		pointer to dma memory object.
14690  *	mem->dma_handle	DMA memory handle.
14691  *
14692  * Context:
14693  *	Kernel context.
14694  */
14695 void
14696 ql_free_dma_resource(ql_adapter_state_t *ha, dma_mem_t *mem)
14697 {
14698 	QL_PRINT_3(CE_CONT, "(%d): entered\n", ha->instance);
14699 
14700 	ql_free_phys(ha, mem);
14701 
14702 	QL_PRINT_3(CE_CONT, "(%d): exiting\n", ha->instance);
14703 }
14704 
14705 /*
14706  * ql_bind_dma_buffer
14707  *	Binds DMA buffer.
14708  *
14709  * Input:
14710  *	ha:			adapter state pointer.
14711  *	mem:			pointer to dma memory object.
14712  *	sleep:			KM_SLEEP or KM_NOSLEEP.
14713  *	mem->dma_handle		DMA memory handle.
14714  *	mem->cookie_count	number of segments allowed.
14715  *	mem->type		memory allocation type.
14716  *	mem->size		memory size.
14717  *	mem->bp			pointer to memory or struct buf
14718  *
14719  * Returns:
14720  *	mem->cookies		pointer to list of cookies.
14721  *	mem->cookie_count	number of cookies.
14722  *	status			success = DDI_DMA_MAPPED
14723  *				DDI_DMA_PARTIAL_MAP, DDI_DMA_INUSE,
14724  *				DDI_DMA_NORESOURCES, DDI_DMA_NOMAPPING or
14725  *				DDI_DMA_TOOBIG
14726  *
14727  * Context:
14728  *	Kernel context.
14729  */
static int
ql_bind_dma_buffer(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
{
	int			rval;
	ddi_dma_cookie_t	*cookiep;
	uint32_t		cnt = mem->cookie_count;	/* caller's segment limit */

	QL_PRINT_3(CE_CONT, "(%d): entered\n", ha->instance);

	/*
	 * Bind the buffer to the DMA handle: a struct buf goes through
	 * ddi_dma_buf_bind_handle(), a plain kernel address through
	 * ddi_dma_addr_bind_handle().  Both overwrite mem->cookie_count
	 * with the actual number of segments produced by the bind.
	 */
	if (mem->type == STRUCT_BUF_MEMORY) {
		rval = ddi_dma_buf_bind_handle(mem->dma_handle, mem->bp,
		    mem->flags, (sleep == KM_SLEEP) ? DDI_DMA_SLEEP :
		    DDI_DMA_DONTWAIT, NULL, &mem->cookie, &mem->cookie_count);
	} else {
		rval = ddi_dma_addr_bind_handle(mem->dma_handle, NULL, mem->bp,
		    mem->size, mem->flags, (sleep == KM_SLEEP) ?
		    DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->cookie,
		    &mem->cookie_count);
	}

	if (rval == DDI_DMA_MAPPED) {
		if (mem->cookie_count > cnt) {
			/* Bind produced more segments than allowed; undo. */
			(void) ddi_dma_unbind_handle(mem->dma_handle);
			EL(ha, "failed, cookie_count %d > %d\n",
			    mem->cookie_count, cnt);
			rval = DDI_DMA_TOOBIG;
		} else {
			if (mem->cookie_count > 1) {
				/*
				 * Multi-segment bind: copy the first cookie
				 * into a freshly allocated array, then fetch
				 * the rest with ddi_dma_nextcookie().
				 * NB: assignment in the condition below is
				 * intentional (allocation may fail with
				 * KM_NOSLEEP).
				 */
				if (mem->cookies = kmem_zalloc(
				    sizeof (ddi_dma_cookie_t) *
				    mem->cookie_count, sleep)) {
					*mem->cookies = mem->cookie;
					cookiep = mem->cookies;
					for (cnt = 1; cnt < mem->cookie_count;
					    cnt++) {
						ddi_dma_nextcookie(
						    mem->dma_handle,
						    ++cookiep);
					}
				} else {
					(void) ddi_dma_unbind_handle(
					    mem->dma_handle);
					EL(ha, "failed, kmem_zalloc\n");
					rval = DDI_DMA_NORESOURCES;
				}
			} else {
				/*
				 * It has been reported that dmac_size at times
				 * may be incorrect on sparc machines so for
				 * sparc machines that only have one segment
				 * use the buffer size instead.
				 */
				mem->cookies = &mem->cookie;
				mem->cookies->dmac_size = mem->size;
			}
		}
	}

	if (rval != DDI_DMA_MAPPED) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): exiting\n", ha->instance);
	}

	return (rval);
}
14797 
14798 /*
14799  * ql_unbind_dma_buffer
14800  *	Unbinds DMA buffer.
14801  *
14802  * Input:
14803  *	ha:			adapter state pointer.
14804  *	mem:			pointer to dma memory object.
14805  *	mem->dma_handle		DMA memory handle.
14806  *	mem->cookies		pointer to cookie list.
14807  *	mem->cookie_count	number of cookies.
14808  *
14809  * Context:
14810  *	Kernel context.
14811  */
14812 /* ARGSUSED */
14813 static void
14814 ql_unbind_dma_buffer(ql_adapter_state_t *ha, dma_mem_t *mem)
14815 {
14816 	QL_PRINT_3(CE_CONT, "(%d): entered\n", ha->instance);
14817 
14818 	(void) ddi_dma_unbind_handle(mem->dma_handle);
14819 	if (mem->cookie_count > 1) {
14820 		kmem_free(mem->cookies, sizeof (ddi_dma_cookie_t) *
14821 		    mem->cookie_count);
14822 		mem->cookies = NULL;
14823 	}
14824 	mem->cookie_count = 0;
14825 
14826 	QL_PRINT_3(CE_CONT, "(%d): exiting\n", ha->instance);
14827 }
14828 
/*
 * ql_suspend_adapter
 *	Quiesces the adapter for suspend/power-down: claims exclusive
 *	mailbox ownership, drains outstanding commands, and disables
 *	ISP interrupts.
 *
 * Input:
 *	ha:	adapter state pointer.
 *
 * Returns:
 *	QL_SUCCESS, or QL_FUNCTION_TIMEOUT if mailbox ownership could
 *	not be obtained within the timeout.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_suspend_adapter(ql_adapter_state_t *ha)
{
	clock_t timer;

	/*
	 * First we will claim mbox ownership so that no
	 * thread using mbox hangs when we disable the
	 * interrupt in the middle of it.
	 */
	MBX_REGISTER_LOCK(ha);

	/* Check for mailbox available, if not wait for signal. */
	while (ha->mailbox_flags & MBX_BUSY_FLG) {
		ha->mailbox_flags = (uint8_t)
		    (ha->mailbox_flags | MBX_WANT_FLG);

		/* 32 seconds from now */
		timer = ddi_get_lbolt();
		timer += 32 * drv_usectohz(1000000);
		if (cv_timedwait(&ha->cv_mbx_wait, &ha->mbx_mutex,
		    timer) == -1) {
			/* Timed out without the mailbox ever freeing up. */

			/* Release mailbox register lock. */
			MBX_REGISTER_UNLOCK(ha);
			EL(ha, "failed, Suspend mbox");
			return (QL_FUNCTION_TIMEOUT);
		}
	}

	/* Set busy flag. */
	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags | MBX_BUSY_FLG);
	MBX_REGISTER_UNLOCK(ha);

	/* Wait for in-flight commands to drain. */
	(void) ql_wait_outstanding(ha);

	/*
	 * here we are sure that there will not be any mbox interrupt.
	 * So, let's make sure that we return back all the outstanding
	 * cmds as well as internally queued commands.
	 */
	ql_halt(ha, PM_LEVEL_D0);

	if (ha->power_level != PM_LEVEL_D3) {
		/* Disable ISP interrupts. */
		WRT16_IO_REG(ha, ictrl, 0);
	}

	ADAPTER_STATE_LOCK(ha);
	ha->flags &= ~INTERRUPTS_ENABLED;
	ADAPTER_STATE_UNLOCK(ha);

	MBX_REGISTER_LOCK(ha);
	/* Reset busy status. */
	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags & ~MBX_BUSY_FLG);

	/* If thread is waiting for mailbox go signal it to start. */
	if (ha->mailbox_flags & MBX_WANT_FLG) {
		ha->mailbox_flags = (uint8_t)
		    (ha->mailbox_flags & ~MBX_WANT_FLG);
		cv_broadcast(&ha->cv_mbx_wait);
	}
	/* Release mailbox register lock. */
	MBX_REGISTER_UNLOCK(ha);

	return (QL_SUCCESS);
}
14896 
14897 /*
14898  * ql_add_link_b
14899  *	Add link to the end of the chain.
14900  *
14901  * Input:
14902  *	head = Head of link list.
14903  *	link = link to be added.
14904  *	LOCK must be already obtained.
14905  *
14906  * Context:
14907  *	Interrupt or Kernel context, no mailbox commands allowed.
14908  */
14909 void
14910 ql_add_link_b(ql_head_t *head, ql_link_t *link)
14911 {
14912 	ASSERT(link->base_address != NULL);
14913 
14914 	/* at the end there isn't a next */
14915 	link->next = NULL;
14916 
14917 	if ((link->prev = head->last) == NULL) {
14918 		head->first = link;
14919 	} else {
14920 		head->last->next = link;
14921 	}
14922 
14923 	head->last = link;
14924 	link->head = head;	/* the queue we're on */
14925 }
14926 
14927 /*
14928  * ql_add_link_t
14929  *	Add link to the beginning of the chain.
14930  *
14931  * Input:
14932  *	head = Head of link list.
14933  *	link = link to be added.
14934  *	LOCK must be already obtained.
14935  *
14936  * Context:
14937  *	Interrupt or Kernel context, no mailbox commands allowed.
14938  */
14939 void
14940 ql_add_link_t(ql_head_t *head, ql_link_t *link)
14941 {
14942 	ASSERT(link->base_address != NULL);
14943 
14944 	link->prev = NULL;
14945 
14946 	if ((link->next = head->first) == NULL)	{
14947 		head->last = link;
14948 	} else {
14949 		head->first->prev = link;
14950 	}
14951 
14952 	head->first = link;
14953 	link->head = head;	/* the queue we're on */
14954 }
14955 
14956 /*
14957  * ql_remove_link
14958  *	Remove a link from the chain.
14959  *
14960  * Input:
14961  *	head = Head of link list.
14962  *	link = link to be removed.
14963  *	LOCK must be already obtained.
14964  *
14965  * Context:
14966  *	Interrupt or Kernel context, no mailbox commands allowed.
14967  */
14968 void
14969 ql_remove_link(ql_head_t *head, ql_link_t *link)
14970 {
14971 	ASSERT(link->base_address != NULL);
14972 
14973 	if (link->prev != NULL) {
14974 		if ((link->prev->next = link->next) == NULL) {
14975 			head->last = link->prev;
14976 		} else {
14977 			link->next->prev = link->prev;
14978 		}
14979 	} else if ((head->first = link->next) == NULL) {
14980 		head->last = NULL;
14981 	} else {
14982 		head->first->prev = NULL;
14983 	}
14984 
14985 	/* not on a queue any more */
14986 	link->prev = link->next = NULL;
14987 	link->head = NULL;
14988 }
14989 
14990 /*
14991  * ql_chg_endian
14992  *	Change endianess of byte array.
14993  *
14994  * Input:
14995  *	buf = array pointer.
14996  *	size = size of array in bytes.
14997  *
14998  * Context:
14999  *	Interrupt or Kernel context, no mailbox commands allowed.
15000  */
void
ql_chg_endian(uint8_t buf[], size_t size)
{
	size_t	lo, hi;
	uint8_t	tmp;

	/* Zero- or one-byte arrays need no swapping. */
	if (size < 2) {
		return;
	}

	/* Swap pairs from both ends toward the middle. */
	for (lo = 0, hi = size - 1; lo < hi; lo++, hi--) {
		tmp = buf[lo];
		buf[lo] = buf[hi];
		buf[hi] = tmp;
	}
}
15016 
15017 /*
15018  * ql_bstr_to_dec
15019  *	Convert decimal byte string to number.
15020  *
15021  * Input:
15022  *	s:	byte string pointer.
 *	ans:	integer pointer for number.
15024  *	size:	number of ascii bytes.
15025  *
15026  * Returns:
15027  *	success = number of ascii bytes processed.
15028  *
15029  * Context:
15030  *	Kernel/Interrupt context.
15031  */
static int
ql_bstr_to_dec(char *s, uint32_t *ans, uint32_t size)
{
	int		processed = 0;

	/* When no width was given, count the leading decimal digits. */
	if (size == 0) {
		char	*p;

		for (p = s; *p >= '0' && *p <= '9'; p++) {
			size++;
		}
	}

	*ans = 0;
	while (size != 0 && *s >= '0' && *s <= '9') {
		int		digit = *s++ - '0';
		int		weight = 1;
		uint32_t	pos;

		/* Positional weight is 10^(remaining digits - 1). */
		for (pos = 1; pos < size; pos++) {
			weight *= 10;
		}
		*ans += digit * weight;

		size--;
		processed++;
	}

	return (processed);
}
15061 
15062 /*
15063  * ql_delay
15064  *	Calls delay routine if threads are not suspended, otherwise, busy waits
15065  *	Minimum = 1 tick = 10ms
15066  *
 * Input:
 *	ha = adapter state pointer.
 *	usecs = delay time in microseconds.
15069  *
15070  * Context:
15071  *	Kernel or Interrupt context, no mailbox commands allowed.
15072  */
15073 void
15074 ql_delay(ql_adapter_state_t *ha, clock_t usecs)
15075 {
15076 	if (QL_DAEMON_SUSPENDED(ha) || ddi_in_panic()) {
15077 		drv_usecwait(usecs);
15078 	} else {
15079 		delay(drv_usectohz(usecs));
15080 	}
15081 }
15082 
15083 /*
 * ql_stall_driver
 *	Stalls one or all driver instances, waits up to 30 seconds.
15086  *
15087  * Input:
15088  *	ha:		adapter state pointer or NULL for all.
15089  *	options:	BIT_0 --> leave driver stalled on exit if
15090  *				  failed.
15091  *
15092  * Returns:
15093  *	ql local function return status code.
15094  *
15095  * Context:
15096  *	Kernel context.
15097  */
15098 int
15099 ql_stall_driver(ql_adapter_state_t *ha, uint32_t options)
15100 {
15101 	ql_link_t		*link;
15102 	ql_adapter_state_t	*ha2;
15103 	uint32_t		timer;
15104 
15105 	/* Wait for 30 seconds for daemons unstall. */
15106 	timer = 3000;
15107 	link = ha == NULL ? ql_hba.first : &ha->hba;
15108 	while (link != NULL && timer) {
15109 		ha2 = link->base_address;
15110 
15111 		ql_awaken_task_daemon(ha2, NULL, DRIVER_STALL, 0);
15112 
15113 		if ((ha2->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) == 0 ||
15114 		    (ha2->task_daemon_flags & TASK_DAEMON_STOP_FLG) != 0 ||
15115 		    (ha2->task_daemon_flags & TASK_DAEMON_STALLED_FLG &&
15116 		    ql_wait_outstanding(ha2) == MAX_OUTSTANDING_COMMANDS)) {
15117 			link = ha == NULL ? link->next : NULL;
15118 			continue;
15119 		}
15120 
15121 		ql_delay(ha, 10000);
15122 		timer--;
15123 		link = ha == NULL ? ql_hba.first : &ha->hba;
15124 	}
15125 
15126 	if (ha2 != NULL && timer == 0) {
15127 		EL(ha2, "failed, tdf=%xh, exiting state is: %s\n",
15128 		    ha2->task_daemon_flags, (options & BIT_0 ? "stalled" :
15129 		    "unstalled"));
15130 		if (options & BIT_0) {
15131 			ql_awaken_task_daemon(ha2, NULL, 0, DRIVER_STALL);
15132 		}
15133 		return (QL_FUNCTION_TIMEOUT);
15134 	}
15135 
15136 	QL_PRINT_3(CE_CONT, "done\n");
15137 
15138 	return (QL_SUCCESS);
15139 }
15140 
15141 /*
15142  * ql_restart_driver
15143  *	Restarts one or all driver instances.
15144  *
15145  * Input:
15146  *	ha:	adapter state pointer or NULL for all.
15147  *
15148  * Context:
15149  *	Kernel context.
15150  */
void
ql_restart_driver(ql_adapter_state_t *ha)
{
	ql_link_t		*link;
	ql_adapter_state_t	*ha2;
	uint32_t		timer;

	QL_PRINT_3(CE_CONT, "entered\n");

	/* Tell all daemons to unstall. */
	link = ha == NULL ? ql_hba.first : &ha->hba;
	while (link != NULL) {
		ha2 = link->base_address;

		/* Clear the DRIVER_STALL request for this instance. */
		ql_awaken_task_daemon(ha2, NULL, 0, DRIVER_STALL);

		/* A non-NULL ha means operate on just that one instance. */
		link = ha == NULL ? link->next : NULL;
	}

	/* Wait for 30 seconds for all daemons unstall. */
	timer = 3000;	/* 3000 iterations * 10ms delay = 30 seconds */
	link = ha == NULL ? ql_hba.first : &ha->hba;
	while (link != NULL && timer) {
		ha2 = link->base_address;

		/*
		 * Daemon not running, stopping, or no longer stalled:
		 * consider this instance restarted, resume its queues,
		 * and move on to the next one.
		 */
		if ((ha2->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) == 0 ||
		    (ha2->task_daemon_flags & TASK_DAEMON_STOP_FLG) != 0 ||
		    (ha2->task_daemon_flags & TASK_DAEMON_STALLED_FLG) == 0) {
			QL_PRINT_2(CE_CONT, "(%d,%d): restarted\n",
			    ha2->instance, ha2->vp_index);
			ql_restart_queues(ha2);
			link = ha == NULL ? link->next : NULL;
			continue;
		}

		QL_PRINT_2(CE_CONT, "(%d,%d): failed, tdf=%xh\n",
		    ha2->instance, ha2->vp_index, ha2->task_daemon_flags);

		/* Still stalled; wait 10ms and rescan from the start. */
		ql_delay(ha, 10000);
		timer--;
		link = ha == NULL ? ql_hba.first : &ha->hba;
	}

	QL_PRINT_3(CE_CONT, "exiting\n");
}
15196 
15197 /*
15198  * ql_setup_interrupts
15199  *	Sets up interrupts based on the HBA's and platform's
15200  *	capabilities (e.g., legacy / MSI / FIXED).
15201  *
15202  * Input:
15203  *	ha = adapter state pointer.
15204  *
15205  * Returns:
15206  *	DDI_SUCCESS or DDI_FAILURE.
15207  *
15208  * Context:
15209  *	Kernel context.
15210  */
15211 static int
15212 ql_setup_interrupts(ql_adapter_state_t *ha)
15213 {
15214 	int32_t		rval = DDI_FAILURE;
15215 	int32_t		i;
15216 	int32_t		itypes = 0;
15217 
15218 	QL_PRINT_3(CE_CONT, "(%d): entered\n", ha->instance);
15219 
15220 	/*
15221 	 * The Solaris Advanced Interrupt Functions (aif) are only
15222 	 * supported on s10U1 or greater.
15223 	 */
15224 	if (ql_os_release_level < 10 || ql_disable_aif != 0) {
15225 		EL(ha, "interrupt framework is not supported or is "
15226 		    "disabled, using legacy\n");
15227 		return (ql_legacy_intr(ha));
15228 	} else if (ql_os_release_level == 10) {
15229 		/*
15230 		 * See if the advanced interrupt functions (aif) are
15231 		 * in the kernel
15232 		 */
15233 		void	*fptr = (void *)&ddi_intr_get_supported_types;
15234 
15235 		if (fptr == NULL) {
15236 			EL(ha, "aif is not supported, using legacy "
15237 			    "interrupts (rev)\n");
15238 			return (ql_legacy_intr(ha));
15239 		}
15240 	}
15241 
15242 	/* See what types of interrupts this HBA and platform support */
15243 	if ((i = ddi_intr_get_supported_types(ha->dip, &itypes)) !=
15244 	    DDI_SUCCESS) {
15245 		EL(ha, "get supported types failed, rval=%xh, "
15246 		    "assuming FIXED\n", i);
15247 		itypes = DDI_INTR_TYPE_FIXED;
15248 	}
15249 
15250 	EL(ha, "supported types are: %xh\n", itypes);
15251 
15252 	if ((itypes & DDI_INTR_TYPE_MSIX) &&
15253 	    (rval = ql_setup_msix(ha)) == DDI_SUCCESS) {
15254 		EL(ha, "successful MSI-X setup\n");
15255 	} else if ((itypes & DDI_INTR_TYPE_MSI) &&
15256 	    (rval = ql_setup_msi(ha)) == DDI_SUCCESS) {
15257 		EL(ha, "successful MSI setup\n");
15258 	} else {
15259 		rval = ql_setup_fixed(ha);
15260 	}
15261 
15262 	if (rval != DDI_SUCCESS) {
15263 		EL(ha, "failed, aif, rval=%xh\n", rval);
15264 	} else {
15265 		/*EMPTY*/
15266 		QL_PRINT_3(CE_CONT, "(%d): exiting\n");
15267 	}
15268 
15269 	return (rval);
15270 }
15271 
15272 /*
15273  * ql_setup_msi
15274  *	Set up aif MSI interrupts
15275  *
15276  * Input:
15277  *	ha = adapter state pointer.
15278  *
15279  * Returns:
15280  *	DDI_SUCCESS or DDI_FAILURE.
15281  *
15282  * Context:
15283  *	Kernel context.
15284  */
static int
ql_setup_msi(ql_adapter_state_t *ha)
{
	int32_t		count = 0;
	int32_t		avail = 0;
	int32_t		actual = 0;
	int32_t		msitype = DDI_INTR_TYPE_MSI;
	int32_t		ret;
	ql_ifunc_t	itrfun[10] = {0};

	QL_PRINT_3(CE_CONT, "(%d): entered\n", ha->instance);

	if (ql_disable_msi != 0) {
		EL(ha, "MSI is disabled by user\n");
		return (DDI_FAILURE);
	}

	/* MSI support is only supported on 24xx HBA's. */
	if (!(CFG_IST(ha, CFG_CTRL_2425))) {
		EL(ha, "HBA does not support MSI\n");
		return (DDI_FAILURE);
	}

	/* Get number of MSI interrupts the system supports */
	if (((ret = ddi_intr_get_nintrs(ha->dip, msitype, &count)) !=
	    DDI_SUCCESS) || count == 0) {
		EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
		return (DDI_FAILURE);
	}

	/* Get number of available MSI interrupts */
	if (((ret = ddi_intr_get_navail(ha->dip, msitype, &avail)) !=
	    DDI_SUCCESS) || avail == 0) {
		EL(ha, "failed, navail ret=%xh, avail=%xh\n", ret, avail);
		return (DDI_FAILURE);
	}

	/* MSI requires only 1.  */
	count = 1;
	itrfun[0].ifunc = &ql_isr_aif;

	/* Allocate space for interrupt handles */
	ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * count);
	/* KM_SLEEP allocation; ql_release_intr() frees htable/hsize. */
	ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP);

	ha->iflags |= IFLG_INTR_MSI;

	/* Allocate the interrupts */
	if ((ret = ddi_intr_alloc(ha->dip, ha->htable, msitype, 0, count,
	    &actual, 0)) != DDI_SUCCESS || actual < count) {
		EL(ha, "failed, intr_alloc ret=%xh, count = %xh, "
		    "actual=%xh\n", ret, count, actual);
		ql_release_intr(ha);
		return (DDI_FAILURE);
	}

	ha->intr_cnt = actual;

	/* Get interrupt priority */
	if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
	    DDI_SUCCESS) {
		EL(ha, "failed, get_pri ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Add the interrupt handler */
	if ((ret = ddi_intr_add_handler(ha->htable[0], itrfun[0].ifunc,
	    (caddr_t)ha, (caddr_t)0)) != DDI_SUCCESS) {
		EL(ha, "failed, intr_add ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Setup mutexes - must follow get_pri; they use the intr priority. */
	if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
		EL(ha, "failed, mutex init ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Get the capabilities */
	(void) ddi_intr_get_cap(ha->htable[0], &ha->intr_cap);

	/* Enable interrupts */
	if (ha->intr_cap & DDI_INTR_FLAG_BLOCK) {
		if ((ret = ddi_intr_block_enable(ha->htable, ha->intr_cnt)) !=
		    DDI_SUCCESS) {
			EL(ha, "failed, block enable, ret=%xh\n", ret);
			ql_destroy_mutex(ha);
			ql_release_intr(ha);
			return (ret);
		}
	} else {
		if ((ret = ddi_intr_enable(ha->htable[0])) != DDI_SUCCESS) {
			EL(ha, "failed, intr enable, ret=%xh\n", ret);
			ql_destroy_mutex(ha);
			ql_release_intr(ha);
			return (ret);
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): exiting\n", ha->instance);

	return (DDI_SUCCESS);
}
15391 
15392 /*
15393  * ql_setup_msix
15394  *	Set up aif MSI-X interrupts
15395  *
15396  * Input:
15397  *	ha = adapter state pointer.
15398  *
15399  * Returns:
15400  *	DDI_SUCCESS or DDI_FAILURE.
15401  *
15402  * Context:
15403  *	Kernel context.
15404  */
static int
ql_setup_msix(ql_adapter_state_t *ha)
{
	uint16_t	hwvect;
	int32_t		count = 0;
	int32_t		avail = 0;
	int32_t		actual = 0;
	int32_t		msitype = DDI_INTR_TYPE_MSIX;
	int32_t		ret;
	uint32_t	i;
	ql_ifunc_t	itrfun[QL_MSIX_MAXAIF] = {0};

	QL_PRINT_3(CE_CONT, "(%d): entered\n", ha->instance);

	if (ql_disable_msix != 0) {
		EL(ha, "MSI-X is disabled by user\n");
		return (DDI_FAILURE);
	}

	/*
	 * MSI-X support is only available on 24xx HBA's that have
	 * rev A2 parts (revid = 3) or greater.
	 */
	if (!((ha->device_id == 0x2532) || (ha->device_id == 0x2432) ||
	    (ha->device_id == 0x8432))) {
		EL(ha, "HBA does not support MSI-X\n");
		return (DDI_FAILURE);
	}

	if (CFG_IST(ha, CFG_CTRL_2422) && (ha->rev_id < 3)) {
		EL(ha, "HBA does not support MSI-X (revid)\n");
		return (DDI_FAILURE);
	}

	/* Per HP, these HP branded HBA's are not supported with MSI-X */
	if (ha->ven_id == 0x103C && (ha->subsys_id == 0x7041 ||
	    ha->subsys_id == 0x7040 || ha->subsys_id == 0x1705)) {
		EL(ha, "HBA does not support MSI-X (subdevid)\n");
		return (DDI_FAILURE);
	}

	/*
	 * Get the number of 24xx/25xx MSI-X h/w vectors from the MSI-X
	 * capability's table-size field in PCI config space (low 10
	 * bits, encoded as N-1; offset differs between 24xx and 25xx).
	 */
	hwvect = (uint16_t)(((CFG_IST(ha, CFG_CTRL_2422) ?
	    ql_pci_config_get16(ha, 0x7e) :
	    ql_pci_config_get16(ha, 0xa2)) & 0x3ff) + 1);

	EL(ha, "pcie config space hwvect = %d\n", hwvect);

	if (hwvect < QL_MSIX_MAXAIF) {
		EL(ha, "failed, min h/w vectors req'd: %d, avail: %d\n",
		    QL_MSIX_MAXAIF, hwvect);
		return (DDI_FAILURE);
	}

	/* Get number of MSI-X interrupts the platform h/w supports */
	if (((ret = ddi_intr_get_nintrs(ha->dip, msitype, &count)) !=
	    DDI_SUCCESS) || count == 0) {
		EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
		return (DDI_FAILURE);
	}

	/* Get number of available system interrupts */
	if (((ret = ddi_intr_get_navail(ha->dip, msitype, &avail)) !=
	    DDI_SUCCESS) || avail == 0) {
		EL(ha, "failed, navail ret=%xh, avail=%xh\n", ret, avail);
		return (DDI_FAILURE);
	}

	/* Fill out the intr table */
	count = QL_MSIX_MAXAIF;
	itrfun[QL_MSIX_AIF].ifunc = &ql_isr_aif;
	itrfun[QL_MSIX_RSPQ].ifunc = &ql_isr_aif;

	/* Allocate space for interrupt handles */
	ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * hwvect);
	/*
	 * NOTE(review): kmem_zalloc(KM_SLEEP) should not return NULL;
	 * this check appears to be defensive only.
	 */
	if ((ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP)) == NULL) {
		ha->hsize = 0;
		EL(ha, "failed, unable to allocate htable space\n");
		return (DDI_FAILURE);
	}

	ha->iflags |= IFLG_INTR_MSIX;

	/* Allocate the interrupts */
	if (((ret = ddi_intr_alloc(ha->dip, ha->htable, msitype,
	    DDI_INTR_ALLOC_NORMAL, count, &actual, 0)) != DDI_SUCCESS) ||
	    actual < QL_MSIX_MAXAIF) {
		EL(ha, "failed, intr_alloc ret=%xh, count = %xh, "
		    "actual=%xh\n", ret, count, actual);
		ql_release_intr(ha);
		return (DDI_FAILURE);
	}

	ha->intr_cnt = actual;

	/* Get interrupt priority */
	if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
	    DDI_SUCCESS) {
		EL(ha, "failed, get_pri ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Add the interrupt handlers; the vector number is the arg2 cookie. */
	for (i = 0; i < actual; i++) {
		if ((ret = ddi_intr_add_handler(ha->htable[i], itrfun[i].ifunc,
		    (void *)ha, (void *)((ulong_t)i))) != DDI_SUCCESS) {
			EL(ha, "failed, addh#=%xh, act=%xh, ret=%xh\n", i,
			    actual, ret);
			ql_release_intr(ha);
			return (ret);
		}
	}

	/*
	 * duplicate the rest of the intr's
	 * ddi_intr_dup_handler() isn't working on x86 just yet...
	 */
#ifdef __sparc
	for (i = actual; i < hwvect; i++) {
		if ((ret = ddi_intr_dup_handler(ha->htable[0], (int)i,
		    &ha->htable[i])) != DDI_SUCCESS) {
			EL(ha, "failed, intr_dup#=%xh, act=%xh, ret=%xh\n",
			    i, actual, ret);
			ql_release_intr(ha);
			return (ret);
		}
	}
#endif

	/* Setup mutexes - must follow get_pri; they use the intr priority. */
	if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
		EL(ha, "failed, mutex init ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Get the capabilities */
	(void) ddi_intr_get_cap(ha->htable[0], &ha->intr_cap);

	/* Enable interrupts */
	if (ha->intr_cap & DDI_INTR_FLAG_BLOCK) {
		if ((ret = ddi_intr_block_enable(ha->htable, ha->intr_cnt)) !=
		    DDI_SUCCESS) {
			EL(ha, "failed, block enable, ret=%xh\n", ret);
			ql_destroy_mutex(ha);
			ql_release_intr(ha);
			return (ret);
		}
	} else {
		for (i = 0; i < ha->intr_cnt; i++) {
			if ((ret = ddi_intr_enable(ha->htable[i])) !=
			    DDI_SUCCESS) {
				EL(ha, "failed, intr enable, ret=%xh\n", ret);
				ql_destroy_mutex(ha);
				ql_release_intr(ha);
				return (ret);
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): exiting\n", ha->instance);

	return (DDI_SUCCESS);
}
15570 
15571 /*
15572  * ql_setup_fixed
15573  *	Sets up aif FIXED interrupts
15574  *
15575  * Input:
15576  *	ha = adapter state pointer.
15577  *
15578  * Returns:
15579  *	DDI_SUCCESS or DDI_FAILURE.
15580  *
15581  * Context:
15582  *	Kernel context.
15583  */
static int
ql_setup_fixed(ql_adapter_state_t *ha)
{
	int32_t		count = 0;
	int32_t		actual = 0;
	int32_t		ret;
	uint32_t	i;

	QL_PRINT_3(CE_CONT, "(%d): entered\n", ha->instance);

	/* Get number of fixed interrupts the system supports */
	if (((ret = ddi_intr_get_nintrs(ha->dip, DDI_INTR_TYPE_FIXED,
	    &count)) != DDI_SUCCESS) || count == 0) {
		EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
		return (DDI_FAILURE);
	}

	ha->iflags |= IFLG_INTR_FIXED;

	/* Allocate space for interrupt handles */
	ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * count);
	/* KM_SLEEP allocation; ql_release_intr() frees htable/hsize. */
	ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP);

	/* Allocate the interrupts - STRICT: all or nothing. */
	if (((ret = ddi_intr_alloc(ha->dip, ha->htable, DDI_INTR_TYPE_FIXED,
	    0, count, &actual, DDI_INTR_ALLOC_STRICT)) != DDI_SUCCESS) ||
	    actual < count) {
		EL(ha, "failed, intr_alloc ret=%xh, count=%xh, "
		    "actual=%xh\n", ret, count, actual);
		ql_release_intr(ha);
		return (DDI_FAILURE);
	}

	ha->intr_cnt = actual;

	/* Get interrupt priority */
	if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
	    DDI_SUCCESS) {
		EL(ha, "failed, get_pri ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Add the interrupt handlers; the vector number is the arg2 cookie. */
	for (i = 0; i < ha->intr_cnt; i++) {
		if ((ret = ddi_intr_add_handler(ha->htable[i], &ql_isr_aif,
		    (void *)ha, (void *)((ulong_t)(i)))) != DDI_SUCCESS) {
			EL(ha, "failed, intr_add ret=%xh\n", ret);
			ql_release_intr(ha);
			return (ret);
		}
	}

	/* Setup mutexes - must follow get_pri; they use the intr priority. */
	if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
		EL(ha, "failed, mutex init ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Enable interrupts */
	for (i = 0; i < ha->intr_cnt; i++) {
		if ((ret = ddi_intr_enable(ha->htable[i])) != DDI_SUCCESS) {
			EL(ha, "failed, intr enable, ret=%xh\n", ret);
			ql_destroy_mutex(ha);
			ql_release_intr(ha);
			return (ret);
		}
	}

	EL(ha, "using FIXED interupts\n");

	QL_PRINT_3(CE_CONT, "(%d): exiting\n", ha->instance);

	return (DDI_SUCCESS);
}
15660 
15661 /*
15662  * ql_disable_intr
15663  *	Disables interrupts
15664  *
15665  * Input:
15666  *	ha = adapter state pointer.
15667  *
15668  * Returns:
15669  *
15670  * Context:
15671  *	Kernel context.
15672  */
15673 static void
15674 ql_disable_intr(ql_adapter_state_t *ha)
15675 {
15676 	uint32_t	i, rval;
15677 
15678 	QL_PRINT_3(CE_CONT, "(%d): entered\n", ha->instance);
15679 
15680 	if (!(ha->iflags & IFLG_INTR_AIF)) {
15681 
15682 		/* Disable legacy interrupts */
15683 		(void) ddi_remove_intr(ha->dip, 0, ha->iblock_cookie);
15684 
15685 	} else if ((ha->intr_cap & DDI_INTR_FLAG_BLOCK) &&
15686 	    (ha->iflags & (IFLG_INTR_MSI | IFLG_INTR_MSIX))) {
15687 
15688 		/* Remove AIF block interrupts (MSI) */
15689 		if ((rval = ddi_intr_block_disable(ha->htable, ha->intr_cnt))
15690 		    != DDI_SUCCESS) {
15691 			EL(ha, "failed intr block disable, rval=%x\n", rval);
15692 		}
15693 
15694 	} else {
15695 
15696 		/* Remove AIF non-block interrupts (fixed).  */
15697 		for (i = 0; i < ha->intr_cnt; i++) {
15698 			if ((rval = ddi_intr_disable(ha->htable[i])) !=
15699 			    DDI_SUCCESS) {
15700 				EL(ha, "failed intr disable, intr#=%xh, "
15701 				    "rval=%xh\n", i, rval);
15702 			}
15703 		}
15704 	}
15705 
15706 	QL_PRINT_3(CE_CONT, "(%d): exiting\n", ha->instance);
15707 }
15708 
15709 /*
15710  * ql_release_intr
15711  *	Releases aif legacy interrupt resources
15712  *
15713  * Input:
15714  *	ha = adapter state pointer.
15715  *
15716  * Returns:
15717  *
15718  * Context:
15719  *	Kernel context.
15720  */
static void
ql_release_intr(ql_adapter_state_t *ha)
{
	int32_t 	i;

	QL_PRINT_3(CE_CONT, "(%d): entered\n", ha->instance);

	/* Nothing to release when aif interrupts were never set up. */
	if (!(ha->iflags & IFLG_INTR_AIF)) {
		QL_PRINT_3(CE_CONT, "(%d): exiting\n", ha->instance);
		return;
	}

	ha->iflags &= ~(IFLG_INTR_AIF);
	if (ha->htable != NULL && ha->hsize > 0) {
		/*
		 * hsize is in bytes; walk the handle table backwards.
		 * The table may be larger than intr_cnt (e.g. duplicated
		 * MSI-X vectors), so only the first intr_cnt entries have
		 * handlers to remove.
		 */
		i = (int32_t)ha->hsize / (int32_t)sizeof (ddi_intr_handle_t);
		while (i-- > 0) {
			if (ha->htable[i] == 0) {
				EL(ha, "htable[%x]=0h\n", i);
				continue;
			}

			(void) ddi_intr_disable(ha->htable[i]);

			if (i < ha->intr_cnt) {
				(void) ddi_intr_remove_handler(ha->htable[i]);
			}

			(void) ddi_intr_free(ha->htable[i]);
		}

		kmem_free(ha->htable, ha->hsize);
		ha->htable = NULL;
	}

	/* Reset all aif bookkeeping. */
	ha->hsize = 0;
	ha->intr_cnt = 0;
	ha->intr_pri = 0;
	ha->intr_cap = 0;

	QL_PRINT_3(CE_CONT, "(%d): exiting\n", ha->instance);
}
15762 
15763 /*
15764  * ql_legacy_intr
15765  *	Sets up legacy interrupts.
15766  *
 *	NB: Only to be used if AIF (Advanced Interrupt Framework)
 *	    is NOT in the kernel.
15769  *
15770  * Input:
15771  *	ha = adapter state pointer.
15772  *
15773  * Returns:
15774  *	DDI_SUCCESS or DDI_FAILURE.
15775  *
15776  * Context:
15777  *	Kernel context.
15778  */
15779 static int
15780 ql_legacy_intr(ql_adapter_state_t *ha)
15781 {
15782 	int	rval = DDI_SUCCESS;
15783 
15784 	QL_PRINT_3(CE_CONT, "(%d): entered\n", ha->instance);
15785 
15786 	/* Setup mutexes */
15787 	if (ql_init_mutex(ha) != DDI_SUCCESS) {
15788 		EL(ha, "failed, mutex init\n");
15789 		return (DDI_FAILURE);
15790 	}
15791 
15792 	/* Setup standard/legacy interrupt handler */
15793 	if (ddi_add_intr(ha->dip, (uint_t)0, &ha->iblock_cookie,
15794 	    (ddi_idevice_cookie_t *)0, ql_isr, (caddr_t)ha) != DDI_SUCCESS) {
15795 		cmn_err(CE_WARN, "%s(%d): Failed to add legacy interrupt",
15796 		    QL_NAME, ha->instance);
15797 		ql_destroy_mutex(ha);
15798 		rval = DDI_FAILURE;
15799 	}
15800 
15801 	if (rval == DDI_SUCCESS) {
15802 		ha->iflags |= IFLG_INTR_LEGACY;
15803 		EL(ha, "using legacy interrupts\n");
15804 	}
15805 
15806 	QL_PRINT_3(CE_CONT, "(%d): exiting\n", ha->instance);
15807 
15808 	return (rval);
15809 }
15810 
15811 /*
15812  * ql_init_mutex
 *	Initializes mutexes
15814  *
15815  * Input:
15816  *	ha = adapter state pointer.
15817  *
15818  * Returns:
15819  *	DDI_SUCCESS or DDI_FAILURE.
15820  *
15821  * Context:
15822  *	Kernel context.
15823  */
15824 static int
15825 ql_init_mutex(ql_adapter_state_t *ha)
15826 {
15827 	int	ret;
15828 	void	*intr;
15829 
15830 	QL_PRINT_3(CE_CONT, "(%d): entered\n", ha->instance);
15831 
15832 	if (ha->iflags & IFLG_INTR_AIF) {
15833 		intr = (void *)(uintptr_t)ha->intr_pri;
15834 	} else {
15835 		/* Get iblock cookies to initialize mutexes */
15836 		if ((ret = ddi_get_iblock_cookie(ha->dip, 0,
15837 		    &ha->iblock_cookie)) != DDI_SUCCESS) {
15838 			EL(ha, "failed, get_iblock: %xh\n", ret);
15839 			return (DDI_FAILURE);
15840 		}
15841 		intr = (void *)ha->iblock_cookie;
15842 	}
15843 
15844 	/* mutexes to protect the adapter state structure. */
15845 	mutex_init(&ha->mutex, NULL, MUTEX_DRIVER, intr);
15846 
15847 	/* mutex to protect the ISP response ring. */
15848 	mutex_init(&ha->intr_mutex, NULL, MUTEX_DRIVER, intr);
15849 
15850 	/* mutex to protect the mailbox registers. */
15851 	mutex_init(&ha->mbx_mutex, NULL, MUTEX_DRIVER, intr);
15852 
15853 	/* power management protection */
15854 	mutex_init(&ha->pm_mutex, NULL, MUTEX_DRIVER, intr);
15855 
15856 	/* Mailbox wait and interrupt conditional variable. */
15857 	cv_init(&ha->cv_mbx_wait, NULL, CV_DRIVER, NULL);
15858 	cv_init(&ha->cv_mbx_intr, NULL, CV_DRIVER, NULL);
15859 
15860 	/* mutex to protect the ISP request ring. */
15861 	mutex_init(&ha->req_ring_mutex, NULL, MUTEX_DRIVER, intr);
15862 
15863 	/* Unsolicited buffer conditional variable. */
15864 	cv_init(&ha->cv_ub, NULL, CV_DRIVER, NULL);
15865 
15866 	mutex_init(&ha->ub_mutex, NULL, MUTEX_DRIVER, intr);
15867 	mutex_init(&ha->cache_mutex, NULL, MUTEX_DRIVER, intr);
15868 
15869 	/* Suspended conditional variable. */
15870 	cv_init(&ha->cv_dr_suspended, NULL, CV_DRIVER, NULL);
15871 
15872 	/* mutex to protect task daemon context. */
15873 	mutex_init(&ha->task_daemon_mutex, NULL, MUTEX_DRIVER, intr);
15874 
15875 	/* Task_daemon thread conditional variable. */
15876 	cv_init(&ha->cv_task_daemon, NULL, CV_DRIVER, NULL);
15877 
15878 	/* mutex to protect notify acknowledge list */
15879 	mutex_init(&ha->ql_nack_mtx, NULL, MUTEX_DRIVER, intr);
15880 	ha->ql_nack = NULL;
15881 
15882 	/* mutex to protect diag port manage interface */
15883 	mutex_init(&ha->portmutex, NULL, MUTEX_DRIVER, intr);
15884 
15885 	/* mutex to protect per instance f/w dump flags and buffer */
15886 	mutex_init(&ha->dump_mutex, NULL, MUTEX_DRIVER, intr);
15887 
15888 	QL_PRINT_3(CE_CONT, "(%d): exiting\n", ha->instance);
15889 
15890 	return (DDI_SUCCESS);
15891 }
15892 
15893 /*
15894  * ql_destroy_mutex
 *	Destroys mutexes
15896  *
15897  * Input:
15898  *	ha = adapter state pointer.
15899  *
15900  * Returns:
15901  *
15902  * Context:
15903  *	Kernel context.
15904  */
15905 static void
15906 ql_destroy_mutex(ql_adapter_state_t *ha)
15907 {
15908 	QL_PRINT_3(CE_CONT, "(%d): entered\n", ha->instance);
15909 
15910 	mutex_destroy(&ha->task_daemon_mutex);
15911 	cv_destroy(&ha->cv_task_daemon);
15912 	cv_destroy(&ha->cv_dr_suspended);
15913 	mutex_destroy(&ha->ub_mutex);
15914 	cv_destroy(&ha->cv_ub);
15915 	mutex_destroy(&ha->req_ring_mutex);
15916 	mutex_destroy(&ha->mbx_mutex);
15917 	cv_destroy(&ha->cv_mbx_intr);
15918 	cv_destroy(&ha->cv_mbx_wait);
15919 	mutex_destroy(&ha->pm_mutex);
15920 	mutex_destroy(&ha->intr_mutex);
15921 	mutex_destroy(&ha->portmutex);
15922 	mutex_destroy(&ha->mutex);
15923 	mutex_destroy(&ha->ql_nack_mtx);
15924 	mutex_destroy(&ha->cache_mutex);
15925 	mutex_destroy(&ha->dump_mutex);
15926 
15927 	QL_PRINT_3(CE_CONT, "(%d): exiting\n", ha->instance);
15928 }
15929 
15930 /*
15931  * ql_fwmodule_resolve
15932  *	Loads and resolves external firmware module and symbols
15933  *
15934  * Input:
15935  *	ha:		adapter state pointer.
15936  *
15937  * Returns:
15938  *	ql local function return status code:
 *		QL_SUCCESS - external f/w module and symbols resolved
15940  *		QL_FW_NOT_SUPPORTED - Driver does not support ISP type
15941  *		QL_FWMODLOAD_FAILED - Could not load f/w module (ddi failed)
15942  *		QL_FWSYM_NOT_FOUND - Unable to resolve internal f/w symbol
15943  * Context:
15944  *	Kernel context.
15945  *
15946  * NOTE: We currently ddi_modopen/ddi_modclose at attach/detach time.  We
 * could switch to a tighter scope around actual download (and add an extra
15948  * ddi_modopen for module opens that occur before root is mounted).
15949  *
15950  */
15951 uint32_t
15952 ql_fwmodule_resolve(ql_adapter_state_t *ha)
15953 {
15954 	int8_t			module[128];
15955 	int8_t			fw_version[128];
15956 	uint32_t		rval = QL_SUCCESS;
15957 	caddr_t			code, code02;
15958 	uint8_t			*p_ucfw;
15959 	uint16_t		*p_usaddr, *p_uslen;
15960 	uint32_t		*p_uiaddr, *p_uilen, *p_uifw;
15961 	uint32_t		*p_uiaddr02, *p_uilen02;
15962 	struct fw_table		*fwt;
15963 	extern struct fw_table	fw_table[];
15964 
15965 	if (ha->fw_module != NULL) {
15966 		EL(ha, "%x f/w module %d.%02d.%02d is already loaded\n",
15967 		    ha->fw_class, ha->fw_major_version, ha->fw_minor_version,
15968 		    ha->fw_subminor_version);
15969 		return (rval);
15970 	}
15971 
15972 	/* make sure the fw_class is in the fw_table of supported classes */
15973 	for (fwt = &fw_table[0]; fwt->fw_version; fwt++) {
15974 		if (fwt->fw_class == ha->fw_class)
15975 			break;			/* match */
15976 	}
15977 	if (fwt->fw_version == NULL) {
15978 		cmn_err(CE_WARN, "%s(%d): can't find f/w class %x "
15979 		    "in driver's fw_table", QL_NAME, ha->instance,
15980 		    ha->fw_class);
15981 		return (QL_FW_NOT_SUPPORTED);
15982 	}
15983 
15984 	/*
15985 	 * open the module related to the fw_class
15986 	 */
15987 	(void) snprintf(module, sizeof (module), "misc/qlc/qlc_fw_%x",
15988 	    ha->fw_class);
15989 
15990 	ha->fw_module = ddi_modopen(module, KRTLD_MODE_FIRST, NULL);
15991 	if (ha->fw_module == NULL) {
15992 		cmn_err(CE_WARN, "%s(%d): can't load firmware file %s",
15993 		    QL_NAME, ha->instance, module);
15994 		return (QL_FWMODLOAD_FAILED);
15995 	}
15996 
15997 	/*
15998 	 * resolve the fw module symbols, data types depend on fw_class
15999 	 */
16000 
16001 	switch (ha->fw_class) {
16002 	case 0x2200:
16003 	case 0x2300:
16004 	case 0x6322:
16005 
16006 		if ((code = ddi_modsym(ha->fw_module, "risc_code01",
16007 		    NULL)) == NULL) {
16008 			rval = QL_FWSYM_NOT_FOUND;
16009 			EL(ha, "failed, f/w module %d rc01 symbol\n", module);
16010 		} else if ((p_usaddr = ddi_modsym(ha->fw_module,
16011 		    "risc_code_addr01", NULL)) == NULL) {
16012 			rval = QL_FWSYM_NOT_FOUND;
16013 			EL(ha, "failed, f/w module %d rca01 symbol\n", module);
16014 		} else if ((p_uslen = ddi_modsym(ha->fw_module,
16015 		    "risc_code_length01", NULL)) == NULL) {
16016 			rval = QL_FWSYM_NOT_FOUND;
16017 			EL(ha, "failed, f/w module %d rcl01 symbol\n", module);
16018 		} else if ((p_ucfw = ddi_modsym(ha->fw_module,
16019 		    "firmware_version", NULL)) == NULL) {
16020 			rval = QL_FWSYM_NOT_FOUND;
16021 			EL(ha, "failed, f/w module %d fwver symbol\n", module);
16022 		}
16023 
16024 		if (rval == QL_SUCCESS) {
16025 			ha->risc_fw[0].code = code;
16026 			ha->risc_fw[0].addr = *p_usaddr;
16027 			ha->risc_fw[0].length = *p_uslen;
16028 
16029 			(void) snprintf(fw_version, sizeof (fw_version),
16030 			    "%d.%02d.%02d", p_ucfw[0], p_ucfw[1], p_ucfw[2]);
16031 		}
16032 		break;
16033 
16034 	case 0x2400:
16035 	case 0x2500:
16036 
16037 		if ((code = ddi_modsym(ha->fw_module, "risc_code01",
16038 		    NULL)) == NULL) {
16039 			rval = QL_FWSYM_NOT_FOUND;
16040 			EL(ha, "failed, f/w module %d rc01 symbol\n", module);
16041 		} else if ((p_uiaddr = ddi_modsym(ha->fw_module,
16042 		    "risc_code_addr01", NULL)) == NULL) {
16043 			rval = QL_FWSYM_NOT_FOUND;
16044 			EL(ha, "failed, f/w module %d rca01 symbol\n", module);
16045 		} else if ((p_uilen = ddi_modsym(ha->fw_module,
16046 		    "risc_code_length01", NULL)) == NULL) {
16047 			rval = QL_FWSYM_NOT_FOUND;
16048 			EL(ha, "failed, f/w module %d rcl01 symbol\n", module);
16049 		} else if ((p_uifw = ddi_modsym(ha->fw_module,
16050 		    "firmware_version", NULL)) == NULL) {
16051 			rval = QL_FWSYM_NOT_FOUND;
16052 			EL(ha, "failed, f/w module %d fwver symbol\n", module);
16053 		}
16054 
16055 		if ((code02 = ddi_modsym(ha->fw_module, "risc_code02",
16056 		    NULL)) == NULL) {
16057 			rval = QL_FWSYM_NOT_FOUND;
16058 			EL(ha, "failed, f/w module %d rc02 symbol\n", module);
16059 		} else if ((p_uiaddr02 = ddi_modsym(ha->fw_module,
16060 		    "risc_code_addr02", NULL)) == NULL) {
16061 			rval = QL_FWSYM_NOT_FOUND;
16062 			EL(ha, "failed, f/w module %d rca02 symbol\n", module);
16063 		} else if ((p_uilen02 = ddi_modsym(ha->fw_module,
16064 		    "risc_code_length02", NULL)) == NULL) {
16065 			rval = QL_FWSYM_NOT_FOUND;
16066 			EL(ha, "failed, f/w module %d rcl02 symbol\n", module);
16067 		}
16068 
16069 		if (rval == QL_SUCCESS) {
16070 			ha->risc_fw[0].code = code;
16071 			ha->risc_fw[0].addr = *p_uiaddr;
16072 			ha->risc_fw[0].length = *p_uilen;
16073 			ha->risc_fw[1].code = code02;
16074 			ha->risc_fw[1].addr = *p_uiaddr02;
16075 			ha->risc_fw[1].length = *p_uilen02;
16076 
16077 			(void) snprintf(fw_version, sizeof (fw_version),
16078 			    "%d.%02d.%02d", p_uifw[0], p_uifw[1], p_uifw[2]);
16079 		}
16080 		break;
16081 
16082 	default:
16083 		EL(ha, "fw_class: '%x' is not supported\n", ha->fw_class);
16084 		rval = QL_FW_NOT_SUPPORTED;
16085 	}
16086 
16087 	if (rval != QL_SUCCESS) {
16088 		cmn_err(CE_WARN, "%s(%d): can't resolve firmware "
16089 		    "module %s (%x)", QL_NAME, ha->instance, module, rval);
16090 		if (ha->fw_module != NULL) {
16091 			(void) ddi_modclose(ha->fw_module);
16092 			ha->fw_module = NULL;
16093 		}
16094 	} else {
16095 		/*
16096 		 * check for firmware version mismatch between module and
16097 		 * compiled in fw_table version.
16098 		 */
16099 
16100 		if (strcmp(fwt->fw_version, fw_version) != 0) {
16101 
16102 			/*
16103 			 * If f/w / driver version mismatches then
16104 			 * return a successful status -- however warn
16105 			 * the user that this is NOT recommended.
16106 			 */
16107 
16108 			cmn_err(CE_WARN, "%s(%d): driver / f/w version "
16109 			    "mismatch for %x: driver-%s module-%s", QL_NAME,
16110 			    ha->instance, ha->fw_class, fwt->fw_version,
16111 			    fw_version);
16112 
16113 			ha->cfg_flags |= CFG_FW_MISMATCH;
16114 		} else {
16115 			ha->cfg_flags &= ~CFG_FW_MISMATCH;
16116 		}
16117 	}
16118 
16119 	return (rval);
16120 }
16121 
16122 /*
16123  * ql_port_state
16124  *	Set the state on all adapter ports.
16125  *
16126  * Input:
16127  *	ha:	parent adapter state pointer.
16128  *	state:	port state.
16129  *	flags:	task daemon flags to set.
16130  *
16131  * Context:
16132  *	Interrupt or Kernel context, no mailbox commands allowed.
16133  */
16134 void
16135 ql_port_state(ql_adapter_state_t *ha, uint32_t state, uint32_t flags)
16136 {
16137 	ql_adapter_state_t	*vha;
16138 
16139 	TASK_DAEMON_LOCK(ha);
16140 	for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
16141 		if (FC_PORT_STATE_MASK(vha->state) != state) {
16142 			vha->state = state != FC_STATE_OFFLINE ?
16143 			    (FC_PORT_SPEED_MASK(vha->state) | state) : state;
16144 			vha->task_daemon_flags |= flags;
16145 		}
16146 	}
16147 	ha->pha->task_daemon_flags |= flags & LOOP_DOWN;
16148 	TASK_DAEMON_UNLOCK(ha);
16149 }
16150 
16151 
16152 /*
16153  * ql_el_trace_desc_ctor - Construct an extended logging trace descriptor.
16154  *
16155  * Input:	Pointer to the adapter state structure.
16156  * Returns:	Success or Failure.
16157  * Context:	Kernel context.
16158  */
16159 int
16160 ql_el_trace_desc_ctor(ql_adapter_state_t *ha)
16161 {
16162 	int	rval;
16163 
16164 	rval = DDI_SUCCESS;
16165 
16166 	ha->el_trace_desc =
16167 	    (el_trace_desc_t *)kmem_zalloc(sizeof (el_trace_desc_t), KM_SLEEP);
16168 
16169 	if (ha->el_trace_desc == NULL) {
16170 		cmn_err(CE_WARN, "%s(%d): can't construct trace descriptor",
16171 		    QL_NAME, ha->instance);
16172 		rval = DDI_FAILURE;
16173 	} else {
16174 		ha->el_trace_desc->next		= 0;
16175 		ha->el_trace_desc->trace_buffer =
16176 		    (char *)kmem_zalloc(EL_TRACE_BUF_SIZE, KM_SLEEP);
16177 
16178 		if (ha->el_trace_desc->trace_buffer == NULL) {
16179 			cmn_err(CE_WARN, "%s(%d): can't get trace buffer",
16180 			    QL_NAME, ha->instance);
16181 			kmem_free(ha->el_trace_desc, sizeof (el_trace_desc_t));
16182 			rval = DDI_FAILURE;
16183 		} else {
16184 			ha->el_trace_desc->trace_buffer_size =
16185 			    EL_TRACE_BUF_SIZE;
16186 			mutex_init(&ha->el_trace_desc->mutex, NULL,
16187 			    MUTEX_DRIVER, NULL);
16188 		}
16189 	}
16190 	return (rval);
16191 }
16192 
16193 /*
16194  * ql_el_trace_desc_dtor - Destroy an extended logging trace descriptor.
16195  *
16196  * Input:	Pointer to the adapter state structure.
16197  * Returns:	Success or Failure.
16198  * Context:	Kernel context.
16199  */
16200 int
16201 ql_el_trace_desc_dtor(ql_adapter_state_t *ha)
16202 {
16203 	int	rval;
16204 
16205 	rval = DDI_SUCCESS;
16206 
16207 	if (ha->el_trace_desc == NULL) {
16208 		cmn_err(CE_WARN, "%s(%d): can't destroy el trace descriptor",
16209 		    QL_NAME, ha->instance);
16210 		rval = DDI_FAILURE;
16211 	} else {
16212 		if (ha->el_trace_desc->trace_buffer != NULL) {
16213 			kmem_free(ha->el_trace_desc->trace_buffer,
16214 			    ha->el_trace_desc->trace_buffer_size);
16215 		}
16216 		mutex_destroy(&ha->el_trace_desc->mutex);
16217 		kmem_free(ha->el_trace_desc, sizeof (el_trace_desc_t));
16218 	}
16219 	return (rval);
16220 }
16221 
16222 /*
16223  * els_cmd_text	- Return a pointer to a string describing the command
16224  *
16225  * Input:	els_cmd = the els command opcode.
16226  * Returns:	pointer to a string.
16227  * Context:	Kernel context.
16228  */
16229 char *
16230 els_cmd_text(int els_cmd)
16231 {
16232 	cmd_table_t *entry = &els_cmd_tbl[0];
16233 
16234 	return (cmd_text(entry, els_cmd));
16235 }
16236 
16237 /*
16238  * mbx_cmd_text - Return a pointer to a string describing the command
16239  *
16240  * Input:	mbx_cmd = the mailbox command opcode.
16241  * Returns:	pointer to a string.
16242  * Context:	Kernel context.
16243  */
16244 char *
16245 mbx_cmd_text(int mbx_cmd)
16246 {
16247 	cmd_table_t *entry = &mbox_cmd_tbl[0];
16248 
16249 	return (cmd_text(entry, mbx_cmd));
16250 }
16251 
16252 /*
16253  * cmd_text	Return a pointer to a string describing the command
16254  *
16255  * Input:	entry = the command table
16256  *		cmd = the command.
16257  * Returns:	pointer to a string.
16258  * Context:	Kernel context.
16259  */
16260 char *
16261 cmd_text(cmd_table_t *entry, int cmd)
16262 {
16263 	for (; entry->cmd != 0; entry++) {
16264 		if (entry->cmd == cmd) {
16265 			break;
16266 		}
16267 	}
16268 	return (entry->string);
16269 }
16270