xref: /illumos-gate/usr/src/uts/common/io/fibre-channel/fca/qlc/ql_api.c (revision a73c0fe4e90b82a478f821ef3adb5cf34f6a9346)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /* Copyright 2008 QLogic Corporation */
23 
24 /*
25  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
26  * Use is subject to license terms.
27  */
28 
29 #pragma ident	"Copyright 2008 QLogic Corporation; ql_api.c"
30 
31 /*
32  * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
33  *
34  * ***********************************************************************
35  * *									**
36  * *				NOTICE					**
37  * *		COPYRIGHT (C) 1996-2008 QLOGIC CORPORATION		**
38  * *			ALL RIGHTS RESERVED				**
39  * *									**
40  * ***********************************************************************
41  *
42  */
43 
44 #include <ql_apps.h>
45 #include <ql_api.h>
46 #include <ql_debug.h>
47 #include <ql_init.h>
48 #include <ql_iocb.h>
49 #include <ql_ioctl.h>
50 #include <ql_isr.h>
51 #include <ql_mbx.h>
52 #include <ql_xioctl.h>
53 
54 #include <sys/ddi.h>
55 #include <sys/sunddi.h>
56 
57 /*
58  * Solaris external defines.
59  */
60 extern pri_t minclsyspri;
61 extern pri_t maxclsyspri;
62 
63 /*
64  * dev_ops functions prototypes
65  */
66 static int ql_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
67 static int ql_attach(dev_info_t *, ddi_attach_cmd_t);
68 static int ql_detach(dev_info_t *, ddi_detach_cmd_t);
69 static int ql_power(dev_info_t *, int, int);
70 
71 /*
72  * FCA functions prototypes exported by means of the transport table
73  */
74 static opaque_t ql_bind_port(dev_info_t *, fc_fca_port_info_t *,
75     fc_fca_bind_info_t *);
76 static void ql_unbind_port(opaque_t);
77 static int ql_init_pkt(opaque_t, fc_packet_t *, int);
78 static int ql_un_init_pkt(opaque_t, fc_packet_t *);
79 static int ql_els_send(opaque_t, fc_packet_t *);
80 static int ql_get_cap(opaque_t, char *, void *);
81 static int ql_set_cap(opaque_t, char *, void *);
82 static int ql_getmap(opaque_t, fc_lilpmap_t *);
83 static int ql_transport(opaque_t, fc_packet_t *);
84 static int ql_ub_alloc(opaque_t, uint64_t *, uint32_t, uint32_t *, uint32_t);
85 static int ql_ub_free(opaque_t, uint32_t, uint64_t *);
86 static int ql_ub_release(opaque_t, uint32_t, uint64_t *);
87 static int ql_abort(opaque_t, fc_packet_t *, int);
88 static int ql_reset(opaque_t, uint32_t);
89 static int ql_notify(opaque_t, uint32_t);
90 static int ql_port_manage(opaque_t, fc_fca_pm_t *);
91 static opaque_t ql_get_device(opaque_t, fc_portid_t);
92 
93 /*
94  * FCA Driver Support Function Prototypes.
95  */
96 static uint16_t	ql_wait_outstanding(ql_adapter_state_t *);
97 static void ql_task_mgmt(ql_adapter_state_t *, ql_tgt_t *, fc_packet_t *,
98     ql_srb_t *);
99 static void ql_task_daemon(void *);
100 static void ql_task_thread(ql_adapter_state_t *);
101 static void ql_unsol_callback(ql_srb_t *);
102 static void ql_dev_free(ql_adapter_state_t *, ql_tgt_t *);
103 static void ql_free_unsolicited_buffer(ql_adapter_state_t *,
104     fc_unsol_buf_t *);
105 static void ql_timer(void *);
106 static void ql_watchdog(ql_adapter_state_t *, uint32_t *, uint32_t *);
107 static void ql_cmd_timeout(ql_adapter_state_t *, ql_tgt_t *q, ql_srb_t *,
108     uint32_t *, uint32_t *);
109 static void ql_halt(ql_adapter_state_t *, int);
110 static int ql_els_plogi(ql_adapter_state_t *, fc_packet_t *);
111 static int ql_els_flogi(ql_adapter_state_t *, fc_packet_t *);
112 static int ql_els_logo(ql_adapter_state_t *, fc_packet_t *);
113 static int ql_els_prli(ql_adapter_state_t *, fc_packet_t *);
114 static int ql_els_prlo(ql_adapter_state_t *, fc_packet_t *);
115 static int ql_els_adisc(ql_adapter_state_t *, fc_packet_t *);
116 static int ql_els_linit(ql_adapter_state_t *, fc_packet_t *);
117 static int ql_els_lpc(ql_adapter_state_t *, fc_packet_t *);
118 static int ql_els_lsts(ql_adapter_state_t *, fc_packet_t *);
119 static int ql_els_scr(ql_adapter_state_t *, fc_packet_t *);
120 static int ql_els_rscn(ql_adapter_state_t *, fc_packet_t *);
121 static int ql_els_farp_req(ql_adapter_state_t *, fc_packet_t *);
122 static int ql_els_farp_reply(ql_adapter_state_t *, fc_packet_t *);
123 static int ql_els_rls(ql_adapter_state_t *, fc_packet_t *);
124 static int ql_els_rnid(ql_adapter_state_t *, fc_packet_t *);
125 static int ql_login_port(ql_adapter_state_t *, port_id_t);
126 static int ql_login_fabric_port(ql_adapter_state_t *, ql_tgt_t *, uint16_t);
127 static int ql_logout_port(ql_adapter_state_t *, port_id_t);
128 static ql_lun_t *ql_lun_queue(ql_adapter_state_t *, ql_tgt_t *, uint16_t);
129 static int ql_fcp_scsi_cmd(ql_adapter_state_t *, fc_packet_t *, ql_srb_t *);
130 static int ql_fcp_ip_cmd(ql_adapter_state_t *, fc_packet_t *, ql_srb_t *);
131 static int ql_fcp_data_rsp(ql_adapter_state_t *, fc_packet_t *, ql_srb_t *);
132 static int ql_fc_services(ql_adapter_state_t *, fc_packet_t *);
133 static int ql_poll_cmd(ql_adapter_state_t *, ql_srb_t *, time_t);
134 static int ql_start_cmd(ql_adapter_state_t *, ql_tgt_t *, fc_packet_t *,
135     ql_srb_t *);
136 static int ql_kstat_update(kstat_t *, int);
137 static ql_adapter_state_t *ql_fca_handle_to_state(opaque_t);
138 static ql_adapter_state_t *ql_cmd_setup(opaque_t, fc_packet_t *, int *);
139 static int ql_program_flash_address(ql_adapter_state_t *, uint32_t, uint8_t);
140 static void ql_rst_aen(ql_adapter_state_t *);
141 static void ql_restart_queues(ql_adapter_state_t *);
142 static void ql_abort_queues(ql_adapter_state_t *);
143 static void ql_idle_check(ql_adapter_state_t *);
144 static int ql_loop_resync(ql_adapter_state_t *);
145 static size_t ql_24xx_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
146 static size_t ql_25xx_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
147 static int ql_save_config_regs(dev_info_t *);
148 static int ql_restore_config_regs(dev_info_t *);
149 static int ql_process_rscn(ql_adapter_state_t *, fc_affected_id_t *);
150 static int ql_handle_rscn_update(ql_adapter_state_t *);
151 static int ql_send_plogi(ql_adapter_state_t *, ql_tgt_t *, ql_head_t *);
152 static int ql_process_rscn_for_device(ql_adapter_state_t *, ql_tgt_t *);
153 static int ql_dump_firmware(ql_adapter_state_t *);
154 static int ql_process_logo_for_device(ql_adapter_state_t *, ql_tgt_t *);
155 static int ql_2200_binary_fw_dump(ql_adapter_state_t *, ql_fw_dump_t *);
156 static int ql_2300_binary_fw_dump(ql_adapter_state_t *, ql_fw_dump_t *);
157 static int ql_24xx_binary_fw_dump(ql_adapter_state_t *, ql_24xx_fw_dump_t *);
158 static int ql_25xx_binary_fw_dump(ql_adapter_state_t *, ql_25xx_fw_dump_t *);
159 static int ql_read_risc_ram(ql_adapter_state_t *, uint32_t, uint32_t,
160     void *);
161 static void *ql_read_regs(ql_adapter_state_t *, void *, void *, uint32_t,
162     uint8_t);
163 static int ql_busy_plogi(ql_adapter_state_t *, fc_packet_t *, ql_tgt_t *);
164 static int ql_suspend_adapter(ql_adapter_state_t *);
165 static int ql_bstr_to_dec(char *, uint32_t *, uint32_t);
166 static void ql_update_rscn(ql_adapter_state_t *, fc_affected_id_t *);
167 int ql_alloc_dma_resouce(ql_adapter_state_t *, dma_mem_t *, int);
168 static int ql_bind_dma_buffer(ql_adapter_state_t *, dma_mem_t *, int);
169 static void ql_unbind_dma_buffer(ql_adapter_state_t *, dma_mem_t *);
170 static void ql_timeout_insert(ql_adapter_state_t *, ql_tgt_t *, ql_srb_t *);
171 static int ql_setup_interrupts(ql_adapter_state_t *);
172 static int ql_setup_msi(ql_adapter_state_t *);
173 static int ql_setup_msix(ql_adapter_state_t *);
174 static int ql_setup_fixed(ql_adapter_state_t *);
175 static void ql_release_intr(ql_adapter_state_t *);
176 static void ql_disable_intr(ql_adapter_state_t *);
177 static int ql_legacy_intr(ql_adapter_state_t *);
178 static int ql_init_mutex(ql_adapter_state_t *);
179 static void ql_destroy_mutex(ql_adapter_state_t *);
180 static void ql_iidma(ql_adapter_state_t *);
181 
182 /*
183  * Global data
184  */
185 static uint8_t	ql_enable_pm = 1;
186 static int	ql_flash_sbus_fpga = 0;
187 uint32_t	ql_os_release_level;
188 uint32_t	ql_disable_aif = 0;
189 uint32_t	ql_disable_msi = 0;
190 uint32_t	ql_disable_msix = 0;
191 
192 /* Timer routine variables. */
193 static timeout_id_t	ql_timer_timeout_id = NULL;
194 static clock_t		ql_timer_ticks;
195 
196 /* Soft state head pointer. */
197 void *ql_state = NULL;
198 
199 /* Head adapter link. */
200 ql_head_t ql_hba = {
201 	NULL,
202 	NULL
203 };
204 
205 /* Global hba index */
206 uint32_t ql_gfru_hba_index = 1;
207 
208 /*
209  * Firmware dump context.
210  */
211 void			*ql_dump_ptr = NULL;
212 uint32_t		ql_dump_size = 0;
213 volatile uint32_t	ql_dump_state;
214 
215 /*
216  * Some IP defines and globals
217  */
218 uint32_t	ql_ip_buffer_count = 128;
219 uint32_t	ql_ip_low_water = 10;
220 uint8_t		ql_ip_fast_post_count = 5;
221 static int	ql_ip_mtu = 65280;		/* equivalent to FCIPMTU */
222 
223 /* Device AL_PA to Device Head Queue index array. */
224 uint8_t ql_alpa_to_index[] = {
225 	0x7e, 0x7d, 0x7c, 0x00, 0x7b, 0x01, 0x02, 0x03, 0x7a, 0x04,
226 	0x05, 0x06, 0x07, 0x08, 0x09, 0x79, 0x78, 0x0a, 0x0b, 0x0c,
227 	0x0d, 0x0e, 0x0f, 0x77, 0x76, 0x10, 0x11, 0x75, 0x12, 0x74,
228 	0x73, 0x72, 0x13, 0x14, 0x15, 0x71, 0x16, 0x70, 0x6f, 0x6e,
229 	0x17, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x18, 0x19, 0x67,
230 	0x66, 0x65, 0x64, 0x63, 0x62, 0x20, 0x21, 0x61, 0x60, 0x23,
231 	0x5f, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x5e, 0x2a, 0x5d,
232 	0x5c, 0x5b, 0x2b, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x2c,
233 	0x2d, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x2e, 0x2f, 0x4e,
234 	0x4d, 0x30, 0x4c, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x4b,
235 	0x37, 0x4a, 0x49, 0x48, 0x38, 0x47, 0x46, 0x45, 0x44, 0x43,
236 	0x42, 0x39, 0x3a, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x3b,
237 	0x3c, 0x3b, 0x3a, 0x3d, 0x39, 0x3e, 0x3f, 0x40, 0x38, 0x37,
238 	0x36, 0x41, 0x35, 0x42, 0x43, 0x44, 0x34, 0x45, 0x46, 0x47,
239 	0x48, 0x49, 0x4a, 0x33, 0x32, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
240 	0x50, 0x31, 0x30, 0x51, 0x52, 0x2f, 0x53, 0x2e, 0x2d, 0x2c,
241 	0x54, 0x55, 0x56, 0x2b, 0x57, 0x2a, 0x29, 0x28, 0x58, 0x27,
242 	0x26, 0x25, 0x24, 0x23, 0x22, 0x59, 0x5a, 0x21, 0x20, 0x1f,
243 	0x1e, 0x1d, 0x1c, 0x5b, 0x5c, 0x1b, 0x1a, 0x5d, 0x19, 0x5e,
244 	0x5f, 0x60, 0x61, 0x62, 0x63, 0x18, 0x64, 0x17, 0x16, 0x15,
245 	0x65, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x66, 0x67, 0x0e,
246 	0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x68, 0x69, 0x08, 0x07, 0x6a,
247 	0x06, 0x6b, 0x6c, 0x6d, 0x05, 0x04, 0x03, 0x6e, 0x02, 0x6f,
248 	0x70, 0x71, 0x01, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x00,
249 	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7f, 0x80, 0x00, 0x01,
250 	0x02, 0x03, 0x80, 0x7f, 0x7e, 0x04
251 };
252 
/*
 * Device loop_id to ALPA array: the inverse mapping of
 * ql_alpa_to_index[] — indexed by loop ID, yields the AL_PA.
 * QLogic-supplied lookup table — do not edit by hand.
 */
static uint8_t ql_index_to_alpa[] = {
	0xef, 0xe8, 0xe4, 0xe2, 0xe1, 0xe0, 0xdc, 0xda, 0xd9, 0xd6,
	0xd5, 0xd4, 0xd3, 0xd2, 0xd1, 0xce, 0xcd, 0xcc, 0xcb, 0xca,
	0xc9, 0xc7, 0xc6, 0xc5, 0xc3, 0xbc, 0xba, 0xb9, 0xb6, 0xb5,
	0xb4, 0xb3, 0xb2, 0xb1, 0xae, 0xad, 0xac, 0xab, 0xaa, 0xa9,
	0xa7, 0xa6, 0xa5, 0xa3, 0x9f, 0x9e, 0x9d, 0x9b, 0x98, 0x97,
	0x90, 0x8f, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7c, 0x7a, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6e, 0x6d, 0x6c, 0x6b,
	0x6a, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5c, 0x5a, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4e, 0x4d, 0x4c, 0x4b, 0x4a,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3c, 0x3a, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1f, 0x1e, 0x1d, 0x1b, 0x18, 0x17,
	0x10, 0x0f, 0x08, 0x04, 0x02, 0x01
};
269 
/*
 * 2200 register offsets.
 * Offsets are relative to the mapped register base; per the comments
 * below, 0xff/0x00 entries mark registers the 2200 does not implement.
 */
static reg_off_t reg_off_2200 = {
	0x00, 0x02, 0x06, 0x08, 0x0a, 0x0c, 0x0e,
	0x18, 0x18, 0x1A, 0x1A, /* req in, out, resp in, out */
	0x00, 0x00, /* intr info lo, hi */
	24, /* Number of mailboxes */
	/* Mailbox register offsets */
	0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
	0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee,
	0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0xfe,
	/* 2200 does not have mailbox 24-31 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x96, 0xa4, 0xb0, 0xb8, 0xc0, 0xcc, 0xce,
	/* host to host sema */
	0x00,
	/* 2200 does not have pri_req_in, pri_req_out, */
	/* atio_req_in, atio_req_out, io_base_addr */
	0xff, 0xff, 0xff, 0xff,	0xff
};
289 
/*
 * 2300 register offsets.
 * Same layout as reg_off_2200 but with all 32 mailboxes and a
 * host-to-host semaphore; 0xff entries mark unimplemented registers.
 */
static reg_off_t reg_off_2300 = {
	0x00, 0x02, 0x06, 0x08, 0x0a, 0x0c, 0x0e,
	0x10, 0x12, 0x14, 0x16, /* req in, out, resp in, out */
	0x18, 0x1A, /* intr info lo, hi */
	32, /* Number of mailboxes */
	/* Mailbox register offsets */
	0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e,
	0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e,
	0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e,
	0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e,
	0x96, 0xa4, 0xb0, 0x80, 0xc0, 0xcc, 0xce,
	/* host to host sema */
	0x1c,
	/* 2300 does not have pri_req_in, pri_req_out, */
	/* atio_req_in, atio_req_out, io_base_addr */
	0xff, 0xff, 0xff, 0xff,	0xff
};
308 
/*
 * 2400/2500 register offsets.
 * Non-static: shared with other driver translation units.
 */
reg_off_t reg_off_2400_2500 = {
	0x00, 0x04,		/* flash_address, flash_data */
	0x08, 0x0c, 0x10,	/* ctrl_status, ictrl, istatus */
	/* 2400 does not have semaphore, nvram */
	0x14, 0x18,
	0x1c, 0x20, 0x24, 0x28, /* req_in, req_out, resp_in, resp_out */
	0x44, 0x46,		/* intr info lo, hi */
	32,			/* Number of mailboxes */
	/* Mailbox register offsets */
	0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e,
	0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
	0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae,
	0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe,
	/* 2400 does not have fpm_diag_config, pcr, mctr, fb_cmd */
	0xff, 0xff, 0xff, 0xff,
	0x48, 0x4c, 0x50,	/* hccr, gpiod, gpioe */
	0xff,			/* host to host sema */
	0x2c, 0x30,		/* pri_req_in, pri_req_out */
	0x3c, 0x40,		/* atio_req_in, atio_req_out */
	0x54			/* io_base_addr */
};
331 
/*
 * Mutexes protecting variables shared by all instances of the driver.
 * Initialized in _init() before mod_install(); destroyed in _fini()
 * and on the _init() failure path.
 */
kmutex_t ql_global_mutex;
kmutex_t ql_global_hw_mutex;
kmutex_t ql_global_el_mutex;

/* DMA access attribute structure (little-endian, strict ordering). */
static ddi_device_acc_attr_t ql_dev_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};
343 
/*
 * I/O DMA attributes structures.
 * Base templates for 64-bit and 32-bit capable devices; the only
 * difference is the high DMA address bound.  _init() may clamp
 * dma_attr_count_max on older OS releases.
 */
static ddi_dma_attr_t ql_64bit_io_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr_version */
	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
	QL_DMA_HIGH_64BIT_ADDRESS,	/* high DMA address range */
	QL_DMA_XFER_COUNTER,		/* DMA counter register */
	QL_DMA_ADDRESS_ALIGNMENT,	/* DMA address alignment */
	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
	QL_DMA_SG_LIST_LENGTH,		/* s/g list length */
	QL_DMA_GRANULARITY,		/* granularity of device */
	QL_DMA_XFER_FLAGS		/* DMA transfer flags */
};

static ddi_dma_attr_t ql_32bit_io_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr_version */
	QL_DMA_LOW_ADDRESS,		/* low DMA address range */
	QL_DMA_HIGH_32BIT_ADDRESS,	/* high DMA address range */
	QL_DMA_XFER_COUNTER,		/* DMA counter register */
	QL_DMA_ADDRESS_ALIGNMENT,	/* DMA address alignment */
	QL_DMA_BURSTSIZES,		/* DMA burstsizes */
	QL_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
	QL_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
	QL_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
	QL_DMA_SG_LIST_LENGTH,		/* s/g list length */
	QL_DMA_GRANULARITY,		/* granularity of device */
	QL_DMA_XFER_FLAGS		/* DMA transfer flags */
};

/*
 * Per-traffic-type DMA attribute templates.  Copied from the base
 * 32/64-bit attributes in _init() after a successful mod_install(),
 * then each gets its own s/g list length (QL_*_SGLLEN).
 */
static	ddi_dma_attr_t	ql_32fcsm_cmd_dma_attr;
static	ddi_dma_attr_t	ql_64fcsm_cmd_dma_attr;
static	ddi_dma_attr_t	ql_32fcsm_rsp_dma_attr;
static	ddi_dma_attr_t	ql_64fcsm_rsp_dma_attr;
static	ddi_dma_attr_t	ql_32fcip_cmd_dma_attr;
static	ddi_dma_attr_t	ql_64fcip_cmd_dma_attr;
static	ddi_dma_attr_t	ql_32fcip_rsp_dma_attr;
static	ddi_dma_attr_t	ql_64fcip_rsp_dma_attr;
static	ddi_dma_attr_t	ql_32fcp_cmd_dma_attr;
static	ddi_dma_attr_t	ql_64fcp_cmd_dma_attr;
static	ddi_dma_attr_t	ql_32fcp_rsp_dma_attr;
static	ddi_dma_attr_t	ql_64fcp_rsp_dma_attr;
static	ddi_dma_attr_t	ql_32fcp_data_dma_attr;
static	ddi_dma_attr_t	ql_64fcp_data_dma_attr;
390 
#if 0
/*
 * I/O DMA limit structures.
 * NOTE(review): dead code — these ddi_dma_lim_t tables belong to the
 * legacy DDI DMA interface and are compiled out; candidate for removal.
 */
static ddi_dma_lim_t ql_32bit_dma_limit = {
	QL_DMA_LOW_ADDRESS,		/* low range of 32 bit addressing */
					/* capability */
	QL_DMA_HIGH_32BIT_ADDRESS,	/* inclusive upper bound of addr */
					/* capability */
	QL_DMA_SEGMENT_BOUNDARY,	/* inclusive upper bound of dma */
					/* engine's address limit */
	QL_DMA_BURSTSIZES,		/* binary encoded dma burst sizes */
	QL_DMA_GRANULARITY,		/* minimum effective dma xfer size */
	0				/* average dma data rate (kb/s) */
};

static ddi_dma_lim_t ql_64bit_dma_limit = {
	QL_DMA_LOW_ADDRESS,		/* low range of 32 bit addressing */
					/* capability */
	QL_DMA_HIGH_64BIT_ADDRESS,	/* inclusive upper bound of addr */
					/* capability */
	QL_DMA_SEGMENT_BOUNDARY,	/* inclusive upper bound of dma */
					/* engine's address limit */
	QL_DMA_BURSTSIZES,		/* binary encoded dma burst sizes */
	QL_DMA_GRANULARITY,		/* minimum effective dma xfer size */
	0				/* average dma data rate (kb/s) */
};
#endif
417 
/*
 * Static declarations of cb_ops entry point functions...
 * Character-device-only interface: all block entry points are nodev;
 * only open/close/ioctl are implemented (declared elsewhere in the
 * driver).
 */
static struct cb_ops ql_cb_ops = {
	ql_open,			/* b/c open */
	ql_close,			/* b/c close */
	nodev,				/* b strategy */
	nodev,				/* b print */
	nodev,				/* b dump */
	nodev,				/* c read */
	nodev,				/* c write */
	ql_ioctl,			/* c ioctl */
	nodev,				/* c devmap */
	nodev,				/* c mmap */
	nodev,				/* c segmap */
	nochpoll,			/* c poll */
	nodev,				/* cb_prop_op */
	NULL,				/* streamtab  */
	D_MP | D_NEW | D_HOTPLUG,	/* Driver compatibility flag */
	CB_REV,				/* cb_ops revision */
	nodev,				/* c aread */
	nodev				/* c awrite */
};
439 
/*
 * Static declarations of dev_ops entry point functions...
 * Passed to fc_fca_init() in _init(), which may adjust the table
 * before mod_install().
 */
static struct dev_ops ql_devops = {
	DEVO_REV,			/* devo_rev */
	0,				/* refcnt */
	ql_getinfo,			/* getinfo */
	nulldev,			/* identify */
	nulldev,			/* probe */
	ql_attach,			/* attach */
	ql_detach,			/* detach */
	nodev,				/* reset */
	&ql_cb_ops,			/* char/block ops */
	NULL,				/* bus operations */
	ql_power			/* power management */
};
454 
455 
/* Driver version string; exported (non-static) for other driver files. */
char qlc_driver_version[] = QL_VERSION;

/*
 * Loadable Driver Interface Structures.
 * Declare and initialize the module configuration section...
 */
static struct modldrv modldrv = {
	&mod_driverops,				/* type of module: driver */
	"SunFC Qlogic FCA v" QL_VERSION,	/* name of module */
	&ql_devops				/* driver dev_ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL					/* terminator */
};
473 
474 /* ************************************************************************ */
475 /*				Loadable Module Routines.		    */
476 /* ************************************************************************ */
477 
478 /*
479  * _init
480  *	Initializes a loadable module. It is called before any other
481  *	routine in a loadable module.
482  *
483  * Returns:
484  *	0 = success
485  *
486  * Context:
487  *	Kernel context.
488  */
489 int
490 _init(void)
491 {
492 	uint16_t	w16;
493 	int		rval = 0;
494 
495 	/* Get OS major release level. */
496 	for (w16 = 0; w16 < sizeof (utsname.release); w16++) {
497 		if (utsname.release[w16] == '.') {
498 			w16++;
499 			break;
500 		}
501 	}
502 	if (w16 < sizeof (utsname.release)) {
503 		(void) ql_bstr_to_dec(&utsname.release[w16],
504 		    &ql_os_release_level, 0);
505 	} else {
506 		ql_os_release_level = 0;
507 	}
508 	if (ql_os_release_level < 6) {
509 		cmn_err(CE_WARN, "%s Unsupported OS release level = %d",
510 		    QL_NAME, ql_os_release_level);
511 		rval = EINVAL;
512 	}
513 	if (ql_os_release_level == 6) {
514 		ql_32bit_io_dma_attr.dma_attr_count_max = 0x00ffffff;
515 		ql_64bit_io_dma_attr.dma_attr_count_max = 0x00ffffff;
516 	}
517 
518 	if (rval == 0) {
519 		rval = ddi_soft_state_init(&ql_state,
520 		    sizeof (ql_adapter_state_t), 0);
521 	}
522 	if (rval == 0) {
523 		/* allow the FC Transport to tweak the dev_ops */
524 		fc_fca_init(&ql_devops);
525 
526 		mutex_init(&ql_global_mutex, NULL, MUTEX_DRIVER, NULL);
527 		mutex_init(&ql_global_hw_mutex, NULL, MUTEX_DRIVER, NULL);
528 		mutex_init(&ql_global_el_mutex, NULL, MUTEX_DRIVER, NULL);
529 		rval = mod_install(&modlinkage);
530 		if (rval != 0) {
531 			mutex_destroy(&ql_global_hw_mutex);
532 			mutex_destroy(&ql_global_mutex);
533 			mutex_destroy(&ql_global_el_mutex);
534 			ddi_soft_state_fini(&ql_state);
535 		} else {
536 			/*EMPTY*/
537 			ql_32fcsm_cmd_dma_attr = ql_32bit_io_dma_attr;
538 			ql_64fcsm_cmd_dma_attr = ql_64bit_io_dma_attr;
539 			ql_32fcsm_rsp_dma_attr = ql_32bit_io_dma_attr;
540 			ql_64fcsm_rsp_dma_attr = ql_64bit_io_dma_attr;
541 			ql_32fcip_cmd_dma_attr = ql_32bit_io_dma_attr;
542 			ql_64fcip_cmd_dma_attr = ql_64bit_io_dma_attr;
543 			ql_32fcip_rsp_dma_attr = ql_32bit_io_dma_attr;
544 			ql_64fcip_rsp_dma_attr = ql_64bit_io_dma_attr;
545 			ql_32fcp_cmd_dma_attr = ql_32bit_io_dma_attr;
546 			ql_64fcp_cmd_dma_attr = ql_64bit_io_dma_attr;
547 			ql_32fcp_rsp_dma_attr = ql_32bit_io_dma_attr;
548 			ql_64fcp_rsp_dma_attr = ql_64bit_io_dma_attr;
549 			ql_32fcp_data_dma_attr = ql_32bit_io_dma_attr;
550 			ql_64fcp_data_dma_attr = ql_64bit_io_dma_attr;
551 			ql_32fcsm_cmd_dma_attr.dma_attr_sgllen =
552 			    ql_64fcsm_cmd_dma_attr.dma_attr_sgllen =
553 			    QL_FCSM_CMD_SGLLEN;
554 			ql_32fcsm_rsp_dma_attr.dma_attr_sgllen =
555 			    ql_64fcsm_rsp_dma_attr.dma_attr_sgllen =
556 			    QL_FCSM_RSP_SGLLEN;
557 			ql_32fcip_cmd_dma_attr.dma_attr_sgllen =
558 			    ql_64fcip_cmd_dma_attr.dma_attr_sgllen =
559 			    QL_FCIP_CMD_SGLLEN;
560 			ql_32fcip_rsp_dma_attr.dma_attr_sgllen =
561 			    ql_64fcip_rsp_dma_attr.dma_attr_sgllen =
562 			    QL_FCIP_RSP_SGLLEN;
563 			ql_32fcp_cmd_dma_attr.dma_attr_sgllen =
564 			    ql_64fcp_cmd_dma_attr.dma_attr_sgllen =
565 			    QL_FCP_CMD_SGLLEN;
566 			ql_32fcp_rsp_dma_attr.dma_attr_sgllen =
567 			    ql_64fcp_rsp_dma_attr.dma_attr_sgllen =
568 			    QL_FCP_RSP_SGLLEN;
569 		}
570 	}
571 
572 	if (rval != 0) {
573 		cmn_err(CE_CONT, "?Unable to install/attach driver '%s'",
574 		    QL_NAME);
575 	}
576 
577 	return (rval);
578 }
579 
580 /*
581  * _fini
582  *	Prepares a module for unloading. It is called when the system
583  *	wants to unload a module. If the module determines that it can
584  *	be unloaded, then _fini() returns the value returned by
585  *	mod_remove(). Upon successful return from _fini() no other
586  *	routine in the module will be called before _init() is called.
587  *
588  * Returns:
589  *	0 = success
590  *
591  * Context:
592  *	Kernel context.
593  */
594 int
595 _fini(void)
596 {
597 	int	rval;
598 
599 	rval = mod_remove(&modlinkage);
600 	if (rval == 0) {
601 		mutex_destroy(&ql_global_hw_mutex);
602 		mutex_destroy(&ql_global_mutex);
603 		mutex_destroy(&ql_global_el_mutex);
604 		ddi_soft_state_fini(&ql_state);
605 	}
606 
607 	return (rval);
608 }
609 
610 /*
611  * _info
612  *	Returns information about loadable module.
613  *
614  * Input:
615  *	modinfo = pointer to module information structure.
616  *
617  * Returns:
618  *	Value returned by mod_info().
619  *
620  * Context:
621  *	Kernel context.
622  */
623 int
624 _info(struct modinfo *modinfop)
625 {
626 	return (mod_info(&modlinkage, modinfop));
627 }
628 
629 /* ************************************************************************ */
630 /*			dev_ops functions				    */
631 /* ************************************************************************ */
632 
633 /*
634  * ql_getinfo
635  *	Returns the pointer associated with arg when cmd is
636  *	set to DDI_INFO_DEVT2DEVINFO, or it should return the
637  *	instance number associated with arg when cmd is set
638  *	to DDI_INFO_DEV2INSTANCE.
639  *
640  * Input:
641  *	dip = Do not use.
642  *	cmd = command argument.
643  *	arg = command specific argument.
644  *	resultp = pointer to where request information is stored.
645  *
646  * Returns:
647  *	DDI_SUCCESS or DDI_FAILURE.
648  *
649  * Context:
650  *	Kernel context.
651  */
652 /* ARGSUSED */
653 static int
654 ql_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
655 {
656 	ql_adapter_state_t	*ha;
657 	int			minor;
658 	int			rval = DDI_FAILURE;
659 
660 
661 	minor = (int)(getminor((dev_t)arg));
662 	ha = ddi_get_soft_state(ql_state, minor);
663 	if (ha == NULL) {
664 		QL_PRINT_2(CE_CONT, "failed, unknown minor=%d\n",
665 		    getminor((dev_t)arg));
666 		*resultp = NULL;
667 		return (rval);
668 	}
669 
670 	QL_PRINT_3(CE_CONT, "(%d): entered\n", ha->instance);
671 
672 	switch (cmd) {
673 	case DDI_INFO_DEVT2DEVINFO:
674 		*resultp = ha->dip;
675 		rval = DDI_SUCCESS;
676 		break;
677 	case DDI_INFO_DEVT2INSTANCE:
678 		*resultp = (void*)(uintptr_t)ha->instance;
679 		rval = DDI_SUCCESS;
680 		break;
681 	default:
682 		EL(ha, "failed, unsupported cmd=%d\n", cmd);
683 		rval = DDI_FAILURE;
684 		break;
685 	}
686 
687 	QL_PRINT_3(CE_CONT, "(%d): exiting\n", ha->instance);
688 
689 	return (rval);
690 }
691 
692 /*
693  * ql_attach
694  *	Configure and attach an instance of the driver
695  *	for a port.
696  *
697  * Input:
698  *	dip = pointer to device information structure.
699  *	cmd = attach type.
700  *
701  * Returns:
702  *	DDI_SUCCESS or DDI_FAILURE.
703  *
704  * Context:
705  *	Kernel context.
706  */
707 static int
708 ql_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
709 {
710 	uint32_t		size;
711 	int			rval;
712 	int			instance;
713 	uint_t			progress = 0;
714 	char			*buf;
715 	ushort_t		caps_ptr, cap;
716 	fc_fca_tran_t		*tran;
717 	ql_adapter_state_t	*ha = NULL;
718 	static char *pmcomps[] = {
719 		NULL,
720 		PM_LEVEL_D3_STR,		/* Device OFF */
721 		PM_LEVEL_D0_STR,		/* Device ON */
722 	};
723 
724 	QL_PRINT_3(CE_CONT, "(%d): started, cmd=%xh\n",
725 	    ddi_get_instance(dip), cmd);
726 
727 	buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
728 
729 	switch (cmd) {
730 	case DDI_ATTACH:
731 		/* first get the instance */
732 		instance = ddi_get_instance(dip);
733 
734 		cmn_err(CE_CONT, "!Qlogic %s(%d) FCA Driver v%s\n",
735 		    QL_NAME, instance, QL_VERSION);
736 
737 		/* Correct OS version? */
738 		if (ql_os_release_level != 11) {
739 			cmn_err(CE_WARN, "%s(%d): This driver is for Solaris "
740 			    "11", QL_NAME, instance);
741 			goto attach_failed;
742 		}
743 
744 		/* Hardware is installed in a DMA-capable slot? */
745 		if (ddi_slaveonly(dip) == DDI_SUCCESS) {
746 			cmn_err(CE_WARN, "%s(%d): slave only", QL_NAME,
747 			    instance);
748 			goto attach_failed;
749 		}
750 
751 		/* No support for high-level interrupts */
752 		if (ddi_intr_hilevel(dip, 0) != 0) {
753 			cmn_err(CE_WARN, "%s(%d): High level interrupt"
754 			    " not supported", QL_NAME, instance);
755 			goto attach_failed;
756 		}
757 
758 		/* Allocate our per-device-instance structure */
759 		if (ddi_soft_state_zalloc(ql_state,
760 		    instance) != DDI_SUCCESS) {
761 			cmn_err(CE_WARN, "%s(%d): soft state alloc failed",
762 			    QL_NAME, instance);
763 			goto attach_failed;
764 		}
765 		progress |= QL_SOFT_STATE_ALLOCED;
766 
767 		ha = ddi_get_soft_state(ql_state, instance);
768 		if (ha == NULL) {
769 			cmn_err(CE_WARN, "%s(%d): can't get soft state",
770 			    QL_NAME, instance);
771 			goto attach_failed;
772 		}
773 		ha->dip = dip;
774 		ha->instance = instance;
775 		ha->hba.base_address = ha;
776 		ha->pha = ha;
777 
778 		/* Get extended logging and dump flags. */
779 		ql_common_properties(ha);
780 
781 		if (strcmp(ddi_driver_name(ddi_get_parent(dip)), "sbus") == 0) {
782 			EL(ha, "%s SBUS card detected", QL_NAME);
783 			ha->cfg_flags |= CFG_SBUS_CARD;
784 		}
785 
786 		ha->dev = kmem_zalloc(sizeof (*ha->dev) *
787 		    DEVICE_HEAD_LIST_SIZE, KM_SLEEP);
788 
789 		ha->outstanding_cmds = kmem_zalloc(
790 		    sizeof (*ha->outstanding_cmds) * MAX_OUTSTANDING_COMMANDS,
791 		    KM_SLEEP);
792 
793 		ha->ub_array = kmem_zalloc(sizeof (*ha->ub_array) *
794 		    QL_UB_LIMIT, KM_SLEEP);
795 
796 		ha->adapter_stats = kmem_zalloc(sizeof (*ha->adapter_stats),
797 		    KM_SLEEP);
798 
799 		(void) ddi_pathname(dip, buf);
800 		ha->devpath = kmem_zalloc(strlen(buf)+1, KM_SLEEP);
801 		if (ha->devpath == NULL) {
802 			EL(ha, "devpath mem alloc failed\n");
803 		} else {
804 			(void) strcpy(ha->devpath, buf);
805 			EL(ha, "devpath is: %s\n", ha->devpath);
806 		}
807 
808 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
809 			/*
810 			 * For cards where PCI is mapped to sbus e.g. Ivory.
811 			 *
812 			 * 0x00	: 0x000 - 0x0FF PCI Config Space for 2200
813 			 *	: 0x100 - 0x3FF PCI IO space for 2200
814 			 * 0x01	: 0x000 - 0x0FF PCI Config Space for fpga
815 			 *	: 0x100 - 0x3FF PCI IO Space for fpga
816 			 */
817 			if (ddi_regs_map_setup(dip, 0, (caddr_t *)&ha->iobase,
818 			    0x100, 0x300, &ql_dev_acc_attr, &ha->dev_handle)
819 			    != DDI_SUCCESS) {
820 				cmn_err(CE_WARN, "%s(%d): Unable to map device"
821 				    " registers", QL_NAME, instance);
822 				goto attach_failed;
823 			}
824 			if (ddi_regs_map_setup(dip, 1,
825 			    (caddr_t *)&ha->sbus_fpga_iobase, 0, 0x400,
826 			    &ql_dev_acc_attr, &ha->sbus_fpga_dev_handle)
827 			    != DDI_SUCCESS) {
828 				/* We should not fail attach here */
829 				cmn_err(CE_WARN, "%s(%d): Unable to map FPGA",
830 				    QL_NAME, instance);
831 				ha->sbus_fpga_iobase = NULL;
832 			}
833 			progress |= QL_REGS_MAPPED;
834 		} else {
835 			/*
836 			 * Setup the ISP2200 registers address mapping to be
837 			 * accessed by this particular driver.
838 			 * 0x0   Configuration Space
839 			 * 0x1   I/O Space
840 			 * 0x2   32-bit Memory Space address
841 			 * 0x3   64-bit Memory Space address
842 			 */
843 			if (ddi_regs_map_setup(dip, 2, (caddr_t *)&ha->iobase,
844 			    0, 0x100, &ql_dev_acc_attr,
845 			    &ha->dev_handle) != DDI_SUCCESS) {
846 				cmn_err(CE_WARN, "%s(%d): regs_map_setup "
847 				    "failed", QL_NAME, instance);
848 				goto attach_failed;
849 			}
850 			progress |= QL_REGS_MAPPED;
851 
852 			/*
853 			 * We need I/O space mappings for 23xx HBAs for
854 			 * loading flash (FCode). The chip has a bug due to
855 			 * which loading flash fails through mem space
856 			 * mappings in PCI-X mode.
857 			 */
858 			if (ddi_regs_map_setup(dip, 1,
859 			    (caddr_t *)&ha->iomap_iobase, 0, 0x100,
860 			    &ql_dev_acc_attr,
861 			    &ha->iomap_dev_handle) != DDI_SUCCESS) {
862 				cmn_err(CE_WARN, "%s(%d): regs_map_setup(I/O)"
863 				    " failed", QL_NAME, instance);
864 				goto attach_failed;
865 			}
866 			progress |= QL_IOMAP_IOBASE_MAPPED;
867 		}
868 
869 		/*
870 		 * We should map config space before adding interrupt
871 		 * So that the chip type (2200 or 2300) can be determined
872 		 * before the interrupt routine gets a chance to execute.
873 		 */
874 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
875 			if (ddi_regs_map_setup(dip, 0,
876 			    (caddr_t *)&ha->sbus_config_base, 0, 0x100,
877 			    &ql_dev_acc_attr, &ha->sbus_config_handle) !=
878 			    DDI_SUCCESS) {
879 				cmn_err(CE_WARN, "%s(%d): Unable to map sbus "
880 				    "config registers", QL_NAME, instance);
881 				goto attach_failed;
882 			}
883 		} else {
884 			if (pci_config_setup(ha->dip, &ha->pci_handle) !=
885 			    DDI_SUCCESS) {
886 				cmn_err(CE_WARN, "%s(%d): can't setup PCI "
887 				    "config space", QL_NAME, instance);
888 				goto attach_failed;
889 			}
890 		}
891 		progress |= QL_CONFIG_SPACE_SETUP;
892 
893 		ha->subsys_id = (uint16_t)ql_pci_config_get16(ha,
894 		    PCI_CONF_SUBSYSID);
895 		ha->subven_id = (uint16_t)ql_pci_config_get16(ha,
896 		    PCI_CONF_SUBVENID);
897 		ha->ven_id = (uint16_t)ql_pci_config_get16(ha,
898 		    PCI_CONF_VENID);
899 		ha->device_id = (uint16_t)ql_pci_config_get16(ha,
900 		    PCI_CONF_DEVID);
901 		ha->rev_id = (uint8_t)ql_pci_config_get8(ha,
902 		    PCI_CONF_REVID);
903 
904 		EL(ha, "ISP%x chip detected (RevID=%x, VenID=%x, SVenID=%x, "
905 		    "SSysID=%x)\n", ha->device_id, ha->rev_id, ha->ven_id,
906 		    ha->subven_id, ha->subsys_id);
907 
908 		switch (ha->device_id) {
909 		case 0x2300:
910 		case 0x2312:
911 #if !defined(__sparc) || defined(QL_DEBUG_ROUTINES)
912 		case 0x6312:
913 #endif	/* !defined(__sparc) || defined(QL_DEBUG_ROUTINES) */
914 			ha->cfg_flags |= CFG_CTRL_2300;
915 			ha->fw_class = 0x2300;
916 			ha->reg_off = &reg_off_2300;
917 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
918 				goto attach_failed;
919 			}
920 			ha->risc_dump_size = QL_2300_FW_DUMP_SIZE;
921 			ha->fcp_cmd = ql_command_iocb;
922 			ha->ip_cmd = ql_ip_iocb;
923 			ha->ms_cmd = ql_ms_iocb;
924 			ha->ctio_cmd = ql_continue_target_io_iocb;
925 			break;
926 
927 #if !defined(__sparc) || defined(QL_DEBUG_ROUTINES)
928 		case 0x6322:
929 			/*
930 			 * per marketing, fibre-lite HBA's are not supported
931 			 * on sparc platforms
932 			 */
933 			ha->cfg_flags |= CFG_CTRL_6322;
934 			ha->fw_class = 0x6322;
935 			ha->reg_off = &reg_off_2300;
936 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
937 				goto attach_failed;
938 			}
939 			ha->risc_dump_size = QL_6322_FW_DUMP_SIZE;
940 			ha->fcp_cmd = ql_command_iocb;
941 			ha->ip_cmd = ql_ip_iocb;
942 			ha->ms_cmd = ql_ms_iocb;
943 			ha->ctio_cmd = ql_continue_target_io_iocb;
944 			break;
945 
946 #endif	/* !defined(__sparc) || defined(QL_DEBUG_ROUTINES) */
947 		case 0x2200:
948 			ha->cfg_flags |= CFG_CTRL_2200;
949 			ha->reg_off = &reg_off_2200;
950 			ha->fw_class = 0x2200;
951 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
952 				goto attach_failed;
953 			}
954 			ha->risc_dump_size = QL_2200_FW_DUMP_SIZE;
955 			ha->fcp_cmd = ql_command_iocb;
956 			ha->ip_cmd = ql_ip_iocb;
957 			ha->ms_cmd = ql_ms_iocb;
958 			ha->ctio_cmd = ql_continue_target_io_iocb;
959 			break;
960 
961 		case 0x2422:
962 		case 0x2432:
963 		case 0x5422:
964 		case 0x5432:
965 		case 0x8432:
966 #ifdef __sparc
967 			/*
968 			 * Per marketing, the QLA/QLE-2440's (which
969 			 * also use the 2422 & 2432) are only for the
970 			 * x86 platform (SMB market).
971 			 */
972 			if (ha->subsys_id == 0x145 || ha->subsys_id == 0x147 ||
973 			    ha->subsys_id == 0x13e) {
974 				cmn_err(CE_WARN,
975 				    "%s(%d): Unsupported HBA ssid: %x",
976 				    QL_NAME, instance, ha->subsys_id);
977 				goto attach_failed;
978 			}
979 #endif	/* __sparc */
980 			ha->cfg_flags |= CFG_CTRL_2422;
981 			if (ha->device_id == 0x8432) {
982 				ha->cfg_flags |= CFG_CTRL_MENLO;
983 			} else {
984 				ha->flags |= VP_ENABLED;
985 			}
986 			ha->reg_off = &reg_off_2400_2500;
987 			ha->fw_class = 0x2400;
988 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
989 				goto attach_failed;
990 			}
991 			ha->risc_dump_size = QL_24XX_FW_DUMP_SIZE;
992 			ha->fcp_cmd = ql_command_24xx_iocb;
993 			ha->ip_cmd = ql_ip_24xx_iocb;
994 			ha->ms_cmd = ql_ms_24xx_iocb;
995 			ha->ctio_cmd = ql_continue_target_io_2400_iocb;
996 			ha->flash_errlog_start = RD32_IO_REG(ha, ctrl_status) &
997 			    FUNCTION_NUMBER ? FLASH_2400_ERRLOG_START_ADDR_1 :
998 			    FLASH_2400_ERRLOG_START_ADDR_0;
999 			break;
1000 
1001 		case 0x2522:
1002 		case 0x2532:
1003 			ha->cfg_flags |= CFG_CTRL_25XX;
1004 			ha->flags |= VP_ENABLED;
1005 			ha->fw_class = 0x2500;
1006 			ha->reg_off = &reg_off_2400_2500;
1007 			if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1008 				goto attach_failed;
1009 			}
1010 			ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
1011 			ha->fcp_cmd = ql_command_24xx_iocb;
1012 			ha->ip_cmd = ql_ip_24xx_iocb;
1013 			ha->ms_cmd = ql_ms_24xx_iocb;
1014 			ha->ctio_cmd = ql_continue_target_io_2400_iocb;
1015 			ha->flash_errlog_start = RD32_IO_REG(ha, ctrl_status) &
1016 			    FUNCTION_NUMBER ? FLASH_2500_ERRLOG_START_ADDR_1 :
1017 			    FLASH_2500_ERRLOG_START_ADDR_0;
1018 			break;
1019 
1020 		default:
1021 			cmn_err(CE_WARN, "%s(%d): Unsupported device id: %x",
1022 			    QL_NAME, instance, ha->device_id);
1023 			goto attach_failed;
1024 		}
1025 
1026 		/* Setup hba buffer. */
1027 
1028 		size = CFG_IST(ha, CFG_CTRL_2425) ?
1029 		    (REQUEST_QUEUE_SIZE + RESPONSE_QUEUE_SIZE) :
1030 		    (REQUEST_QUEUE_SIZE + RESPONSE_QUEUE_SIZE +
1031 		    RCVBUF_QUEUE_SIZE);
1032 
1033 		if (ql_get_dma_mem(ha, &ha->hba_buf, size, LITTLE_ENDIAN_DMA,
1034 		    MEM_RING_ALIGN) != QL_SUCCESS) {
1035 			cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
1036 			    "alloc failed", QL_NAME, instance);
1037 			goto attach_failed;
1038 		}
1039 		progress |= QL_HBA_BUFFER_SETUP;
1040 
1041 		/* Setup buffer pointers. */
1042 		ha->request_dvma = ha->hba_buf.cookie.dmac_laddress +
1043 		    REQUEST_Q_BUFFER_OFFSET;
1044 		ha->request_ring_bp = (struct cmd_entry *)
1045 		    ((caddr_t)ha->hba_buf.bp + REQUEST_Q_BUFFER_OFFSET);
1046 
1047 		ha->response_dvma = ha->hba_buf.cookie.dmac_laddress +
1048 		    RESPONSE_Q_BUFFER_OFFSET;
1049 		ha->response_ring_bp = (struct sts_entry *)
1050 		    ((caddr_t)ha->hba_buf.bp + RESPONSE_Q_BUFFER_OFFSET);
1051 
1052 		ha->rcvbuf_dvma = ha->hba_buf.cookie.dmac_laddress +
1053 		    RCVBUF_Q_BUFFER_OFFSET;
1054 		ha->rcvbuf_ring_bp = (struct rcvbuf *)
1055 		    ((caddr_t)ha->hba_buf.bp + RCVBUF_Q_BUFFER_OFFSET);
1056 
1057 		/* Allocate resource for QLogic IOCTL */
1058 		(void) ql_alloc_xioctl_resource(ha);
1059 
1060 		/* Setup interrupts */
1061 		if ((rval = ql_setup_interrupts(ha)) != DDI_SUCCESS) {
1062 			cmn_err(CE_WARN, "%s(%d): Failed to add interrupt, "
1063 			    "rval=%xh", QL_NAME, instance, rval);
1064 			goto attach_failed;
1065 		}
1066 
1067 		progress |= (QL_INTR_ADDED | QL_MUTEX_CV_INITED);
1068 
1069 		/*
1070 		 * Determine support for Power Management
1071 		 */
1072 		caps_ptr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR);
1073 
1074 		while (caps_ptr != PCI_CAP_NEXT_PTR_NULL) {
1075 			cap = (uint8_t)ql_pci_config_get8(ha, caps_ptr);
1076 			if (cap == PCI_CAP_ID_PM) {
1077 				ha->pm_capable = 1;
1078 				break;
1079 			}
1080 			caps_ptr = (uint8_t)ql_pci_config_get8(ha, caps_ptr +
1081 			    PCI_CAP_NEXT_PTR);
1082 		}
1083 
1084 		if (ha->pm_capable) {
1085 			/*
1086 			 * Enable PM for 2200 based HBAs only.
1087 			 */
1088 			if (ha->device_id != 0x2200) {
1089 				ha->pm_capable = 0;
1090 			}
1091 		}
1092 
1093 		if (ha->pm_capable) {
1094 			ha->pm_capable = ql_enable_pm;
1095 		}
1096 
1097 		if (ha->pm_capable) {
1098 			/*
1099 			 * Initialize power management bookkeeping;
1100 			 * components are created idle.
1101 			 */
1102 			(void) sprintf(buf, "NAME=%s(%d)", QL_NAME, instance);
1103 			pmcomps[0] = buf;
1104 
1105 			/*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
1106 			if (ddi_prop_update_string_array(DDI_DEV_T_NONE,
1107 			    dip, "pm-components", pmcomps,
1108 			    sizeof (pmcomps) / sizeof (pmcomps[0])) !=
1109 			    DDI_PROP_SUCCESS) {
1110 				cmn_err(CE_WARN, "%s(%d): failed to create"
1111 				    " pm-components property", QL_NAME,
1112 				    instance);
1113 
1114 				/* Initialize adapter. */
1115 				ha->power_level = PM_LEVEL_D0;
1116 				if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1117 					cmn_err(CE_WARN, "%s(%d): failed to"
1118 					    " initialize adapter", QL_NAME,
1119 					    instance);
1120 					goto attach_failed;
1121 				}
1122 			} else {
1123 				ha->power_level = PM_LEVEL_D3;
1124 				if (pm_raise_power(dip, QL_POWER_COMPONENT,
1125 				    PM_LEVEL_D0) != DDI_SUCCESS) {
1126 					cmn_err(CE_WARN, "%s(%d): failed to"
1127 					    " raise power or initialize"
1128 					    " adapter", QL_NAME, instance);
1129 				}
1130 				ASSERT(ha->power_level == PM_LEVEL_D0);
1131 			}
1132 		} else {
1133 			/* Initialize adapter. */
1134 			ha->power_level = PM_LEVEL_D0;
1135 			if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1136 				cmn_err(CE_WARN, "%s(%d): failed to initialize"
1137 				    " adapter", QL_NAME, instance);
1138 			}
1139 		}
1140 
1141 		if (ha->fw_major_version == 0 && ha->fw_minor_version == 0 &&
1142 		    ha->fw_subminor_version == 0) {
1143 			cmn_err(CE_NOTE, "!%s(%d): Firmware not loaded",
1144 			    QL_NAME, ha->instance);
1145 		} else {
1146 			cmn_err(CE_NOTE, "!%s(%d): Firmware version %d.%d.%d",
1147 			    QL_NAME, ha->instance, ha->fw_major_version,
1148 			    ha->fw_minor_version, ha->fw_subminor_version);
1149 		}
1150 
1151 		ha->k_stats = kstat_create(QL_NAME, instance, "statistics",
1152 		    "controller", KSTAT_TYPE_RAW,
1153 		    (uint32_t)sizeof (ql_adapter_stat_t), KSTAT_FLAG_VIRTUAL);
1154 		if (ha->k_stats == NULL) {
1155 			cmn_err(CE_WARN, "%s(%d): Failed to create kstat",
1156 			    QL_NAME, instance);
1157 			goto attach_failed;
1158 		}
1159 		progress |= QL_KSTAT_CREATED;
1160 
1161 		ha->adapter_stats->version = 1;
1162 		ha->k_stats->ks_data = (void *)ha->adapter_stats;
1163 		ha->k_stats->ks_private = ha;
1164 		ha->k_stats->ks_update = ql_kstat_update;
1165 		ha->k_stats->ks_ndata = 1;
1166 		ha->k_stats->ks_data_size = sizeof (ql_adapter_stat_t);
1167 		kstat_install(ha->k_stats);
1168 
1169 		if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
1170 		    instance, DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
1171 			cmn_err(CE_WARN, "%s(%d): failed to create minor node",
1172 			    QL_NAME, instance);
1173 			goto attach_failed;
1174 		}
1175 		progress |= QL_MINOR_NODE_CREATED;
1176 
1177 		/* Allocate a transport structure for this instance */
1178 		tran = kmem_zalloc(sizeof (fc_fca_tran_t), KM_SLEEP);
1179 		ASSERT(tran != NULL);
1180 
1181 		progress |= QL_FCA_TRAN_ALLOCED;
1182 
1183 		/* fill in the structure */
1184 		tran->fca_numports = 1;
1185 		tran->fca_version = FCTL_FCA_MODREV_5;
1186 		if (CFG_IST(ha, CFG_CTRL_2422)) {
1187 			tran->fca_num_npivports = MAX_24_VIRTUAL_PORTS;
1188 		} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
1189 			tran->fca_num_npivports = MAX_25_VIRTUAL_PORTS;
1190 		}
1191 		bcopy(ha->loginparams.node_ww_name.raw_wwn,
1192 		    tran->fca_perm_pwwn.raw_wwn, 8);
1193 
1194 		EL(ha, "FCA version %d\n", tran->fca_version);
1195 
1196 		/* Specify the amount of space needed in each packet */
1197 		tran->fca_pkt_size = sizeof (ql_srb_t);
1198 
1199 		/* command limits are usually dictated by hardware */
1200 		tran->fca_cmd_max = MAX_OUTSTANDING_COMMANDS;
1201 
1202 		/* dmaattr are static, set elsewhere. */
1203 		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
1204 /* XXX need to remove */
1205 /*			tran->fca_dma_lim = &ql_64bit_dma_limit; */
1206 			tran->fca_dma_attr = &ql_64bit_io_dma_attr;
1207 			tran->fca_dma_fcp_cmd_attr = &ql_64fcp_cmd_dma_attr;
1208 			tran->fca_dma_fcp_rsp_attr = &ql_64fcp_rsp_dma_attr;
1209 			tran->fca_dma_fcp_data_attr = &ql_64fcp_data_dma_attr;
1210 			tran->fca_dma_fcsm_cmd_attr = &ql_64fcsm_cmd_dma_attr;
1211 			tran->fca_dma_fcsm_rsp_attr = &ql_64fcsm_rsp_dma_attr;
1212 			tran->fca_dma_fcip_cmd_attr = &ql_64fcip_cmd_dma_attr;
1213 			tran->fca_dma_fcip_rsp_attr = &ql_64fcip_rsp_dma_attr;
1214 		} else {
1215 /*			tran->fca_dma_lim = &ql_32bit_dma_limit; */
1216 			tran->fca_dma_attr = &ql_32bit_io_dma_attr;
1217 			tran->fca_dma_fcp_cmd_attr = &ql_32fcp_cmd_dma_attr;
1218 			tran->fca_dma_fcp_rsp_attr = &ql_32fcp_rsp_dma_attr;
1219 			tran->fca_dma_fcp_data_attr = &ql_32fcp_data_dma_attr;
1220 			tran->fca_dma_fcsm_cmd_attr = &ql_32fcsm_cmd_dma_attr;
1221 			tran->fca_dma_fcsm_rsp_attr = &ql_32fcsm_rsp_dma_attr;
1222 			tran->fca_dma_fcip_cmd_attr = &ql_32fcip_cmd_dma_attr;
1223 			tran->fca_dma_fcip_rsp_attr = &ql_32fcip_rsp_dma_attr;
1224 		}
1225 
1226 		tran->fca_acc_attr = &ql_dev_acc_attr;
1227 		tran->fca_iblock = &(ha->iblock_cookie);
1228 
1229 		/* the remaining values are simply function vectors */
1230 		tran->fca_bind_port = ql_bind_port;
1231 		tran->fca_unbind_port = ql_unbind_port;
1232 		tran->fca_init_pkt = ql_init_pkt;
1233 		tran->fca_un_init_pkt = ql_un_init_pkt;
1234 		tran->fca_els_send = ql_els_send;
1235 		tran->fca_get_cap = ql_get_cap;
1236 		tran->fca_set_cap = ql_set_cap;
1237 		tran->fca_getmap = ql_getmap;
1238 		tran->fca_transport = ql_transport;
1239 		tran->fca_ub_alloc = ql_ub_alloc;
1240 		tran->fca_ub_free = ql_ub_free;
1241 		tran->fca_ub_release = ql_ub_release;
1242 		tran->fca_abort = ql_abort;
1243 		tran->fca_reset = ql_reset;
1244 		tran->fca_port_manage = ql_port_manage;
1245 		tran->fca_get_device = ql_get_device;
1246 		tran->fca_notify = ql_notify;
1247 
1248 		/* give it to the FC transport */
1249 		if (fc_fca_attach(dip, tran) != DDI_SUCCESS) {
1250 			cmn_err(CE_WARN, "%s(%d): FCA attach failed", QL_NAME,
1251 			    instance);
1252 			goto attach_failed;
1253 		}
1254 		progress |= QL_FCA_ATTACH_DONE;
1255 
1256 		/* Stash the structure so it can be freed at detach */
1257 		ha->tran = tran;
1258 
1259 		/* Acquire global state lock. */
1260 		GLOBAL_STATE_LOCK();
1261 
1262 		/* Add adapter structure to link list. */
1263 		ql_add_link_b(&ql_hba, &ha->hba);
1264 
1265 		/* Start one second driver timer. */
1266 		if (ql_timer_timeout_id == NULL) {
1267 			ql_timer_ticks = drv_usectohz(1000000);
1268 			ql_timer_timeout_id = timeout(ql_timer, (void *)0,
1269 			    ql_timer_ticks);
1270 		}
1271 
1272 		/* Release global state lock. */
1273 		GLOBAL_STATE_UNLOCK();
1274 
1275 		/* Determine and populate HBA fru info */
1276 		ql_setup_fruinfo(ha);
1277 
1278 		/* Setup task_daemon thread. */
1279 		(void) thread_create(NULL, 0, (void (*)())ql_task_daemon, ha,
1280 		    0, &p0, TS_RUN, minclsyspri);
1281 
1282 		progress |= QL_TASK_DAEMON_STARTED;
1283 
1284 		ddi_report_dev(dip);
1285 
1286 		/* Disable link reset in panic path */
1287 		ha->lip_on_panic = 1;
1288 
1289 		rval = DDI_SUCCESS;
1290 		break;
1291 
1292 attach_failed:
1293 		if (progress & QL_FCA_ATTACH_DONE) {
1294 			(void) fc_fca_detach(dip);
1295 			progress &= ~QL_FCA_ATTACH_DONE;
1296 		}
1297 
1298 		if (progress & QL_FCA_TRAN_ALLOCED) {
1299 			kmem_free(tran, sizeof (fc_fca_tran_t));
1300 			progress &= ~QL_FCA_TRAN_ALLOCED;
1301 		}
1302 
1303 		if (progress & QL_MINOR_NODE_CREATED) {
1304 			ddi_remove_minor_node(dip, "devctl");
1305 			progress &= ~QL_MINOR_NODE_CREATED;
1306 		}
1307 
1308 		if (progress & QL_KSTAT_CREATED) {
1309 			kstat_delete(ha->k_stats);
1310 			progress &= ~QL_KSTAT_CREATED;
1311 		}
1312 
1313 		if (progress & QL_TASK_DAEMON_STARTED) {
1314 			TASK_DAEMON_LOCK(ha);
1315 
1316 			ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
1317 
1318 			cv_signal(&ha->cv_task_daemon);
1319 
1320 			/* Release task daemon lock. */
1321 			TASK_DAEMON_UNLOCK(ha);
1322 
			/* Wait for task daemon to stop running. */
1324 			while (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
1325 				ql_delay(ha, 10000);
1326 			}
1327 			progress &= ~QL_TASK_DAEMON_STARTED;
1328 		}
1329 
1330 		if (progress & QL_IOMAP_IOBASE_MAPPED) {
1331 			ddi_regs_map_free(&ha->iomap_dev_handle);
1332 			progress &= ~QL_IOMAP_IOBASE_MAPPED;
1333 		}
1334 
1335 		if (progress & QL_CONFIG_SPACE_SETUP) {
1336 			if (CFG_IST(ha, CFG_SBUS_CARD)) {
1337 				ddi_regs_map_free(&ha->sbus_config_handle);
1338 			} else {
1339 				pci_config_teardown(&ha->pci_handle);
1340 			}
1341 			progress &= ~QL_CONFIG_SPACE_SETUP;
1342 		}
1343 
1344 		if (progress & QL_INTR_ADDED) {
1345 			ql_disable_intr(ha);
1346 			ql_release_intr(ha);
1347 			progress &= ~QL_INTR_ADDED;
1348 		}
1349 
1350 		if (progress & QL_MUTEX_CV_INITED) {
1351 			ql_destroy_mutex(ha);
1352 			progress &= ~QL_MUTEX_CV_INITED;
1353 		}
1354 
1355 		if (progress & QL_HBA_BUFFER_SETUP) {
1356 			ql_free_phys(ha, &ha->hba_buf);
1357 			progress &= ~QL_HBA_BUFFER_SETUP;
1358 		}
1359 
1360 		if (progress & QL_REGS_MAPPED) {
1361 			ddi_regs_map_free(&ha->dev_handle);
1362 			if (ha->sbus_fpga_iobase != NULL) {
1363 				ddi_regs_map_free(&ha->sbus_fpga_dev_handle);
1364 			}
1365 			progress &= ~QL_REGS_MAPPED;
1366 		}
1367 
1368 		if (progress & QL_SOFT_STATE_ALLOCED) {
1369 
1370 			ql_fcache_rel(ha->fcache);
1371 
1372 			ASSERT(ha->dev && ha->outstanding_cmds &&
1373 			    ha->ub_array && ha->adapter_stats);
1374 
1375 			kmem_free(ha->adapter_stats,
1376 			    sizeof (*ha->adapter_stats));
1377 
1378 			kmem_free(ha->ub_array, sizeof (*ha->ub_array) *
1379 			    QL_UB_LIMIT);
1380 
1381 			kmem_free(ha->outstanding_cmds,
1382 			    sizeof (*ha->outstanding_cmds) *
1383 			    MAX_OUTSTANDING_COMMANDS);
1384 
1385 			if (ha->devpath != NULL) {
1386 				kmem_free(ha->devpath,
1387 				    strlen(ha->devpath) + 1);
1388 			}
1389 
1390 			kmem_free(ha->dev, sizeof (*ha->dev) *
1391 			    DEVICE_HEAD_LIST_SIZE);
1392 
1393 			if (ha->xioctl != NULL) {
1394 				ql_free_xioctl_resource(ha);
1395 			}
1396 
1397 			if (ha->fw_module != NULL) {
1398 				(void) ddi_modclose(ha->fw_module);
1399 			}
1400 
1401 			ddi_soft_state_free(ql_state, instance);
1402 			progress &= ~QL_SOFT_STATE_ALLOCED;
1403 		}
1404 		ASSERT(progress == 0);
1405 
1406 		ddi_prop_remove_all(dip);
1407 		rval = DDI_FAILURE;
1408 		break;
1409 
1410 	case DDI_RESUME:
1411 		rval = DDI_FAILURE;
1412 
1413 		ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
1414 		if (ha == NULL) {
1415 			cmn_err(CE_WARN, "%s(%d): can't get soft state",
1416 			    QL_NAME, instance);
1417 			break;
1418 		}
1419 
1420 		if (ha->flags & TARGET_MODE_INITIALIZED) {
1421 			/* Enable Target Mode */
1422 			ha->init_ctrl_blk.cb.lun_enables[0] = (uint8_t)
1423 			    (ha->init_ctrl_blk.cb.lun_enables[0] | 0x01);
1424 			ha->init_ctrl_blk.cb.immediate_notify_resouce_count =
1425 			    ha->ub_notify_count;
1426 			ha->init_ctrl_blk.cb.command_resouce_count =
1427 			    ha->ub_command_count;
1428 		} else {
1429 			ha->init_ctrl_blk.cb.lun_enables[0] = 0;
1430 			ha->init_ctrl_blk.cb.lun_enables[1] = 0;
1431 			ha->init_ctrl_blk.cb.immediate_notify_resouce_count =
1432 			    0;
1433 			ha->init_ctrl_blk.cb.command_resouce_count = 0;
1434 		}
1435 
1436 		ha->power_level = PM_LEVEL_D3;
1437 		if (ha->pm_capable) {
1438 			/*
1439 			 * Get ql_power to do power on initialization
1440 			 */
1441 			if (pm_raise_power(dip, QL_POWER_COMPONENT,
1442 			    PM_LEVEL_D0) != DDI_SUCCESS) {
1443 				cmn_err(CE_WARN, "%s(%d): can't raise adapter"
1444 				    " power", QL_NAME, instance);
1445 			}
1446 		}
1447 
1448 		/*
1449 		 * There is a bug in DR that prevents PM framework
1450 		 * from calling ql_power.
1451 		 */
1452 		if (ha->power_level == PM_LEVEL_D3) {
1453 			ha->power_level = PM_LEVEL_D0;
1454 
1455 			if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1456 				cmn_err(CE_WARN, "%s(%d): can't initialize the"
1457 				    " adapter", QL_NAME, instance);
1458 			}
1459 
1460 			/* Wake up task_daemon. */
1461 			ql_awaken_task_daemon(ha, NULL, TASK_DAEMON_ALIVE_FLG,
1462 			    0);
1463 		}
1464 
1465 		/* Acquire global state lock. */
1466 		GLOBAL_STATE_LOCK();
1467 
1468 		/* Restart driver timer. */
1469 		if (ql_timer_timeout_id == NULL) {
1470 			ql_timer_timeout_id = timeout(ql_timer, (void *)0,
1471 			    ql_timer_ticks);
1472 		}
1473 
1474 		/* Release global state lock. */
1475 		GLOBAL_STATE_UNLOCK();
1476 
1477 		/* Wake up command start routine. */
1478 		ADAPTER_STATE_LOCK(ha);
1479 		ha->flags &= ~ADAPTER_SUSPENDED;
1480 		ADAPTER_STATE_UNLOCK(ha);
1481 
1482 		/*
1483 		 * Transport doesn't make FC discovery in polled
1484 		 * mode; So we need the daemon thread's services
1485 		 * right here.
1486 		 */
1487 		(void) callb_generic_cpr(&ha->cprinfo, CB_CODE_CPR_RESUME);
1488 
1489 		rval = DDI_SUCCESS;
1490 
1491 		/* Restart IP if it was running. */
1492 		if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
1493 			(void) ql_initialize_ip(ha);
1494 			ql_isp_rcvbuf(ha);
1495 		}
1496 		break;
1497 
1498 	default:
1499 		cmn_err(CE_WARN, "%s(%d): attach, unknown code:"
1500 		    " %x", QL_NAME, ddi_get_instance(dip), cmd);
1501 		rval = DDI_FAILURE;
1502 		break;
1503 	}
1504 
1505 	kmem_free(buf, MAXPATHLEN);
1506 
1507 	if (rval != DDI_SUCCESS) {
1508 		/*EMPTY*/
1509 		QL_PRINT_2(CE_CONT, "(%d): failed, rval = %xh\n",
1510 		    ddi_get_instance(dip), rval);
1511 	} else {
1512 		/*EMPTY*/
1513 		QL_PRINT_3(CE_CONT, "(%d): done\n", ddi_get_instance(dip));
1514 	}
1515 
1516 	return (rval);
1517 }
1518 
1519 /*
1520  * ql_detach
 *	Used to remove all the state associated with a given
 *	instance of a device node prior to the removal of that
1523  *	instance from the system.
1524  *
1525  * Input:
1526  *	dip = pointer to device information structure.
1527  *	cmd = type of detach.
1528  *
1529  * Returns:
1530  *	DDI_SUCCESS or DDI_FAILURE.
1531  *
1532  * Context:
1533  *	Kernel context.
1534  */
1535 static int
1536 ql_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1537 {
1538 	ql_adapter_state_t	*ha, *vha;
1539 	ql_tgt_t		*tq;
1540 	int			try;
1541 	uint16_t		index;
1542 	ql_link_t		*link;
1543 	char			*buf;
1544 	timeout_id_t		timer_id = NULL;
1545 	int			rval = DDI_SUCCESS;
1546 
1547 	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
1548 	if (ha == NULL) {
1549 		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
1550 		    ddi_get_instance(dip));
1551 		return (DDI_FAILURE);
1552 	}
1553 
1554 	QL_PRINT_3(CE_CONT, "(%d): started, cmd=%xh\n", ha->instance, cmd);
1555 
1556 	buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
1557 
1558 	switch (cmd) {
1559 	case DDI_DETACH:
1560 		ADAPTER_STATE_LOCK(ha);
1561 		ha->flags |= (ADAPTER_SUSPENDED | COMMAND_ABORT_TIMEOUT);
1562 		ADAPTER_STATE_UNLOCK(ha);
1563 
1564 		/* Acquire task daemon lock. */
1565 		TASK_DAEMON_LOCK(ha);
1566 
1567 		ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
1568 		cv_signal(&ha->cv_task_daemon);
1569 
1570 		/* Release task daemon lock. */
1571 		TASK_DAEMON_UNLOCK(ha);
1572 
1573 		/*
1574 		 * Wait for task daemon to stop running.
1575 		 * Internal command timeout is approximately
1576 		 * 30 seconds, so it would help in some corner
1577 		 * cases to wait that long
1578 		 */
1579 		try = 0;
1580 		while ((ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) &&
1581 		    try < 3000) {
1582 			ql_delay(ha, 10000);
1583 			try++;
1584 		}
1585 
1586 		TASK_DAEMON_LOCK(ha);
1587 		if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
1588 			ha->task_daemon_flags &= ~TASK_DAEMON_STOP_FLG;
1589 			TASK_DAEMON_UNLOCK(ha);
1590 			EL(ha, "failed, could not stop task daemon\n");
1591 			return (DDI_FAILURE);
1592 		}
1593 		TASK_DAEMON_UNLOCK(ha);
1594 
1595 		/* Acquire global state lock. */
1596 		GLOBAL_STATE_LOCK();
1597 
1598 		/* Disable driver timer if no adapters. */
1599 		if (ql_timer_timeout_id && ql_hba.first == &ha->hba &&
1600 		    ql_hba.last == &ha->hba) {
1601 			timer_id = ql_timer_timeout_id;
1602 			ql_timer_timeout_id = NULL;
1603 		}
1604 		ql_remove_link(&ql_hba, &ha->hba);
1605 
1606 		GLOBAL_STATE_UNLOCK();
1607 
1608 		if (timer_id) {
1609 			(void) untimeout(timer_id);
1610 		}
1611 
1612 		if (ha->pm_capable) {
1613 			if (pm_lower_power(dip, QL_POWER_COMPONENT,
1614 			    PM_LEVEL_D3) != DDI_SUCCESS) {
1615 				cmn_err(CE_WARN, "%s(%d): failed to lower the"
1616 				    " power", QL_NAME, ha->instance);
1617 			}
1618 		}
1619 
1620 		/*
1621 		 * If pm_lower_power shutdown the adapter, there
1622 		 * isn't much else to do
1623 		 */
1624 		if (ha->power_level != PM_LEVEL_D3) {
1625 			ql_halt(ha, PM_LEVEL_D3);
1626 		}
1627 
1628 		/* Remove virtual ports. */
1629 		while ((vha = ha->vp_next) != NULL) {
1630 			ql_vport_destroy(vha);
1631 		}
1632 
1633 		/* Free target queues. */
1634 		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
1635 			link = ha->dev[index].first;
1636 			while (link != NULL) {
1637 				tq = link->base_address;
1638 				link = link->next;
1639 				ql_dev_free(ha, tq);
1640 			}
1641 		}
1642 
1643 		/*
1644 		 * Free unsolicited buffers.
1645 		 * If we are here then there are no ULPs still
1646 		 * alive that wish to talk to ql so free up
1647 		 * any SRB_IP_UB_UNUSED buffers that are
1648 		 * lingering around
1649 		 */
1650 		QL_UB_LOCK(ha);
1651 		for (index = 0; index < QL_UB_LIMIT; index++) {
1652 			fc_unsol_buf_t *ubp = ha->ub_array[index];
1653 
1654 			if (ubp != NULL) {
1655 				ql_srb_t *sp = ubp->ub_fca_private;
1656 
1657 				sp->flags |= SRB_UB_FREE_REQUESTED;
1658 
1659 				while (!(sp->flags & SRB_UB_IN_FCA) ||
1660 				    (sp->flags & (SRB_UB_CALLBACK |
1661 				    SRB_UB_ACQUIRED))) {
1662 					QL_UB_UNLOCK(ha);
1663 					delay(drv_usectohz(100000));
1664 					QL_UB_LOCK(ha);
1665 				}
1666 				ha->ub_array[index] = NULL;
1667 
1668 				QL_UB_UNLOCK(ha);
1669 				ql_free_unsolicited_buffer(ha, ubp);
1670 				QL_UB_LOCK(ha);
1671 			}
1672 		}
1673 		QL_UB_UNLOCK(ha);
1674 
1675 		/* Free any saved RISC code. */
1676 		if (ha->risc_code != NULL) {
1677 			kmem_free(ha->risc_code, ha->risc_code_size);
1678 			ha->risc_code = NULL;
1679 			ha->risc_code_size = 0;
1680 		}
1681 
1682 		if (ha->fw_module != NULL) {
1683 			(void) ddi_modclose(ha->fw_module);
1684 			ha->fw_module = NULL;
1685 		}
1686 
1687 		/* Free resources. */
1688 		ddi_prop_remove_all(dip);
1689 		(void) fc_fca_detach(dip);
1690 		kmem_free(ha->tran, sizeof (fc_fca_tran_t));
1691 		ddi_remove_minor_node(dip, "devctl");
1692 		if (ha->k_stats != NULL) {
1693 			kstat_delete(ha->k_stats);
1694 		}
1695 
1696 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
1697 			ddi_regs_map_free(&ha->sbus_config_handle);
1698 		} else {
1699 			ddi_regs_map_free(&ha->iomap_dev_handle);
1700 			pci_config_teardown(&ha->pci_handle);
1701 		}
1702 
1703 		ql_disable_intr(ha);
1704 		ql_release_intr(ha);
1705 
1706 		ql_free_xioctl_resource(ha);
1707 
1708 		ql_destroy_mutex(ha);
1709 
1710 		ql_free_phys(ha, &ha->hba_buf);
1711 		ql_free_phys(ha, &ha->fwexttracebuf);
1712 		ql_free_phys(ha, &ha->fwfcetracebuf);
1713 
1714 		ddi_regs_map_free(&ha->dev_handle);
1715 		if (ha->sbus_fpga_iobase != NULL) {
1716 			ddi_regs_map_free(&ha->sbus_fpga_dev_handle);
1717 		}
1718 
1719 		ql_fcache_rel(ha->fcache);
1720 		if (ha->vcache != NULL) {
1721 			kmem_free(ha->vcache, QL_24XX_VPD_SIZE);
1722 		}
1723 
1724 		if (ha->pi_attrs != NULL) {
1725 			kmem_free(ha->pi_attrs, sizeof (fca_port_attrs_t));
1726 		}
1727 
1728 		ASSERT(ha->dev && ha->outstanding_cmds && ha->ub_array &&
1729 		    ha->adapter_stats);
1730 
1731 		kmem_free(ha->adapter_stats, sizeof (*ha->adapter_stats));
1732 
1733 		kmem_free(ha->ub_array, sizeof (*ha->ub_array) * QL_UB_LIMIT);
1734 
1735 		kmem_free(ha->outstanding_cmds, sizeof (*ha->outstanding_cmds) *
1736 		    MAX_OUTSTANDING_COMMANDS);
1737 
1738 		if (ha->devpath != NULL) {
1739 			kmem_free(ha->devpath, strlen(ha->devpath) + 1);
1740 		}
1741 
1742 		kmem_free(ha->dev, sizeof (*ha->dev) * DEVICE_HEAD_LIST_SIZE);
1743 
1744 		EL(ha, "detached\n");
1745 
1746 		ddi_soft_state_free(ql_state, ha->instance);
1747 
1748 		break;
1749 
1750 	case DDI_SUSPEND:
1751 		ADAPTER_STATE_LOCK(ha);
1752 
1753 		try = 0;
1754 		ha->flags |= ADAPTER_SUSPENDED;
1755 		while (ha->flags & ADAPTER_TIMER_BUSY && try++ < 10) {
1756 			ADAPTER_STATE_UNLOCK(ha);
1757 			delay(drv_usectohz(1000000));
1758 			ADAPTER_STATE_LOCK(ha);
1759 		}
1760 		if (ha->busy || ha->flags & ADAPTER_TIMER_BUSY) {
1761 			ha->flags &= ~ADAPTER_SUSPENDED;
1762 			ADAPTER_STATE_UNLOCK(ha);
1763 			rval = DDI_FAILURE;
1764 			cmn_err(CE_WARN, "!%s(%d): Fail suspend"
1765 			    " busy %xh flags %xh", QL_NAME, ha->instance,
1766 			    ha->busy, ha->flags);
1767 			break;
1768 		}
1769 
1770 		ADAPTER_STATE_UNLOCK(ha);
1771 
1772 		if (ha->flags & IP_INITIALIZED) {
1773 			(void) ql_shutdown_ip(ha);
1774 		}
1775 
1776 		try = ql_suspend_adapter(ha);
1777 		if (try != QL_SUCCESS) {
1778 			ADAPTER_STATE_LOCK(ha);
1779 			ha->flags &= ~ADAPTER_SUSPENDED;
1780 			ADAPTER_STATE_UNLOCK(ha);
1781 			cmn_err(CE_WARN, "%s(%d): Fail suspend rval %xh",
1782 			    QL_NAME, ha->instance, try);
1783 
1784 			/* Restart IP if it was running. */
1785 			if (ha->flags & IP_ENABLED &&
1786 			    !(ha->flags & IP_INITIALIZED)) {
1787 				(void) ql_initialize_ip(ha);
1788 				ql_isp_rcvbuf(ha);
1789 			}
1790 			rval = DDI_FAILURE;
1791 			break;
1792 		}
1793 
1794 		/* Acquire global state lock. */
1795 		GLOBAL_STATE_LOCK();
1796 
1797 		/* Disable driver timer if last adapter. */
1798 		if (ql_timer_timeout_id && ql_hba.first == &ha->hba &&
1799 		    ql_hba.last == &ha->hba) {
1800 			timer_id = ql_timer_timeout_id;
1801 			ql_timer_timeout_id = NULL;
1802 		}
1803 		GLOBAL_STATE_UNLOCK();
1804 
1805 		if (timer_id) {
1806 			(void) untimeout(timer_id);
1807 		}
1808 
1809 		break;
1810 
1811 	default:
1812 		rval = DDI_FAILURE;
1813 		break;
1814 	}
1815 
1816 	kmem_free(buf, MAXPATHLEN);
1817 
1818 	if (rval != DDI_SUCCESS) {
1819 		if (ha != NULL) {
1820 			EL(ha, "failed, rval = %xh\n", rval);
1821 		} else {
1822 			/*EMPTY*/
1823 			QL_PRINT_2(CE_CONT, "(%d): failed, rval = %xh\n",
1824 			    ddi_get_instance(dip), rval);
1825 		}
1826 	} else {
1827 		/*EMPTY*/
1828 		QL_PRINT_3(CE_CONT, "(%d): done\n", ddi_get_instance(dip));
1829 	}
1830 
1831 	return (rval);
1832 }
1833 
1834 /*
1835  * ql_power
1836  *	Power a device attached to the system.
1837  *
1838  * Input:
1839  *	dip = pointer to device information structure.
1840  *	component = device.
1841  *	level = power level.
1842  *
1843  * Returns:
1844  *	DDI_SUCCESS or DDI_FAILURE.
1845  *
1846  * Context:
1847  *	Kernel context.
1848  */
1849 /* ARGSUSED */
1850 static int
1851 ql_power(dev_info_t *dip, int component, int level)
1852 {
1853 	int			rval = DDI_FAILURE;
1854 	off_t			csr;
1855 	uint8_t			saved_pm_val;
1856 	ql_adapter_state_t	*ha;
1857 	char			*buf;
1858 	char			*path;
1859 
1860 	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
1861 	if (ha == NULL || ha->pm_capable == 0) {
1862 		QL_PRINT_2(CE_CONT, "(%d): no hba or PM not supported\n",
1863 		    ddi_get_instance(dip));
1864 		return (rval);
1865 	}
1866 
1867 	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
1868 
1869 	buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
1870 	path = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
1871 
1872 	if (component != QL_POWER_COMPONENT || (level != PM_LEVEL_D0 &&
1873 	    level != PM_LEVEL_D3)) {
1874 		EL(ha, "invalid, component=%xh or level=%xh\n",
1875 		    component, level);
1876 		return (rval);
1877 	}
1878 
1879 	csr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR) + PCI_PMCSR;
1880 	ASSERT(csr == QL_PM_CS_REG);
1881 
1882 	(void) snprintf(buf, sizeof (buf),
1883 	    "Qlogic %s(%d): %s\n\t", QL_NAME, ddi_get_instance(dip),
1884 	    ddi_pathname(dip, path));
1885 
1886 	switch (level) {
1887 	case PM_LEVEL_D0:	/* power up to D0 state - fully on */
1888 
1889 		QL_PM_LOCK(ha);
1890 		if (ha->power_level == PM_LEVEL_D0) {
1891 			QL_PM_UNLOCK(ha);
1892 			rval = DDI_SUCCESS;
1893 			break;
1894 		}
1895 
1896 		/*
1897 		 * Enable interrupts now
1898 		 */
1899 		saved_pm_val = ha->power_level;
1900 		ha->power_level = PM_LEVEL_D0;
1901 		QL_PM_UNLOCK(ha);
1902 
1903 		ql_pci_config_put16(ha, csr, PCI_PMCSR_D0);
1904 
1905 		/*
1906 		 * Delay after reset, for chip to recover.
1907 		 * Otherwise causes system PANIC
1908 		 */
1909 		drv_usecwait(20000);
1910 
1911 		if (ha->config_saved) {
1912 			ha->config_saved = 0;
1913 			if (QL_RESTORE_CONFIG_REGS(dip) != DDI_SUCCESS) {
1914 				QL_PM_LOCK(ha);
1915 				ha->power_level = saved_pm_val;
1916 				QL_PM_UNLOCK(ha);
1917 				cmn_err(CE_WARN, "%s failed to restore "
1918 				    "config regs", buf);
1919 				break;
1920 			}
1921 		}
1922 
1923 		if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1924 			cmn_err(CE_WARN, "%s adapter initialization failed",
1925 			    buf);
1926 		}
1927 
1928 		/* Wake up task_daemon. */
1929 		ql_awaken_task_daemon(ha, NULL, TASK_DAEMON_ALIVE_FLG |
1930 		    TASK_DAEMON_SLEEPING_FLG, 0);
1931 
1932 		/* Restart IP if it was running. */
1933 		if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
1934 			(void) ql_initialize_ip(ha);
1935 			ql_isp_rcvbuf(ha);
1936 		}
1937 
1938 		cmn_err(CE_NOTE, QL_BANG "ql_power(%d): %s is powered ON\n",
1939 		    ha->instance, QL_NAME);
1940 
1941 		rval = DDI_SUCCESS;
1942 		break;
1943 
1944 	case PM_LEVEL_D3:	/* power down to D3 state - off */
1945 
1946 		QL_PM_LOCK(ha);
1947 
1948 		if (ha->busy || ((ha->task_daemon_flags &
1949 		    TASK_DAEMON_SLEEPING_FLG) == 0)) {
1950 			QL_PM_UNLOCK(ha);
1951 			break;
1952 		}
1953 
1954 		if (ha->power_level == PM_LEVEL_D3) {
1955 			rval = DDI_SUCCESS;
1956 			QL_PM_UNLOCK(ha);
1957 			break;
1958 		}
1959 		QL_PM_UNLOCK(ha);
1960 
1961 		if (QL_SAVE_CONFIG_REGS(dip) != DDI_SUCCESS) {
1962 			cmn_err(CE_WARN, "!Qlogic %s(%d): %s failed to save"
1963 			    " config regs", QL_NAME, ha->instance, buf);
1964 			break;
1965 		}
1966 		ha->config_saved = 1;
1967 
1968 		/*
1969 		 * Don't enable interrupts. Running mailbox commands with
1970 		 * interrupts enabled could cause hangs since pm_run_scan()
1971 		 * runs out of a callout thread and on single cpu systems
1972 		 * cv_timedwait(), called from ql_mailbox_command(), would
1973 		 * not get to run.
1974 		 */
1975 		TASK_DAEMON_LOCK(ha);
1976 		ha->task_daemon_flags |= TASK_DAEMON_POWERING_DOWN;
1977 		TASK_DAEMON_UNLOCK(ha);
1978 
1979 		ql_halt(ha, PM_LEVEL_D3);
1980 
1981 		/*
1982 		 * Setup ql_intr to ignore interrupts from here on.
1983 		 */
1984 		QL_PM_LOCK(ha);
1985 		ha->power_level = PM_LEVEL_D3;
1986 		QL_PM_UNLOCK(ha);
1987 
1988 		ql_pci_config_put16(ha, csr, PCI_PMCSR_D3HOT);
1989 
1990 		cmn_err(CE_NOTE, QL_BANG "ql_power(%d): %s is powered OFF\n",
1991 		    ha->instance, QL_NAME);
1992 
1993 		rval = DDI_SUCCESS;
1994 		break;
1995 	}
1996 
1997 	kmem_free(buf, MAXPATHLEN);
1998 	kmem_free(path, MAXPATHLEN);
1999 
2000 	QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);
2001 
2002 	return (rval);
2003 }
2004 
2005 /* ************************************************************************ */
2006 /*		Fibre Channel Adapter (FCA) Transport Functions.	    */
2007 /* ************************************************************************ */
2008 
2009 /*
2010  * ql_bind_port
2011  *	Handling port binding. The FC Transport attempts to bind an FCA port
2012  *	when it is ready to start transactions on the port. The FC Transport
2013  *	will call the fca_bind_port() function specified in the fca_transport
2014  *	structure it receives. The FCA must fill in the port_info structure
2015  *	passed in the call and also stash the information for future calls.
2016  *
2017  * Input:
2018  *	dip = pointer to FCA information structure.
2019  *	port_info = pointer to port information structure.
2020  *	bind_info = pointer to bind information structure.
2021  *
2022  * Returns:
2023  *	NULL = failure
2024  *
2025  * Context:
2026  *	Kernel context.
2027  */
2028 static opaque_t
2029 ql_bind_port(dev_info_t *dip, fc_fca_port_info_t *port_info,
2030     fc_fca_bind_info_t *bind_info)
2031 {
2032 	ql_adapter_state_t	*ha, *vha;
2033 	opaque_t		fca_handle = NULL;
2034 	port_id_t		d_id;
2035 	int			port_npiv = bind_info->port_npiv;
2036 	uchar_t			*port_nwwn = bind_info->port_nwwn.raw_wwn;
2037 	uchar_t			*port_pwwn = bind_info->port_pwwn.raw_wwn;
2038 
2039 	/* get state info based on the dip */
2040 	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
2041 	if (ha == NULL) {
2042 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2043 		    (void *)fca_handle);
2044 		return (NULL);
2045 	}
2046 	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
2047 
2048 	/* Verify port number is supported. */
2049 	if (port_npiv != 0) {
2050 		if (!(ha->flags & VP_ENABLED)) {
2051 			QL_PRINT_2(CE_CONT, "(%d): FC_NPIV_NOT_SUPPORTED\n",
2052 			    ha->instance);
2053 			port_info->pi_error = FC_NPIV_NOT_SUPPORTED;
2054 			return (NULL);
2055 		}
2056 		if (!(ha->flags & POINT_TO_POINT)) {
2057 			QL_PRINT_2(CE_CONT, "(%d): FC_NPIV_WRONG_TOPOLOGY\n",
2058 			    ha->instance);
2059 			port_info->pi_error = FC_NPIV_WRONG_TOPOLOGY;
2060 			return (NULL);
2061 		}
2062 		if (!(ha->flags & FDISC_ENABLED)) {
2063 			QL_PRINT_2(CE_CONT, "(%d): switch does not support "
2064 			    "FDISC\n", ha->instance);
2065 			port_info->pi_error = FC_NPIV_FDISC_FAILED;
2066 			return (NULL);
2067 		}
2068 		if (bind_info->port_num > (CFG_IST(ha, CFG_CTRL_2422) ?
2069 		    MAX_24_VIRTUAL_PORTS : MAX_25_VIRTUAL_PORTS)) {
2070 			QL_PRINT_2(CE_CONT, "(%d): port number=%d "
2071 			    "FC_OUTOFBOUNDS\n", ha->instance);
2072 			port_info->pi_error = FC_OUTOFBOUNDS;
2073 			return (NULL);
2074 		}
2075 	} else if (bind_info->port_num != 0) {
2076 		QL_PRINT_2(CE_CONT, "(%d): failed, port number=%d is not "
2077 		    "supported\n", ha->instance, bind_info->port_num);
2078 		port_info->pi_error = FC_OUTOFBOUNDS;
2079 		return (NULL);
2080 	}
2081 
2082 	/* Locate port context. */
2083 	for (vha = ha; vha != NULL; vha = vha->vp_next) {
2084 		if (vha->vp_index == bind_info->port_num) {
2085 			break;
2086 		}
2087 	}
2088 
2089 	/* If virtual port does not exist. */
2090 	if (vha == NULL) {
2091 		vha = ql_vport_create(ha, (uint8_t)bind_info->port_num);
2092 	}
2093 
2094 	/* make sure this port isn't already bound */
2095 	if (vha->flags & FCA_BOUND) {
2096 		port_info->pi_error = FC_ALREADY;
2097 	} else {
2098 		if (vha->vp_index != 0) {
2099 			bcopy(port_nwwn,
2100 			    vha->loginparams.node_ww_name.raw_wwn, 8);
2101 			bcopy(port_pwwn,
2102 			    vha->loginparams.nport_ww_name.raw_wwn, 8);
2103 		}
2104 		if (vha->vp_index != 0 && !(vha->flags & VP_ENABLED)) {
2105 			if (ql_vport_enable(vha) != QL_SUCCESS) {
2106 				QL_PRINT_2(CE_CONT, "(%d): failed to enable "
2107 				    "virtual port=%d\n", ha->instance,
2108 				    vha->vp_index);
2109 				port_info->pi_error = FC_NPIV_FDISC_FAILED;
2110 				return (NULL);
2111 			}
2112 			cmn_err(CE_CONT, "!Qlogic %s(%d) NPIV(%d) "
2113 			    "WWPN=%02x%02x%02x%02x%02x%02x%02x%02x : "
2114 			    "WWNN=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2115 			    QL_NAME, ha->instance, vha->vp_index,
2116 			    port_pwwn[0], port_pwwn[1], port_pwwn[2],
2117 			    port_pwwn[3], port_pwwn[4], port_pwwn[5],
2118 			    port_pwwn[6], port_pwwn[7],
2119 			    port_nwwn[0], port_nwwn[1], port_nwwn[2],
2120 			    port_nwwn[3], port_nwwn[4], port_nwwn[5],
2121 			    port_nwwn[6], port_nwwn[7]);
2122 		}
2123 
2124 		/* stash the bind_info supplied by the FC Transport */
2125 		vha->bind_info.port_handle = bind_info->port_handle;
2126 		vha->bind_info.port_statec_cb =
2127 		    bind_info->port_statec_cb;
2128 		vha->bind_info.port_unsol_cb = bind_info->port_unsol_cb;
2129 
2130 		/* Set port's source ID. */
2131 		port_info->pi_s_id.port_id = vha->d_id.b24;
2132 
2133 		/* copy out the default login parameters */
2134 		bcopy((void *)&vha->loginparams,
2135 		    (void *)&port_info->pi_login_params,
2136 		    sizeof (la_els_logi_t));
2137 
2138 		/* Set port's hard address if enabled. */
2139 		port_info->pi_hard_addr.hard_addr = 0;
2140 		if (bind_info->port_num == 0) {
2141 			d_id.b24 = ha->d_id.b24;
2142 			if (CFG_IST(ha, CFG_CTRL_2425)) {
2143 				if (ha->init_ctrl_blk.cb24.
2144 				    firmware_options_1[0] & BIT_0) {
2145 					d_id.b.al_pa = ql_index_to_alpa[ha->
2146 					    init_ctrl_blk.cb24.
2147 					    hard_address[0]];
2148 					port_info->pi_hard_addr.hard_addr =
2149 					    d_id.b24;
2150 				}
2151 			} else if (ha->init_ctrl_blk.cb.firmware_options[0] &
2152 			    BIT_0) {
2153 					d_id.b.al_pa = ql_index_to_alpa[ha->
2154 					    init_ctrl_blk.cb.hard_address[0]];
2155 					port_info->pi_hard_addr.hard_addr =
2156 					    d_id.b24;
2157 			}
2158 
2159 			/* Set the node id data */
2160 			if (ql_get_rnid_params(ha,
2161 			    sizeof (port_info->pi_rnid_params.params),
2162 			    (caddr_t)&port_info->pi_rnid_params.params) ==
2163 			    QL_SUCCESS) {
2164 				port_info->pi_rnid_params.status = FC_SUCCESS;
2165 			} else {
2166 				port_info->pi_rnid_params.status = FC_FAILURE;
2167 			}
2168 
2169 			/* Populate T11 FC-HBA details */
2170 			ql_populate_hba_fru_details(ha, port_info);
2171 			ha->pi_attrs = kmem_zalloc(sizeof (fca_port_attrs_t),
2172 			    KM_SLEEP);
2173 			if (ha->pi_attrs != NULL) {
2174 				bcopy(&port_info->pi_attrs, ha->pi_attrs,
2175 				    sizeof (fca_port_attrs_t));
2176 			}
2177 		} else {
2178 			port_info->pi_rnid_params.status = FC_FAILURE;
2179 			if (ha->pi_attrs != NULL) {
2180 				bcopy(ha->pi_attrs, &port_info->pi_attrs,
2181 				    sizeof (fca_port_attrs_t));
2182 			}
2183 		}
2184 
2185 		/* Generate handle for this FCA. */
2186 		fca_handle = (opaque_t)vha;
2187 
2188 		ADAPTER_STATE_LOCK(ha);
2189 		vha->flags |= FCA_BOUND;
2190 		ADAPTER_STATE_UNLOCK(ha);
2191 		/* Set port's current state. */
2192 		port_info->pi_port_state = vha->state;
2193 	}
2194 
2195 	QL_PRINT_10(CE_CONT, "(%d,%d): done, pi_port_state=%xh, "
2196 	    "pi_s_id.port_id=%xh\n", ha->instance, ha->vp_index,
2197 	    port_info->pi_port_state, port_info->pi_s_id.port_id);
2198 
2199 	return (fca_handle);
2200 }
2201 
2202 /*
2203  * ql_unbind_port
2204  *	To unbind a Fibre Channel Adapter from an FC Port driver.
2205  *
2206  * Input:
2207  *	fca_handle = handle setup by ql_bind_port().
2208  *
2209  * Context:
2210  *	Kernel context.
2211  */
2212 static void
2213 ql_unbind_port(opaque_t fca_handle)
2214 {
2215 	ql_adapter_state_t	*ha;
2216 	ql_tgt_t		*tq;
2217 	uint32_t		flgs;
2218 
2219 	ha = ql_fca_handle_to_state(fca_handle);
2220 	if (ha == NULL) {
2221 		/*EMPTY*/
2222 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2223 		    (void *)fca_handle);
2224 	} else {
2225 		QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance,
2226 		    ha->vp_index);
2227 
2228 		if (!(ha->flags & FCA_BOUND)) {
2229 			/*EMPTY*/
2230 			QL_PRINT_2(CE_CONT, "(%d): port=%d already unbound\n",
2231 			    ha->instance, ha->vp_index);
2232 		} else {
2233 			if (ha->vp_index != 0 && ha->flags & VP_ENABLED) {
2234 				if ((tq = ql_loop_id_to_queue(ha,
2235 				    FL_PORT_24XX_HDL)) != NULL) {
2236 					(void) ql_log_iocb(ha, tq, tq->loop_id,
2237 					    CFO_FREE_N_PORT_HANDLE |
2238 					    CFO_EXPLICIT_LOGO | CF_CMD_LOGO,
2239 					    NULL);
2240 				}
2241 				(void) ql_vport_control(ha,
2242 				    VPC_DISABLE_LOGOUT);
2243 				flgs = FCA_BOUND | VP_ENABLED;
2244 			} else {
2245 				flgs = FCA_BOUND;
2246 			}
2247 			ADAPTER_STATE_LOCK(ha);
2248 			ha->flags &= ~flgs;
2249 			ADAPTER_STATE_UNLOCK(ha);
2250 		}
2251 
2252 		QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance,
2253 		    ha->vp_index);
2254 	}
2255 }
2256 
2257 /*
2258  * ql_init_pkt
2259  *	Initialize FCA portion of packet.
2260  *
2261  * Input:
2262  *	fca_handle = handle setup by ql_bind_port().
2263  *	pkt = pointer to fc_packet.
2264  *
2265  * Returns:
2266  *	FC_SUCCESS - the packet has successfully been initialized.
2267  *	FC_UNBOUND - the fca_handle specified is not bound.
2268  *	FC_NOMEM - the FCA failed initialization due to an allocation error.
2269  *	FC_FAILURE - the FCA failed initialization for undisclosed reasons
2270  *
2271  * Context:
2272  *	Kernel context.
2273  */
2274 /* ARGSUSED */
2275 static int
2276 ql_init_pkt(opaque_t fca_handle, fc_packet_t *pkt, int sleep)
2277 {
2278 	ql_adapter_state_t	*ha;
2279 	ql_srb_t		*sp;
2280 
2281 	ha = ql_fca_handle_to_state(fca_handle);
2282 	if (ha == NULL) {
2283 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2284 		    (void *)fca_handle);
2285 		return (FC_UNBOUND);
2286 	}
2287 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2288 
2289 	ASSERT(ha->power_level == PM_LEVEL_D0);
2290 
2291 	sp = (ql_srb_t *)pkt->pkt_fca_private;
2292 	sp->flags = 0;
2293 
2294 	/* init cmd links */
2295 	sp->cmd.base_address = sp;
2296 	sp->cmd.prev = NULL;
2297 	sp->cmd.next = NULL;
2298 	sp->cmd.head = NULL;
2299 
2300 	/* init watchdog links */
2301 	sp->wdg.base_address = sp;
2302 	sp->wdg.prev = NULL;
2303 	sp->wdg.next = NULL;
2304 	sp->wdg.head = NULL;
2305 	sp->pkt = pkt;
2306 	sp->ha = ha;
2307 	sp->magic_number = QL_FCA_BRAND;
2308 
2309 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2310 
2311 	return (FC_SUCCESS);
2312 }
2313 
2314 /*
2315  * ql_un_init_pkt
2316  *	Release all local resources bound to packet.
2317  *
2318  * Input:
2319  *	fca_handle = handle setup by ql_bind_port().
2320  *	pkt = pointer to fc_packet.
2321  *
2322  * Returns:
2323  *	FC_SUCCESS - the packet has successfully been invalidated.
2324  *	FC_UNBOUND - the fca_handle specified is not bound.
2325  *	FC_BADPACKET - the packet has not been initialized or has
2326  *			already been freed by this FCA.
2327  *
2328  * Context:
2329  *	Kernel context.
2330  */
2331 static int
2332 ql_un_init_pkt(opaque_t fca_handle, fc_packet_t *pkt)
2333 {
2334 	ql_adapter_state_t *ha;
2335 	int rval;
2336 	ql_srb_t *sp;
2337 
2338 	ha = ql_fca_handle_to_state(fca_handle);
2339 	if (ha == NULL) {
2340 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2341 		    (void *)fca_handle);
2342 		return (FC_UNBOUND);
2343 	}
2344 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2345 
2346 	sp = (ql_srb_t *)pkt->pkt_fca_private;
2347 	ASSERT(sp->magic_number == QL_FCA_BRAND);
2348 
2349 	if (sp->magic_number != QL_FCA_BRAND) {
2350 		EL(ha, "failed, FC_BADPACKET\n");
2351 		rval = FC_BADPACKET;
2352 	} else {
2353 		sp->magic_number = NULL;
2354 
2355 		ASSERT((sp->flags & (SRB_IN_DEVICE_QUEUE |
2356 		    SRB_IN_TOKEN_ARRAY)) == 0);
2357 
2358 		rval = FC_SUCCESS;
2359 	}
2360 
2361 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2362 
2363 	return (rval);
2364 }
2365 
2366 /*
2367  * ql_els_send
2368  *	Issue a extended link service request.
2369  *
2370  * Input:
2371  *	fca_handle = handle setup by ql_bind_port().
2372  *	pkt = pointer to fc_packet.
2373  *
2374  * Returns:
2375  *	FC_SUCCESS - the command was successful.
2376  *	FC_ELS_FREJECT - the command was rejected by a Fabric.
2377  *	FC_ELS_PREJECT - the command was rejected by an N-port.
2378  *	FC_TRANSPORT_ERROR - a transport error occurred.
2379  *	FC_UNBOUND - the fca_handle specified is not bound.
2380  *	FC_ELS_BAD - the FCA can not issue the requested ELS.
2381  *
2382  * Context:
2383  *	Kernel context.
2384  */
2385 static int
2386 ql_els_send(opaque_t fca_handle, fc_packet_t *pkt)
2387 {
2388 	ql_adapter_state_t	*ha;
2389 	int			rval;
2390 	clock_t			timer;
2391 	ls_code_t		els;
2392 	la_els_rjt_t		rjt;
2393 	ql_srb_t		*sp = (ql_srb_t *)pkt->pkt_fca_private;
2394 
2395 	/* Verify proper command. */
2396 	ha = ql_cmd_setup(fca_handle, pkt, &rval);
2397 	if (ha == NULL) {
2398 		QL_PRINT_2(CE_CONT, "failed, ql_cmd_setup=%xh, fcah=%ph\n",
2399 		    rval, fca_handle);
2400 		return (FC_INVALID_REQUEST);
2401 	}
2402 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2403 
2404 	ASSERT(ha->power_level == PM_LEVEL_D0);
2405 
2406 	/* Wait for suspension to end. */
2407 	TASK_DAEMON_LOCK(ha);
2408 	while (ha->task_daemon_flags & QL_SUSPENDED) {
2409 		ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;
2410 
2411 		/* 30 seconds from now */
2412 		timer = ddi_get_lbolt();
2413 		timer += drv_usectohz(30000000);
2414 
2415 		if (cv_timedwait(&ha->pha->cv_dr_suspended,
2416 		    &ha->pha->task_daemon_mutex, timer) == -1) {
2417 			/*
2418 			 * The timeout time 'timer' was
2419 			 * reached without the condition
2420 			 * being signaled.
2421 			 */
2422 			pkt->pkt_state = FC_PKT_TRAN_BSY;
2423 			pkt->pkt_reason = FC_REASON_XCHG_BSY;
2424 
2425 			/* Release task daemon lock. */
2426 			TASK_DAEMON_UNLOCK(ha);
2427 
2428 			EL(ha, "QL_SUSPENDED failed=%xh\n",
2429 			    QL_FUNCTION_TIMEOUT);
2430 			return (FC_TRAN_BUSY);
2431 		}
2432 	}
2433 	/* Release task daemon lock. */
2434 	TASK_DAEMON_UNLOCK(ha);
2435 
2436 	/* Setup response header. */
2437 	bcopy((void *)&pkt->pkt_cmd_fhdr, (void *)&pkt->pkt_resp_fhdr,
2438 	    sizeof (fc_frame_hdr_t));
2439 	pkt->pkt_resp_fhdr.d_id = ha->d_id.b24;
2440 	pkt->pkt_resp_fhdr.s_id = pkt->pkt_cmd_fhdr.d_id;
2441 	pkt->pkt_resp_fhdr.r_ctl = R_CTL_EXTENDED_SVC |
2442 	    R_CTL_SOLICITED_CONTROL;
2443 	pkt->pkt_resp_fhdr.f_ctl = F_CTL_XCHG_CONTEXT | F_CTL_LAST_SEQ |
2444 	    F_CTL_END_SEQ;
2445 
2446 	sp->flags &= ~(SRB_UB_CALLBACK | SRB_UB_RSCN | SRB_UB_FCP |
2447 	    SRB_FCP_CMD_PKT | SRB_FCP_DATA_PKT | SRB_FCP_RSP_PKT |
2448 	    SRB_IP_PKT | SRB_COMMAND_TIMEOUT | SRB_UB_ACQUIRED | SRB_MS_PKT);
2449 
2450 	/* map the type of ELS to a function */
2451 	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
2452 	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
2453 
2454 #if 0
2455 	QL_PRINT_3(CE_CONT, "(%d): command fhdr:\n", ha->instance);
2456 	QL_DUMP_3((uint8_t *)&pkt->pkt_cmd_fhdr, 32,
2457 	    sizeof (fc_frame_hdr_t) / 4);
2458 	QL_PRINT_3(CE_CONT, "(%d): command:\n", ha->instance);
2459 	QL_DUMP_3((uint8_t *)&els, 32, sizeof (els) / 4);
2460 #endif
2461 	switch (els.ls_code) {
2462 	case LA_ELS_RJT:
2463 	case LA_ELS_ACC:
2464 		pkt->pkt_state = FC_PKT_SUCCESS;
2465 		rval = FC_SUCCESS;
2466 		break;
2467 	case LA_ELS_PLOGI:
2468 	case LA_ELS_PDISC:
2469 		rval = ql_els_plogi(ha, pkt);
2470 		break;
2471 	case LA_ELS_FLOGI:
2472 	case LA_ELS_FDISC:
2473 		rval = ql_els_flogi(ha, pkt);
2474 		break;
2475 	case LA_ELS_LOGO:
2476 		rval = ql_els_logo(ha, pkt);
2477 		break;
2478 	case LA_ELS_PRLI:
2479 		rval = ql_els_prli(ha, pkt);
2480 		break;
2481 	case LA_ELS_PRLO:
2482 		rval = ql_els_prlo(ha, pkt);
2483 		break;
2484 	case LA_ELS_ADISC:
2485 		rval = ql_els_adisc(ha, pkt);
2486 		break;
2487 	case LA_ELS_LINIT:
2488 		rval = ql_els_linit(ha, pkt);
2489 		break;
2490 	case LA_ELS_LPC:
2491 		rval = ql_els_lpc(ha, pkt);
2492 		break;
2493 	case LA_ELS_LSTS:
2494 		rval = ql_els_lsts(ha, pkt);
2495 		break;
2496 	case LA_ELS_SCR:
2497 		rval = ql_els_scr(ha, pkt);
2498 		break;
2499 	case LA_ELS_RSCN:
2500 		rval = ql_els_rscn(ha, pkt);
2501 		break;
2502 	case LA_ELS_FARP_REQ:
2503 		rval = ql_els_farp_req(ha, pkt);
2504 		break;
2505 	case LA_ELS_FARP_REPLY:
2506 		rval = ql_els_farp_reply(ha, pkt);
2507 		break;
2508 	case LA_ELS_RLS:
2509 		rval = ql_els_rls(ha, pkt);
2510 		break;
2511 	case LA_ELS_RNID:
2512 		rval = ql_els_rnid(ha, pkt);
2513 		break;
2514 	default:
2515 		EL(ha, "failed=%xh, UNSUPPORTED\n", els.ls_code);
2516 		/* Build RJT. */
2517 		bzero(&rjt, sizeof (rjt));
2518 		rjt.ls_code.ls_code = LA_ELS_RJT;
2519 		rjt.reason = FC_REASON_CMD_UNSUPPORTED;
2520 
2521 		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
2522 		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
2523 
2524 		pkt->pkt_state = FC_PKT_LOCAL_RJT;
2525 		pkt->pkt_reason = FC_REASON_UNSUPPORTED;
2526 		rval = FC_SUCCESS;
2527 		break;
2528 	}
2529 
2530 #if 0
2531 	QL_PRINT_3(CE_CONT, "(%d): response fhdr:\n", ha->instance);
2532 	QL_DUMP_3((uint8_t *)&pkt->pkt_resp_fhdr, 32,
2533 	    sizeof (fc_frame_hdr_t) / 4);
2534 #endif
2535 	/* Do command callback only on error */
2536 	if (rval == FC_SUCCESS && !(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) &&
2537 	    pkt->pkt_comp) {
2538 		ql_awaken_task_daemon(ha, sp, 0, 0);
2539 	}
2540 
2541 	if (rval != FC_SUCCESS) {
2542 		EL(ha, "failed, rval = %xh\n", rval);
2543 	} else {
2544 		/*EMPTY*/
2545 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2546 	}
2547 	return (rval);
2548 }
2549 
2550 /*
2551  * ql_get_cap
2552  *	Export FCA hardware and software capabilities.
2553  *
2554  * Input:
2555  *	fca_handle = handle setup by ql_bind_port().
2556  *	cap = pointer to the capabilities string.
2557  *	ptr = buffer pointer for return capability.
2558  *
2559  * Returns:
2560  *	FC_CAP_ERROR - no such capability
2561  *	FC_CAP_FOUND - the capability was returned and cannot be set
2562  *	FC_CAP_SETTABLE - the capability was returned and can be set
2563  *	FC_UNBOUND - the fca_handle specified is not bound.
2564  *
2565  * Context:
2566  *	Kernel context.
2567  */
2568 static int
2569 ql_get_cap(opaque_t fca_handle, char *cap, void *ptr)
2570 {
2571 	ql_adapter_state_t	*ha;
2572 	int			rval;
2573 	uint32_t		*rptr = (uint32_t *)ptr;
2574 
2575 	ha = ql_fca_handle_to_state(fca_handle);
2576 	if (ha == NULL) {
2577 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2578 		    (void *)fca_handle);
2579 		return (FC_UNBOUND);
2580 	}
2581 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2582 
2583 	if (strcmp(cap, FC_NODE_WWN) == 0) {
2584 		bcopy((void *)&ha->loginparams.node_ww_name.raw_wwn[0],
2585 		    ptr, 8);
2586 		rval = FC_CAP_FOUND;
2587 	} else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
2588 		bcopy((void *)&ha->loginparams, ptr,
2589 		    sizeof (la_els_logi_t));
2590 		rval = FC_CAP_FOUND;
2591 	} else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
2592 		*rptr = (uint32_t)QL_UB_LIMIT;
2593 		rval = FC_CAP_FOUND;
2594 	} else if (strcmp(cap, FC_CAP_NOSTREAM_ON_UNALIGN_BUF) == 0) {
2595 
2596 		dev_info_t	*psydip = NULL;
2597 #ifdef __sparc
2598 		/*
2599 		 * Disable streaming for certain 2 chip adapters
2600 		 * below Psycho to handle Psycho byte hole issue.
2601 		 */
2602 		if ((CFG_IST(ha, CFG_MULTI_CHIP_ADAPTER)) &&
2603 		    (!CFG_IST(ha, CFG_SBUS_CARD))) {
2604 			for (psydip = ddi_get_parent(ha->dip); psydip;
2605 			    psydip = ddi_get_parent(psydip)) {
2606 				if (strcmp(ddi_driver_name(psydip),
2607 				    "pcipsy") == 0) {
2608 					break;
2609 				}
2610 			}
2611 		}
2612 #endif	/* __sparc */
2613 
2614 		if (psydip) {
2615 			*rptr = (uint32_t)FC_NO_STREAMING;
2616 			EL(ha, "No Streaming\n");
2617 		} else {
2618 			*rptr = (uint32_t)FC_ALLOW_STREAMING;
2619 			EL(ha, "Allow Streaming\n");
2620 		}
2621 		rval = FC_CAP_FOUND;
2622 	} else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
2623 		if (CFG_IST(ha, CFG_CTRL_2425)) {
2624 			*rptr = (uint32_t)CHAR_TO_SHORT(
2625 			    ha->init_ctrl_blk.cb24.max_frame_length[0],
2626 			    ha->init_ctrl_blk.cb24.max_frame_length[1]);
2627 		} else {
2628 			*rptr = (uint32_t)CHAR_TO_SHORT(
2629 			    ha->init_ctrl_blk.cb.max_frame_length[0],
2630 			    ha->init_ctrl_blk.cb.max_frame_length[1]);
2631 		}
2632 		rval = FC_CAP_FOUND;
2633 	} else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
2634 		*rptr = FC_RESET_RETURN_ALL;
2635 		rval = FC_CAP_FOUND;
2636 	} else if (strcmp(cap, FC_CAP_FCP_DMA) == 0) {
2637 		*rptr = FC_NO_DVMA_SPACE;
2638 		rval = FC_CAP_FOUND;
2639 	} else {
2640 		EL(ha, "unknown=%s, FC_CAP_ERROR\n", cap);
2641 		rval = FC_CAP_ERROR;
2642 	}
2643 
2644 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2645 
2646 	return (rval);
2647 }
2648 
2649 /*
2650  * ql_set_cap
2651  *	Allow the FC Transport to set FCA capabilities if possible.
2652  *
2653  * Input:
2654  *	fca_handle = handle setup by ql_bind_port().
2655  *	cap = pointer to the capabilities string.
2656  *	ptr = buffer pointer for capability.
2657  *
2658  * Returns:
2659  *	FC_CAP_ERROR - no such capability
2660  *	FC_CAP_FOUND - the capability cannot be set by the FC Transport.
2661  *	FC_CAP_SETTABLE - the capability was successfully set.
2662  *	FC_UNBOUND - the fca_handle specified is not bound.
2663  *
2664  * Context:
2665  *	Kernel context.
2666  */
2667 /* ARGSUSED */
2668 static int
2669 ql_set_cap(opaque_t fca_handle, char *cap, void *ptr)
2670 {
2671 	ql_adapter_state_t	*ha;
2672 	int			rval;
2673 
2674 	ha = ql_fca_handle_to_state(fca_handle);
2675 	if (ha == NULL) {
2676 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2677 		    (void *)fca_handle);
2678 		return (FC_UNBOUND);
2679 	}
2680 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2681 
2682 	if (strcmp(cap, FC_NODE_WWN) == 0) {
2683 		rval = FC_CAP_FOUND;
2684 	} else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
2685 		rval = FC_CAP_FOUND;
2686 	} else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
2687 		rval = FC_CAP_FOUND;
2688 	} else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
2689 		rval = FC_CAP_FOUND;
2690 	} else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
2691 		rval = FC_CAP_FOUND;
2692 	} else {
2693 		EL(ha, "unknown=%s, FC_CAP_ERROR\n", cap);
2694 		rval = FC_CAP_ERROR;
2695 	}
2696 
2697 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2698 
2699 	return (rval);
2700 }
2701 
2702 /*
2703  * ql_getmap
2704  *	Request of Arbitrated Loop (AL-PA) map.
2705  *
2706  * Input:
2707  *	fca_handle = handle setup by ql_bind_port().
2708  *	mapbuf= buffer pointer for map.
2709  *
2710  * Returns:
2711  *	FC_OLDPORT - the specified port is not operating in loop mode.
2712  *	FC_OFFLINE - the specified port is not online.
2713  *	FC_NOMAP - there is no loop map available for this port.
2714  *	FC_UNBOUND - the fca_handle specified is not bound.
2715  *	FC_SUCCESS - a valid map has been placed in mapbuf.
2716  *
2717  * Context:
2718  *	Kernel context.
2719  */
2720 static int
2721 ql_getmap(opaque_t fca_handle, fc_lilpmap_t *mapbuf)
2722 {
2723 	ql_adapter_state_t	*ha;
2724 	clock_t			timer;
2725 	int			rval = FC_SUCCESS;
2726 
2727 	ha = ql_fca_handle_to_state(fca_handle);
2728 	if (ha == NULL) {
2729 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2730 		    (void *)fca_handle);
2731 		return (FC_UNBOUND);
2732 	}
2733 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2734 
2735 	ASSERT(ha->power_level == PM_LEVEL_D0);
2736 
2737 	mapbuf->lilp_magic = (uint16_t)MAGIC_LIRP;
2738 	mapbuf->lilp_myalpa = ha->d_id.b.al_pa;
2739 
2740 	/* Wait for suspension to end. */
2741 	TASK_DAEMON_LOCK(ha);
2742 	while (ha->task_daemon_flags & QL_SUSPENDED) {
2743 		ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;
2744 
2745 		/* 30 seconds from now */
2746 		timer = ddi_get_lbolt();
2747 		timer += drv_usectohz(30000000);
2748 
2749 		if (cv_timedwait(&ha->pha->cv_dr_suspended,
2750 		    &ha->pha->task_daemon_mutex, timer) == -1) {
2751 			/*
2752 			 * The timeout time 'timer' was
2753 			 * reached without the condition
2754 			 * being signaled.
2755 			 */
2756 
2757 			/* Release task daemon lock. */
2758 			TASK_DAEMON_UNLOCK(ha);
2759 
2760 			EL(ha, "QL_SUSPENDED failed, FC_TRAN_BUSY\n");
2761 			return (FC_TRAN_BUSY);
2762 		}
2763 	}
2764 	/* Release task daemon lock. */
2765 	TASK_DAEMON_UNLOCK(ha);
2766 
2767 	if (ql_get_loop_position_map(ha, LOOP_POSITION_MAP_SIZE,
2768 	    (caddr_t)&mapbuf->lilp_length) != QL_SUCCESS) {
2769 		/*
2770 		 * Now, since transport drivers cosider this as an
2771 		 * offline condition, let's wait for few seconds
2772 		 * for any loop transitions before we reset the.
2773 		 * chip and restart all over again.
2774 		 */
2775 		ql_delay(ha, 2000000);
2776 		EL(ha, "failed, FC_NOMAP\n");
2777 		rval = FC_NOMAP;
2778 	} else {
2779 		/*EMPTY*/
2780 		QL_PRINT_3(CE_CONT, "(%d): my_alpa %xh len %xh "
2781 		    "data %xh %xh %xh %xh\n", ha->instance,
2782 		    mapbuf->lilp_myalpa, mapbuf->lilp_length,
2783 		    mapbuf->lilp_alpalist[0], mapbuf->lilp_alpalist[1],
2784 		    mapbuf->lilp_alpalist[2], mapbuf->lilp_alpalist[3]);
2785 	}
2786 
2787 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2788 #if 0
2789 	QL_DUMP_3((uint8_t *)mapbuf, 8, sizeof (fc_lilpmap_t));
2790 #endif
2791 	return (rval);
2792 }
2793 
2794 /*
2795  * ql_transport
2796  *	Issue an I/O request. Handles all regular requests.
2797  *
2798  * Input:
2799  *	fca_handle = handle setup by ql_bind_port().
2800  *	pkt = pointer to fc_packet.
2801  *
2802  * Returns:
2803  *	FC_SUCCESS - the packet was accepted for transport.
2804  *	FC_TRANSPORT_ERROR - a transport error occurred.
2805  *	FC_BADPACKET - the packet to be transported had not been
2806  *			initialized by this FCA.
2807  *	FC_UNBOUND - the fca_handle specified is not bound.
2808  *
2809  * Context:
2810  *	Kernel context.
2811  */
2812 static int
2813 ql_transport(opaque_t fca_handle, fc_packet_t *pkt)
2814 {
2815 	ql_adapter_state_t	*ha;
2816 	int			rval = FC_TRANSPORT_ERROR;
2817 	ql_srb_t		*sp = (ql_srb_t *)pkt->pkt_fca_private;
2818 
2819 	/* Verify proper command. */
2820 	ha = ql_cmd_setup(fca_handle, pkt, &rval);
2821 	if (ha == NULL) {
2822 		QL_PRINT_2(CE_CONT, "failed, ql_cmd_setup=%xh, fcah=%ph\n",
2823 		    rval, fca_handle);
2824 		return (rval);
2825 	}
2826 	QL_PRINT_3(CE_CONT, "(%d): started command:\n", ha->instance);
2827 #if 0
2828 	QL_DUMP_3((uint8_t *)&pkt->pkt_cmd_fhdr, 32,
2829 	    sizeof (fc_frame_hdr_t) / 4);
2830 	QL_PRINT_3(CE_CONT, "(%d): command:\n", ha->instance);
2831 	QL_DUMP_3((uint8_t *)pkt->pkt_cmd, 8, pkt->pkt_cmdlen);
2832 #endif
2833 	if (ha->flags & ADAPTER_SUSPENDED) {
2834 		ASSERT(pkt->pkt_tran_flags & FC_TRAN_DUMPING);
2835 	}
2836 
2837 	ASSERT(ha->power_level == PM_LEVEL_D0);
2838 
2839 	/* Reset SRB flags. */
2840 	sp->flags &= ~(SRB_ISP_STARTED | SRB_ISP_COMPLETED | SRB_RETRY |
2841 	    SRB_POLL | SRB_WATCHDOG_ENABLED | SRB_ABORT | SRB_UB_CALLBACK |
2842 	    SRB_UB_RSCN | SRB_UB_FCP | SRB_FCP_CMD_PKT | SRB_FCP_DATA_PKT |
2843 	    SRB_FCP_RSP_PKT | SRB_IP_PKT | SRB_GENERIC_SERVICES_PKT |
2844 	    SRB_COMMAND_TIMEOUT | SRB_ABORTING | SRB_IN_DEVICE_QUEUE |
2845 	    SRB_IN_TOKEN_ARRAY | SRB_UB_FREE_REQUESTED | SRB_UB_ACQUIRED |
2846 	    SRB_MS_PKT);
2847 
2848 	pkt->pkt_resp_fhdr.d_id = ha->d_id.b24;
2849 	pkt->pkt_resp_fhdr.r_ctl = R_CTL_STATUS;
2850 	pkt->pkt_resp_fhdr.s_id = pkt->pkt_cmd_fhdr.d_id;
2851 	pkt->pkt_resp_fhdr.f_ctl = pkt->pkt_cmd_fhdr.f_ctl;
2852 	pkt->pkt_resp_fhdr.type = pkt->pkt_cmd_fhdr.type;
2853 
2854 	switch (pkt->pkt_cmd_fhdr.r_ctl) {
2855 	case R_CTL_COMMAND:
2856 		if (pkt->pkt_cmd_fhdr.type == FC_TYPE_SCSI_FCP) {
2857 			sp->flags |= SRB_FCP_CMD_PKT;
2858 			rval = ql_fcp_scsi_cmd(ha, pkt, sp);
2859 		}
2860 		break;
2861 
2862 	default:
2863 		/* Setup response header and buffer. */
2864 		if (pkt->pkt_rsplen) {
2865 			bzero((void *)pkt->pkt_resp, pkt->pkt_rsplen);
2866 		}
2867 
2868 		switch (pkt->pkt_cmd_fhdr.r_ctl) {
2869 		case R_CTL_SOLICITED_DATA:
2870 			if (pkt->pkt_cmd_fhdr.type == FC_TYPE_SCSI_FCP) {
2871 				sp->flags |= SRB_FCP_DATA_PKT;
2872 				rval = ql_fcp_data_rsp(ha, pkt, sp);
2873 			}
2874 			break;
2875 
2876 		case R_CTL_STATUS:
2877 			if (pkt->pkt_cmd_fhdr.type == FC_TYPE_SCSI_FCP) {
2878 				sp->flags |= SRB_FCP_RSP_PKT;
2879 				rval = ql_fcp_data_rsp(ha, pkt, sp);
2880 			}
2881 			break;
2882 
2883 		case R_CTL_UNSOL_DATA:
2884 			if (pkt->pkt_cmd_fhdr.type == FC_TYPE_IS8802_SNAP) {
2885 				sp->flags |= SRB_IP_PKT;
2886 				rval = ql_fcp_ip_cmd(ha, pkt, sp);
2887 			}
2888 			break;
2889 
2890 		case R_CTL_UNSOL_CONTROL:
2891 			if (pkt->pkt_cmd_fhdr.type == FC_TYPE_FC_SERVICES) {
2892 				sp->flags |= SRB_GENERIC_SERVICES_PKT;
2893 				rval = ql_fc_services(ha, pkt);
2894 			}
2895 			break;
2896 
2897 		default:
2898 			pkt->pkt_state = FC_PKT_LOCAL_RJT;
2899 			pkt->pkt_reason = FC_REASON_UNSUPPORTED;
2900 			rval = FC_TRANSPORT_ERROR;
2901 			EL(ha, "unknown, r_ctl=%xh\n",
2902 			    pkt->pkt_cmd_fhdr.r_ctl);
2903 			break;
2904 		}
2905 	}
2906 
2907 	if (rval != FC_SUCCESS) {
2908 		EL(ha, "failed, rval = %xh\n", rval);
2909 	} else {
2910 		/*EMPTY*/
2911 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2912 	}
2913 
2914 	return (rval);
2915 }
2916 
2917 /*
2918  * ql_ub_alloc
2919  *	Allocate buffers for unsolicited exchanges.
2920  *
2921  * Input:
2922  *	fca_handle = handle setup by ql_bind_port().
2923  *	tokens = token array for each buffer.
2924  *	size = size of each buffer.
2925  *	count = pointer to number of buffers.
2926  *	type = the FC-4 type the buffers are reserved for.
2927  *		1 = Extended Link Services, 5 = LLC/SNAP
2928  *
2929  * Returns:
2930  *	FC_FAILURE - buffers could not be allocated.
2931  *	FC_TOOMANY - the FCA could not allocate the requested
2932  *			number of buffers.
2933  *	FC_SUCCESS - unsolicited buffers were allocated.
2934  *	FC_UNBOUND - the fca_handle specified is not bound.
2935  *
2936  * Context:
2937  *	Kernel context.
2938  */
static int
ql_ub_alloc(opaque_t fca_handle, uint64_t tokens[], uint32_t size,
    uint32_t *count, uint32_t type)
{
	ql_adapter_state_t	*ha;
	caddr_t			bufp = NULL;
	fc_unsol_buf_t		*ubp;
	ql_srb_t		*sp;
	uint32_t		index;
	uint32_t		cnt;
	uint32_t		ub_array_index = 0;
	int			rval = FC_SUCCESS;
	int			ub_updated = FALSE;

	/* Check handle. */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_10(CE_CONT, "(%d,%d): started, count = %xh\n",
	    ha->instance, ha->vp_index, *count);

	/* Refuse to allocate while the adapter is powered down. */
	QL_PM_LOCK(ha);
	if (ha->power_level != PM_LEVEL_D0) {
		QL_PM_UNLOCK(ha);
		QL_PRINT_3(CE_CONT, "(%d,%d): down done\n", ha->instance,
		    ha->vp_index);
		return (FC_FAILURE);
	}
	QL_PM_UNLOCK(ha);

	/* Acquire adapter state lock. */
	ADAPTER_STATE_LOCK(ha);

	/*
	 * Check the count: the request plus what is already allocated
	 * must not exceed the fixed size of ha->ub_array.
	 */
	if ((*count + ha->ub_allocated) > QL_UB_LIMIT) {
		*count = 0;
		EL(ha, "failed, FC_TOOMANY\n");
		rval = FC_TOOMANY;
	}

	/*
	 * reset ub_array_index
	 */
	ub_array_index = 0;

	/*
	 * Now proceed to allocate any buffers required
	 */
	for (index = 0; index < *count && rval == FC_SUCCESS; index++) {
		/*
		 * Allocate all memory needed.
		 *
		 * NOTE(review): kmem_zalloc(..., KM_SLEEP) blocks until
		 * memory is available and cannot return NULL on Solaris,
		 * so the NULL-check failure branches below appear to be
		 * dead code — confirm before relying on them.
		 */
		ubp = (fc_unsol_buf_t *)kmem_zalloc(
		    sizeof (fc_unsol_buf_t), KM_SLEEP);
		if (ubp == NULL) {
			EL(ha, "failed, FC_FAILURE\n");
			rval = FC_FAILURE;
		} else {
			sp = kmem_zalloc(sizeof (ql_srb_t), KM_SLEEP);
			if (sp == NULL) {
				kmem_free(ubp, sizeof (fc_unsol_buf_t));
				rval = FC_FAILURE;
			} else {
				/*
				 * IP (LLC/SNAP) buffers are handed to the
				 * ISP hardware, so they must be DMA-able;
				 * the DMA byte order differs by platform.
				 */
				if (type == FC_TYPE_IS8802_SNAP) {
#ifdef	__sparc
					if (ql_get_dma_mem(ha,
					    &sp->ub_buffer, size,
					    BIG_ENDIAN_DMA, MEM_DATA_ALIGN) !=
					    QL_SUCCESS) {
						rval = FC_FAILURE;
						kmem_free(ubp,
						    sizeof (fc_unsol_buf_t));
						kmem_free(sp,
						    sizeof (ql_srb_t));
					} else {
						bufp = sp->ub_buffer.bp;
						sp->ub_size = size;
					}
#else
					if (ql_get_dma_mem(ha,
					    &sp->ub_buffer, size,
					    LITTLE_ENDIAN_DMA, MEM_DATA_ALIGN) !=
					    QL_SUCCESS) {
						rval = FC_FAILURE;
						kmem_free(ubp,
						    sizeof (fc_unsol_buf_t));
						kmem_free(sp,
						    sizeof (ql_srb_t));
					} else {
						bufp = sp->ub_buffer.bp;
						sp->ub_size = size;
					}
#endif
				} else {
					/*
					 * Non-IP buffers (e.g. ELS) are only
					 * touched by the driver; ordinary
					 * kernel memory is sufficient.
					 */
					bufp = kmem_zalloc(size, KM_SLEEP);
					if (bufp == NULL) {
						rval = FC_FAILURE;
						kmem_free(ubp,
						    sizeof (fc_unsol_buf_t));
						kmem_free(sp,
						    sizeof (ql_srb_t));
					} else {
						sp->ub_size = size;
					}
				}
			}
		}

		if (rval == FC_SUCCESS) {
			/*
			 * Find next available slot.  The scan resumes from
			 * the previous index since earlier slots were just
			 * filled by this same loop.
			 */
			QL_UB_LOCK(ha);
			while (ha->ub_array[ub_array_index] != NULL) {
				ub_array_index++;
			}

			ubp->ub_fca_private = (void *)sp;

			/* init cmd links */
			sp->cmd.base_address = sp;
			sp->cmd.prev = NULL;
			sp->cmd.next = NULL;
			sp->cmd.head = NULL;

			/* init wdg links */
			sp->wdg.base_address = sp;
			sp->wdg.prev = NULL;
			sp->wdg.next = NULL;
			sp->wdg.head = NULL;
			sp->ha = ha;

			ubp->ub_buffer = bufp;
			ubp->ub_bufsize = size;
			ubp->ub_port_handle = fca_handle;
			ubp->ub_token = ub_array_index;

			/* Save the token the transport will use later. */
			tokens[index] = ub_array_index;

			/* Setup FCA private information. */
			sp->ub_type = type;
			sp->handle = ub_array_index;
			sp->flags |= SRB_UB_IN_FCA;

			ha->ub_array[ub_array_index] = ubp;
			ha->ub_allocated++;
			ub_updated = TRUE;
			QL_UB_UNLOCK(ha);
		}
	}

	/* Release adapter state lock. */
	ADAPTER_STATE_UNLOCK(ha);

	/*
	 * IP buffer: if any IP buffers were added on a chip that supports
	 * IP over FC, enable IP mode, initialize the firmware's IP
	 * control block on first use, and post the receive buffers.
	 */
	if (ub_updated) {
		if ((type == FC_TYPE_IS8802_SNAP) &&
		    (!(CFG_IST(ha, (CFG_CTRL_6322 | CFG_CTRL_25XX))))) {

			ADAPTER_STATE_LOCK(ha);
			ha->flags |= IP_ENABLED;
			ADAPTER_STATE_UNLOCK(ha);

			if (!(ha->flags & IP_INITIALIZED)) {
				/*
				 * Fill the control block matching the
				 * firmware interface (24xx/25xx extended
				 * layout vs. legacy); MTU/buffer sizes are
				 * stored as little-endian byte pairs.
				 */
				if (CFG_IST(ha, CFG_CTRL_2425)) {
					ha->ip_init_ctrl_blk.cb24.mtu_size[0] =
					    LSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb24.mtu_size[1] =
					    MSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb24.buf_size[0] =
					    LSB(size);
					ha->ip_init_ctrl_blk.cb24.buf_size[1] =
					    MSB(size);

					cnt = CHAR_TO_SHORT(
					    ha->ip_init_ctrl_blk.cb24.cc[0],
					    ha->ip_init_ctrl_blk.cb24.cc[1]);

					/* Only grow the container count. */
					if (cnt < *count) {
						ha->ip_init_ctrl_blk.cb24.cc[0]
						    = LSB(*count);
						ha->ip_init_ctrl_blk.cb24.cc[1]
						    = MSB(*count);
					}
				} else {
					ha->ip_init_ctrl_blk.cb.mtu_size[0] =
					    LSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb.mtu_size[1] =
					    MSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb.buf_size[0] =
					    LSB(size);
					ha->ip_init_ctrl_blk.cb.buf_size[1] =
					    MSB(size);

					cnt = CHAR_TO_SHORT(
					    ha->ip_init_ctrl_blk.cb.cc[0],
					    ha->ip_init_ctrl_blk.cb.cc[1]);

					/* Only grow the container count. */
					if (cnt < *count) {
						ha->ip_init_ctrl_blk.cb.cc[0] =
						    LSB(*count);
						ha->ip_init_ctrl_blk.cb.cc[1] =
						    MSB(*count);
					}
				}

				(void) ql_initialize_ip(ha);
			}
			ql_isp_rcvbuf(ha);
		}

		/* Tell the firmware about new SCSI target-mode buffers. */
		if (CFG_IST(ha, CFG_TARGET_MODE_ENABLE) &&
		    (type == FC_TYPE_SCSI_FCP)) {
			(void) ql_modify_lun(ha);
		}
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance,
		    ha->vp_index);
	}
	return (rval);
}
3165 
3166 /*
3167  * ql_ub_free
3168  *	Free unsolicited buffers.
3169  *
3170  * Input:
3171  *	fca_handle = handle setup by ql_bind_port().
3172  *	count = number of buffers.
3173  *	tokens = token array for each buffer.
3174  *
3175  * Returns:
3176  *	FC_SUCCESS - the requested buffers have been freed.
3177  *	FC_UNBOUND - the fca_handle specified is not bound.
3178  *	FC_UB_BADTOKEN - an invalid token was encountered.
3179  *			 No buffers have been released.
3180  *
3181  * Context:
3182  *	Kernel context.
3183  */
static int
ql_ub_free(opaque_t fca_handle, uint32_t count, uint64_t tokens[])
{
	ql_adapter_state_t	*ha;
	ql_srb_t		*sp;
	uint32_t		index;
	uint64_t		ub_array_index;
	int			rval = FC_SUCCESS;

	/* Check handle. */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Acquire adapter state lock. */
	ADAPTER_STATE_LOCK(ha);

	/*
	 * Check all returned tokens.
	 *
	 * NOTE(review): tokens are validated and freed one at a time, so
	 * buffers processed before an invalid token remain freed even
	 * though the function header states "No buffers have been
	 * released" on FC_UB_BADTOKEN — confirm which is intended.
	 */
	for (index = 0; index < count; index++) {
		fc_unsol_buf_t	*ubp;

		/* Check the token range. */
		if ((ub_array_index = tokens[index]) >=
		    QL_UB_LIMIT) {
			EL(ha, "failed, FC_UB_BADTOKEN\n");
			rval = FC_UB_BADTOKEN;
			break;
		}

		/* Check the unsolicited buffer array. */
		QL_UB_LOCK(ha);
		ubp = ha->ub_array[ub_array_index];

		if (ubp == NULL) {
			EL(ha, "failed, FC_UB_BADTOKEN-2\n");
			rval = FC_UB_BADTOKEN;
			QL_UB_UNLOCK(ha);
			break;
		}

		/* Check the state of the unsolicited buffer. */
		sp = ha->ub_array[ub_array_index]->ub_fca_private;
		sp->flags |= SRB_UB_FREE_REQUESTED;

		/*
		 * Busy-wait (100ms polls) until the buffer is back in the
		 * FCA and not acquired or in callback.  Both locks are
		 * dropped across the delay so the completion path can make
		 * progress and return the buffer.
		 */
		while (!(sp->flags & SRB_UB_IN_FCA) ||
		    (sp->flags & (SRB_UB_CALLBACK |
		    SRB_UB_ACQUIRED))) {
			QL_UB_UNLOCK(ha);
			ADAPTER_STATE_UNLOCK(ha);
			delay(drv_usectohz(100000));
			ADAPTER_STATE_LOCK(ha);
			QL_UB_LOCK(ha);
		}
		/* Detach from the array before freeing its memory. */
		ha->ub_array[ub_array_index] = NULL;
		QL_UB_UNLOCK(ha);
		ql_free_unsolicited_buffer(ha, ubp);
	}

	if (rval == FC_SUCCESS) {
		/*
		 * Signal any pending hardware reset when there are
		 * no more unsolicited buffers in use.
		 */
		if (ha->ub_allocated == 0) {
			cv_broadcast(&ha->pha->cv_ub);
		}
	}

	/* Release adapter state lock. */
	ADAPTER_STATE_UNLOCK(ha);

	/*
	 * Inform the firmware about the change of scsi target
	 * mode buffers.
	 */
	if (ha->flags & TARGET_MODE_INITIALIZED) {
		(void) ql_modify_lun(ha);
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
3275 
3276 /*
3277  * ql_ub_release
3278  *	Release unsolicited buffers from FC Transport
3279  *	to FCA for future use.
3280  *
3281  * Input:
3282  *	fca_handle = handle setup by ql_bind_port().
3283  *	count = number of buffers.
3284  *	tokens = token array for each buffer.
3285  *
3286  * Returns:
3287  *	FC_SUCCESS - the requested buffers have been released.
3288  *	FC_UNBOUND - the fca_handle specified is not bound.
3289  *	FC_UB_BADTOKEN - an invalid token was encountered.
3290  *		No buffers have been released.
3291  *
3292  * Context:
3293  *	Kernel context.
3294  */
static int
ql_ub_release(opaque_t fca_handle, uint32_t count, uint64_t tokens[])
{
	ql_adapter_state_t	*ha;
	ql_srb_t		*sp;
	ql_tgt_t		*tq;
	port_id_t		d_id;
	uint32_t		index;
	uint64_t		ub_array_index;
	int			rval = FC_SUCCESS;
	int			ub_ip_updated = FALSE;

	/* Check handle. */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, ": failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Acquire adapter state lock. */
	ADAPTER_STATE_LOCK(ha);
	QL_UB_LOCK(ha);

	/*
	 * First pass: validate every token before touching anything,
	 * so either all buffers are released or none are.
	 */
	for (index = 0; index < count; index++) {
		/* Check the token range. */
		if ((ub_array_index = tokens[index]) >=
		    QL_UB_LIMIT) {
			EL(ha, "failed, FC_UB_BADTOKEN\n");
			rval = FC_UB_BADTOKEN;
			break;
		}

		/* Check the unsolicited buffer array. */
		if (ha->ub_array[ub_array_index] == NULL) {
			EL(ha, "failed, FC_UB_BADTOKEN-2\n");
			rval = FC_UB_BADTOKEN;
			break;
		}

		/*
		 * Check the state of the unsolicited buffer: a buffer
		 * already held by the FCA cannot be released again.
		 */
		sp = ha->ub_array[ub_array_index]->ub_fca_private;
		if (sp->flags & SRB_UB_IN_FCA) {
			EL(ha, "failed, FC_UB_BADTOKEN-3\n");
			rval = FC_UB_BADTOKEN;
			break;
		}
	}

	/* If all tokens checkout, release the buffers. */
	if (rval == FC_SUCCESS) {
		/* Second pass: release each validated buffer. */
		for (index = 0; index < count; index++) {
			fc_unsol_buf_t	*ubp;

			ub_array_index = tokens[index];
			ubp = ha->ub_array[ub_array_index];
			sp = ubp->ub_fca_private;
			d_id.b24 = ubp->ub_frame.s_id;
			tq = ql_d_id_to_queue(ha, d_id);

			/*
			 * Target mode: if the transport flagged this FCP
			 * exchange as needing a login, answer the pending
			 * exchange with a CTIO carrying SCSI BUSY status
			 * so the initiator retries after logging in.
			 */
			if (sp->ub_type == FC_TYPE_SCSI_FCP &&
			    ubp->ub_resp_flags & FC_UB_RESP_LOGIN_REQUIRED &&
			    tq != NULL) {
				ctio_entry_t	*ctio;

				if (ql_req_pkt(ha, (request_t **)
				    &ctio) == QL_SUCCESS) {
					ctio->entry_type = CTIO_TYPE_2;

					/*
					 * Extended firmware interface uses
					 * a 16-bit initiator (loop) id.
					 */
					if (CFG_IST(ha,
					    CFG_EXT_FW_INTERFACE)) {
						ctio->initiator_id_l =
						    LSB(tq->loop_id);
						ctio->initiator_id_h =
						    MSB(tq->loop_id);
					} else {
						ctio->initiator_id_h =
						    LSB(tq->loop_id);
					}
					ctio->rx_id = ubp->ub_frame.rx_id;
					ctio->flags_l = BIT_7 | BIT_6;
					ctio->flags_h = BIT_7 | BIT_1 | BIT_0;
					ctio->timeout = 0xffff;
					ctio->type.s0_32bit.scsi_status_l =
					    STATUS_BUSY;
					/* Issue command to ISP */
					ql_isp_cmd(ha);
				}
			}

			/* Hand the buffer back to the FCA. */
			ubp->ub_resp_flags = 0;
			sp->flags &= ~(SRB_UB_ACQUIRED | SRB_UB_CALLBACK);
			sp->flags |= SRB_UB_IN_FCA;

			/* IP buffer. */
			if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
				ub_ip_updated = TRUE;
			}
		}
	}

	QL_UB_UNLOCK(ha);
	/* Release adapter state lock. */
	ADAPTER_STATE_UNLOCK(ha);

	/*
	 * XXX: We should call ql_isp_rcvbuf() to return a
	 * buffer to ISP only if the number of buffers fall below
	 * the low water mark.
	 */
	if (ub_ip_updated) {
		ql_isp_rcvbuf(ha);
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
3420 
3421 /*
3422  * ql_abort
3423  *	Abort a packet.
3424  *
3425  * Input:
3426  *	fca_handle = handle setup by ql_bind_port().
3427  *	pkt = pointer to fc_packet.
3428  *	flags = KM_SLEEP flag.
3429  *
3430  * Returns:
3431  *	FC_SUCCESS - the packet has successfully aborted.
3432  *	FC_ABORTED - the packet has successfully aborted.
3433  *	FC_ABORTING - the packet is being aborted.
3434  *	FC_ABORT_FAILED - the packet could not be aborted.
3435  *	FC_TRANSPORT_ERROR - a transport error occurred while attempting
3436  *		to abort the packet.
3437  *	FC_BADEXCHANGE - no packet found.
3438  *	FC_UNBOUND - the fca_handle specified is not bound.
3439  *
3440  * Context:
3441  *	Kernel context.
3442  */
static int
ql_abort(opaque_t fca_handle, fc_packet_t *pkt, int flags)
{
	port_id_t		d_id;
	ql_link_t		*link;
	ql_adapter_state_t	*ha, *pha;
	ql_srb_t		*sp;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;
	int			rval = FC_ABORTED;

	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}

	pha = ha->pha;

	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);

	ASSERT(pha->power_level == PM_LEVEL_D0);

	/* Get target queue pointer. */
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	tq = ql_d_id_to_queue(ha, d_id);

	/* Nothing to abort without a target, or while the loop is down. */
	if ((tq == NULL) || (pha->task_daemon_flags & LOOP_DOWN)) {
		if (tq == NULL) {
			EL(ha, "failed, FC_TRANSPORT_ERROR\n");
			rval = FC_TRANSPORT_ERROR;
		} else {
			EL(ha, "failed, FC_OFFLINE\n");
			rval = FC_OFFLINE;
		}
		return (rval);
	}

	sp = (ql_srb_t *)pkt->pkt_fca_private;
	lq = sp->lun_queue;

	/* Set poll flag if sleep wanted. */
	if (flags == KM_SLEEP) {
		sp->flags |= SRB_POLL;
	}

	/* Acquire target queue lock. */
	DEVICE_QUEUE_LOCK(tq);
	REQUEST_RING_LOCK(ha);

	/*
	 * Case 1: command not yet handed to the ISP — it can simply be
	 * removed from the driver's software queues and completed as
	 * aborted without involving the firmware.
	 */
	if (!(sp->flags & SRB_ISP_STARTED)) {
		/* Check pending queue for command. */
		sp = NULL;
		for (link = pha->pending_cmds.first; link != NULL;
		    link = link->next) {
			sp = link->base_address;
			if (sp == (ql_srb_t *)pkt->pkt_fca_private) {
				/* Remove srb from q. */
				ql_remove_link(&pha->pending_cmds, &sp->cmd);
				break;
			} else {
				sp = NULL;
			}
		}
		REQUEST_RING_UNLOCK(ha);

		if (sp == NULL) {
			/* Check for cmd on device queue. */
			for (link = lq->cmd.first; link != NULL;
			    link = link->next) {
				sp = link->base_address;
				if (sp == (ql_srb_t *)pkt->pkt_fca_private) {
					/* Remove srb from q. */
					ql_remove_link(&lq->cmd, &sp->cmd);
					break;
				} else {
					sp = NULL;
				}
			}
		}
		/* Release device lock */
		DEVICE_QUEUE_UNLOCK(tq);

		/* If command on target queue. */
		if (sp != NULL) {
			sp->flags &= ~SRB_IN_DEVICE_QUEUE;

			/* Set return status */
			pkt->pkt_reason = CS_ABORTED;

			sp->cmd.next = NULL;
			ql_done(&sp->cmd);
			rval = FC_ABORTED;
		} else {
			/* Not found on either software queue. */
			EL(ha, "failed, FC_BADEXCHANGE\n");
			rval = FC_BADEXCHANGE;
		}
	} else if (sp->flags & SRB_ISP_COMPLETED) {
		/* Case 2: already completed — too late to abort. */
		/* Release device queue lock. */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);
		EL(ha, "failed, already done, FC_FAILURE\n");
		rval = FC_FAILURE;
	} else if ((sp->pkt->pkt_cmd_fhdr.r_ctl == R_CTL_SOLICITED_DATA) ||
	    (sp->pkt->pkt_cmd_fhdr.r_ctl == R_CTL_STATUS)) {
		/*
		 * If here, target data/resp ctio is with Fw.
		 * Since firmware is supposed to terminate such I/Os
		 * with an error, we need not do any thing. If FW
		 * decides not to terminate those IOs and simply keep
		 * quite then we need to initiate cleanup here by
		 * calling ql_done.
		 */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);
		rval = FC_ABORTED;
	} else {
		/*
		 * Case 4: command is with the firmware.  Invalidate its
		 * request ring entry (if still present) and ask the
		 * firmware to abort the outstanding command.
		 */
		request_t	*ep = pha->request_ring_bp;
		uint16_t	cnt;

		if (sp->handle != 0) {
			for (cnt = 0; cnt < REQUEST_ENTRY_CNT; cnt++) {
				if (sp->handle == ddi_get32(
				    pha->hba_buf.acc_handle, &ep->handle)) {
					ep->entry_type = INVALID_ENTRY_TYPE;
					break;
				}
				ep++;
			}
		}

		/* Release device queue lock. */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);

		sp->flags |= SRB_ABORTING;
		(void) ql_abort_command(ha, sp);
		pkt->pkt_reason = CS_ABORTED;
		rval = FC_ABORTED;
	}

	QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);

	return (rval);
}
3590 
3591 /*
3592  * ql_reset
3593  *	Reset link or hardware.
3594  *
3595  * Input:
3596  *	fca_handle = handle setup by ql_bind_port().
3597  *	cmd = reset type command.
3598  *
3599  * Returns:
3600  *	FC_SUCCESS - reset has successfully finished.
3601  *	FC_UNBOUND - the fca_handle specified is not bound.
3602  *	FC_FAILURE - reset failed.
3603  *
3604  * Context:
3605  *	Kernel context.
3606  */
static int
ql_reset(opaque_t fca_handle, uint32_t cmd)
{
	ql_adapter_state_t	*ha;
	int			rval = FC_SUCCESS, rval2;

	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}

	QL_PRINT_10(CE_CONT, "(%d,%d): started, cmd=%d\n", ha->instance,
	    ha->vp_index, cmd);

	ASSERT(ha->power_level == PM_LEVEL_D0);

	switch (cmd) {
	case FC_FCA_CORE:
		/*
		 * Dump firmware core if specified; only the physical
		 * (vp_index 0) port owns the firmware.
		 */
		if (ha->vp_index == 0) {
			if (ql_dump_firmware(ha) != QL_SUCCESS) {
				EL(ha, "failed, FC_FAILURE\n");
				rval = FC_FAILURE;
			}
		}
		break;
	case FC_FCA_LINK_RESET:
		/* Skip the loop reset if the link is already down. */
		if (!(ha->pha->task_daemon_flags & LOOP_DOWN)) {
			if (ql_loop_reset(ha) != QL_SUCCESS) {
				EL(ha, "failed, FC_FAILURE-2\n");
				rval = FC_FAILURE;
			}
		}
		break;
	case FC_FCA_RESET_CORE:
	case FC_FCA_RESET:
		/* Dump firmware core first if specified. */
		if (cmd == FC_FCA_RESET_CORE) {
			if (ha->vp_index != 0) {
				/* Virtual ports can only reset the loop. */
				rval2 = ha->pha->task_daemon_flags & LOOP_DOWN
				    ? QL_SUCCESS : ql_loop_reset(ha);
			} else {
				rval2 = ql_dump_firmware(ha);
			}
			if (rval2 != QL_SUCCESS) {
				EL(ha, "failed, FC_FAILURE-3\n");
				rval = FC_FAILURE;
			}
		}

		/*
		 * Free up all unsolicited buffers: ask the transport
		 * (via the state-change callback) to release them before
		 * the hardware is reset.
		 */
		if (ha->ub_allocated != 0) {
			/* Inform to release buffers. */
			ha->state = FC_PORT_SPEED_MASK(ha->state);
			ha->state |= FC_STATE_RESET_REQUESTED;
			if (ha->flags & FCA_BOUND) {
				(ha->bind_info.port_statec_cb)
				    (ha->bind_info.port_handle,
				    ha->state);
			}
		}

		/* Keep only the speed bits of the port state. */
		ha->state = FC_PORT_SPEED_MASK(ha->state);

		/* All buffers freed */
		if (ha->ub_allocated == 0) {
			/* Hardware reset. */
			if (cmd == FC_FCA_RESET) {
				if (ha->vp_index == 0) {
					(void) ql_abort_isp(ha);
				} else if (!(ha->pha->task_daemon_flags &
				    LOOP_DOWN)) {
					(void) ql_loop_reset(ha);
				}
			}

			/* Inform that the hardware has been reset */
			ha->state |= FC_STATE_RESET;
		} else {
			/*
			 * the port driver expects an online if
			 * buffers are not freed.
			 */
			if (ha->topology & QL_LOOP_CONNECTION) {
				ha->state |= FC_STATE_LOOP;
			} else {
				ha->state |= FC_STATE_ONLINE;
			}
		}

		/* Have the task daemon deliver the state change. */
		TASK_DAEMON_LOCK(ha);
		ha->task_daemon_flags |= FC_STATE_CHANGE;
		TASK_DAEMON_UNLOCK(ha);

		ql_awaken_task_daemon(ha, NULL, FC_STATE_CHANGE, 0);

		break;
	default:
		EL(ha, "unknown cmd=%xh\n", cmd);
		break;
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "cmd=%xh, failed=%xh\n", cmd, rval);
	} else {
		/*EMPTY*/
		QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance,
		    ha->vp_index);
	}

	return (rval);
}
3721 
3722 /*
3723  * ql_port_manage
3724  *	Perform port management or diagnostics.
3725  *
3726  * Input:
3727  *	fca_handle = handle setup by ql_bind_port().
3728  *	cmd = pointer to command structure.
3729  *
3730  * Returns:
3731  *	FC_SUCCESS - the request completed successfully.
3732  *	FC_FAILURE - the request did not complete successfully.
3733  *	FC_UNBOUND - the fca_handle specified is not bound.
3734  *
3735  * Context:
3736  *	Kernel context.
3737  */
3738 static int
3739 ql_port_manage(opaque_t fca_handle, fc_fca_pm_t *cmd)
3740 {
3741 	clock_t			timer;
3742 	uint16_t		index;
3743 	uint32_t		*bp;
3744 	port_id_t		d_id;
3745 	ql_link_t		*link;
3746 	ql_adapter_state_t	*ha, *pha;
3747 	ql_tgt_t		*tq;
3748 	dma_mem_t		buffer_xmt, buffer_rcv;
3749 	size_t			length;
3750 	uint32_t		cnt;
3751 	char			buf[80];
3752 	lbp_t			*lb;
3753 	ql_mbx_data_t		mr;
3754 	app_mbx_cmd_t		*mcp;
3755 	int			i0;
3756 	uint8_t			*bptr;
3757 	int			rval2, rval = FC_SUCCESS;
3758 	uint32_t		opcode;
3759 
3760 	ha = ql_fca_handle_to_state(fca_handle);
3761 	if (ha == NULL) {
3762 		QL_PRINT_2(CE_CONT, ": failed, no adapter=%ph\n",
3763 		    (void *)fca_handle);
3764 		return (FC_UNBOUND);
3765 	}
3766 	pha = ha->pha;
3767 
3768 	QL_PRINT_3(CE_CONT, "(%d): started=%xh\n", ha->instance,
3769 	    cmd->pm_cmd_code);
3770 
3771 	ASSERT(pha->power_level == PM_LEVEL_D0);
3772 
3773 	ql_awaken_task_daemon(ha, NULL, DRIVER_STALL, 0);
3774 
3775 	/*
3776 	 * Wait for all outstanding commands to complete
3777 	 */
3778 	index = (uint16_t)ql_wait_outstanding(ha);
3779 
3780 	if (index != MAX_OUTSTANDING_COMMANDS) {
3781 		ql_awaken_task_daemon(ha, NULL, 0, DRIVER_STALL);
3782 		ql_restart_queues(ha);
3783 		EL(ha, "failed, FC_TRAN_BUSY\n");
3784 		return (FC_TRAN_BUSY);
3785 	}
3786 
3787 	switch (cmd->pm_cmd_code) {
3788 	case FC_PORT_BYPASS:
3789 		d_id.b24 = *cmd->pm_cmd_buf;
3790 		tq = ql_d_id_to_queue(ha, d_id);
3791 		if (tq == NULL || ql_loop_port_bypass(ha, tq) != QL_SUCCESS) {
3792 			EL(ha, "failed, FC_PORT_BYPASS FC_FAILURE\n");
3793 			rval = FC_FAILURE;
3794 		}
3795 		break;
3796 	case FC_PORT_UNBYPASS:
3797 		d_id.b24 = *cmd->pm_cmd_buf;
3798 		tq = ql_d_id_to_queue(ha, d_id);
3799 		if (tq == NULL || ql_loop_port_enable(ha, tq) != QL_SUCCESS) {
3800 			EL(ha, "failed, FC_PORT_UNBYPASS FC_FAILURE\n");
3801 			rval = FC_FAILURE;
3802 		}
3803 		break;
3804 	case FC_PORT_GET_FW_REV:
3805 		(void) sprintf(buf, "%d.%d.%d", pha->fw_major_version,
3806 		    pha->fw_minor_version, pha->fw_subminor_version);
3807 		length = strlen(buf) + 1;
3808 		if (cmd->pm_data_len < length) {
3809 			cmd->pm_data_len = length;
3810 			EL(ha, "failed, FC_PORT_GET_FW_REV FC_FAILURE\n");
3811 			rval = FC_FAILURE;
3812 		} else {
3813 			(void) strcpy(cmd->pm_data_buf, buf);
3814 		}
3815 		break;
3816 
3817 	case FC_PORT_GET_FCODE_REV: {
3818 		caddr_t		fcode_ver_buf = NULL;
3819 
3820 		i0 = 0;
3821 		/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
3822 		rval2 = ddi_getlongprop(DDI_DEV_T_ANY, ha->dip,
3823 		    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version",
3824 		    (caddr_t)&fcode_ver_buf, &i0);
3825 		length = (uint_t)i0;
3826 
3827 		if (rval2 != DDI_PROP_SUCCESS) {
3828 			EL(ha, "failed, getting version = %xh\n", rval2);
3829 			length = 20;
3830 			fcode_ver_buf = kmem_alloc(length, KM_SLEEP);
3831 			if (fcode_ver_buf != NULL) {
3832 				(void) sprintf(fcode_ver_buf,
3833 				    "NO FCODE FOUND");
3834 			}
3835 		}
3836 
3837 		if (cmd->pm_data_len < length) {
3838 			EL(ha, "length error, FC_PORT_GET_FCODE_REV "
3839 			    "dst=%ld, src=%ld\n", cmd->pm_data_len, length);
3840 			cmd->pm_data_len = length;
3841 			rval = FC_FAILURE;
3842 		} else if (fcode_ver_buf != NULL) {
3843 			bcopy((void *)fcode_ver_buf, (void *)cmd->pm_data_buf,
3844 			    length);
3845 		}
3846 
3847 		if (fcode_ver_buf != NULL) {
3848 			kmem_free(fcode_ver_buf, length);
3849 		}
3850 		break;
3851 	}
3852 
3853 	case FC_PORT_GET_DUMP:
3854 		if (cmd->pm_data_len < (size_t)pha->risc_dump_size) {
3855 			EL(ha, "failed, FC_PORT_GET_DUMP incorrect "
3856 			    "length=%lxh\n", cmd->pm_data_len);
3857 			cmd->pm_data_len = pha->risc_dump_size;
3858 			rval = FC_FAILURE;
3859 		} else if (ql_dump_state & QL_DUMPING) {
3860 			EL(ha, "failed, FC_PORT_GET_DUMP FC_TRAN_BUSY\n");
3861 			rval = FC_TRAN_BUSY;
3862 		} else if (ql_dump_state & QL_DUMP_VALID) {
3863 			(void) ql_ascii_fw_dump(ha, cmd->pm_data_buf);
3864 			ql_dump_state |= QL_DUMP_UPLOADED;
3865 		} else {
3866 			EL(ha, "failed, FC_PORT_GET_DUMP no dump file\n");
3867 			rval = FC_FAILURE;
3868 		}
3869 		break;
3870 	case FC_PORT_FORCE_DUMP:
3871 		PORTMANAGE_LOCK(ha);
3872 		if (ql_dump_firmware(ha) != QL_SUCCESS) {
3873 			EL(ha, "failed, FC_PORT_FORCE_DUMP FC_FAILURE\n");
3874 			rval = FC_FAILURE;
3875 		}
3876 		PORTMANAGE_UNLOCK(ha);
3877 		break;
3878 	case FC_PORT_DOWNLOAD_FW:
3879 		PORTMANAGE_LOCK(ha);
3880 		if (CFG_IST(ha, CFG_CTRL_2425)) {
3881 			if (ql_24xx_load_flash(ha, (uint8_t *)cmd->pm_data_buf,
3882 			    (uint32_t)cmd->pm_data_len,
3883 			    FLASH_24XX_FIRMWARE_ADDR) != QL_SUCCESS) {
3884 				EL(ha, "failed, FC_PORT_DOWNLOAD_FW\n");
3885 				rval = FC_FAILURE;
3886 			}
3887 			ql_reset_chip(ha);
3888 			(void) ql_abort_isp(ha);
3889 		} else {
3890 			/* Save copy of the firmware. */
3891 			if (pha->risc_code != NULL) {
3892 				kmem_free(pha->risc_code, pha->risc_code_size);
3893 				pha->risc_code = NULL;
3894 				pha->risc_code_size = 0;
3895 			}
3896 
3897 			pha->risc_code = kmem_alloc(cmd->pm_data_len,
3898 			    KM_SLEEP);
3899 			if (pha->risc_code != NULL) {
3900 				pha->risc_code_size =
3901 				    (uint32_t)cmd->pm_data_len;
3902 				bcopy(cmd->pm_data_buf, pha->risc_code,
3903 				    cmd->pm_data_len);
3904 
3905 				/* Do abort to force reload. */
3906 				ql_reset_chip(ha);
3907 				if (ql_abort_isp(ha) != QL_SUCCESS) {
3908 					kmem_free(pha->risc_code,
3909 					    pha->risc_code_size);
3910 					pha->risc_code = NULL;
3911 					pha->risc_code_size = 0;
3912 					ql_reset_chip(ha);
3913 					(void) ql_abort_isp(ha);
3914 					EL(ha, "failed, FC_PORT_DOWNLOAD_FW"
3915 					    " FC_FAILURE\n");
3916 					rval = FC_FAILURE;
3917 				}
3918 			}
3919 		}
3920 		PORTMANAGE_UNLOCK(ha);
3921 		break;
3922 	case FC_PORT_GET_DUMP_SIZE:
3923 		bp = (uint32_t *)cmd->pm_data_buf;
3924 		*bp = pha->risc_dump_size;
3925 		break;
3926 	case FC_PORT_DIAG:
3927 		/*
3928 		 * Prevents concurrent diags
3929 		 */
3930 		PORTMANAGE_LOCK(ha);
3931 
3932 		/* Wait for suspension to end. */
3933 		for (timer = 0; timer < 3000 &&
3934 		    pha->task_daemon_flags & QL_LOOP_TRANSITION; timer++) {
3935 			ql_delay(ha, 10000);
3936 		}
3937 
3938 		if (pha->task_daemon_flags & QL_LOOP_TRANSITION) {
3939 			EL(ha, "failed, FC_TRAN_BUSY-2\n");
3940 			rval = FC_TRAN_BUSY;
3941 			PORTMANAGE_UNLOCK(ha);
3942 			break;
3943 		}
3944 
3945 		switch (cmd->pm_cmd_flags) {
3946 		case QL_DIAG_EXEFMW:
3947 			if (ql_start_firmware(ha) != QL_SUCCESS) {
3948 				EL(ha, "failed, QL_DIAG_EXEFMW FC_FAILURE\n");
3949 				rval = FC_FAILURE;
3950 			}
3951 			break;
3952 		case QL_DIAG_CHKCMDQUE:
3953 			for (i0 = 1, cnt = 0; i0 < MAX_OUTSTANDING_COMMANDS;
3954 			    i0++) {
3955 				cnt += (pha->outstanding_cmds[i0] != NULL);
3956 			}
3957 			if (cnt != 0) {
3958 				EL(ha, "failed, QL_DIAG_CHKCMDQUE "
3959 				    "FC_FAILURE\n");
3960 				rval = FC_FAILURE;
3961 			}
3962 			break;
3963 		case QL_DIAG_FMWCHKSUM:
3964 			if (ql_verify_checksum(ha) != QL_SUCCESS) {
3965 				EL(ha, "failed, QL_DIAG_FMWCHKSUM "
3966 				    "FC_FAILURE\n");
3967 				rval = FC_FAILURE;
3968 			}
3969 			break;
3970 		case QL_DIAG_SLFTST:
3971 			if (ql_online_selftest(ha) != QL_SUCCESS) {
3972 				EL(ha, "failed, QL_DIAG_SLFTST FC_FAILURE\n");
3973 				rval = FC_FAILURE;
3974 			}
3975 			ql_reset_chip(ha);
3976 			(void) ql_abort_isp(ha);
3977 			break;
3978 		case QL_DIAG_REVLVL:
3979 			if (cmd->pm_stat_len <
3980 			    sizeof (ql_adapter_revlvl_t)) {
3981 				EL(ha, "failed, QL_DIAG_REVLVL FC_NOMEM, "
3982 				    "slen=%lxh, rlvllen=%lxh\n",
3983 				    cmd->pm_stat_len,
3984 				    sizeof (ql_adapter_revlvl_t));
3985 				rval = FC_NOMEM;
3986 			} else {
3987 				bcopy((void *)&(pha->adapter_stats->revlvl),
3988 				    cmd->pm_stat_buf,
3989 				    (size_t)cmd->pm_stat_len);
3990 				cmd->pm_stat_len =
3991 				    sizeof (ql_adapter_revlvl_t);
3992 			}
3993 			break;
3994 		case QL_DIAG_LPBMBX:
3995 
3996 			if (cmd->pm_data_len != sizeof (struct app_mbx_cmd)) {
3997 				EL(ha, "failed, QL_DIAG_LPBMBX "
3998 				    "FC_INVALID_REQUEST, pmlen=%lxh, "
3999 				    "reqd=%lxh\n", cmd->pm_data_len,
4000 				    sizeof (struct app_mbx_cmd));
4001 				rval = FC_INVALID_REQUEST;
4002 				break;
4003 			}
4004 
4005 			mcp = (app_mbx_cmd_t *)cmd->pm_data_buf;
4006 			mr.mb[1] = mcp->mb[1];
4007 			mr.mb[2] = mcp->mb[2];
4008 			mr.mb[3] = mcp->mb[3];
4009 			mr.mb[4] = RD16_IO_REG(pha, mailbox[4]);
4010 			mr.mb[5] = RD16_IO_REG(pha, mailbox[5]);
4011 			mr.mb[6] = mcp->mb[6];
4012 			mr.mb[7] = mcp->mb[7];
4013 
4014 			bcopy(&mr.mb[0], &mr.mb[10], sizeof (uint16_t) * 8);
4015 			if (ql_mbx_wrap_test(ha, &mr) != QL_SUCCESS) {
4016 				EL(ha, "failed, QL_DIAG_LPBMBX FC_FAILURE\n");
4017 				rval = FC_FAILURE;
4018 				break;
4019 			}
4020 
4021 			for (i0 = 1; i0 < 8; i0++) {
4022 				if (i0 == 4) {
4023 					i0 = 6;
4024 				}
4025 				if (mr.mb[i0] != mr.mb[i0 + 10]) {
4026 					EL(ha, "failed, QL_DIAG_LPBMBX "
4027 					    "FC_FAILURE-2\n");
4028 
4029 					(void) ql_flash_errlog(ha,
4030 					    FLASH_ERRLOG_ISP_ERR, 0,
4031 					    RD16_IO_REG(ha, hccr),
4032 					    RD16_IO_REG(ha, istatus));
4033 
4034 					rval = FC_FAILURE;
4035 					break;
4036 				}
4037 			}
4038 			(void) ql_abort_isp(ha);
4039 			break;
4040 		case QL_DIAG_LPBDTA:
4041 			/*
4042 			 * For loopback data, we receive the
4043 			 * data back in pm_stat_buf. This provides
4044 			 * the user an opportunity to compare the
4045 			 * transmitted and received data.
4046 			 *
4047 			 * NB: lb->options are:
4048 			 *	0 --> Ten bit loopback
4049 			 *	1 --> One bit loopback
4050 			 *	2 --> External loopback
4051 			 */
4052 			if (cmd->pm_data_len > 65536) {
4053 				rval = FC_TOOMANY;
4054 				EL(ha, "failed, QL_DIAG_LPBDTA "
4055 				    "FC_TOOMANY=%lxh\n", cmd->pm_data_len);
4056 				break;
4057 			}
4058 			if (ql_get_dma_mem(ha, &buffer_xmt,
4059 			    (uint32_t)cmd->pm_data_len, LITTLE_ENDIAN_DMA,
4060 			    MEM_DATA_ALIGN) != QL_SUCCESS) {
4061 				EL(ha, "failed, QL_DIAG_LPBDTA FC_NOMEM\n");
4062 				rval = FC_NOMEM;
4063 				break;
4064 			}
4065 			if (ql_get_dma_mem(ha, &buffer_rcv,
4066 			    (uint32_t)cmd->pm_data_len, LITTLE_ENDIAN_DMA,
4067 			    MEM_DATA_ALIGN) != QL_SUCCESS) {
4068 				EL(ha, "failed, QL_DIAG_LPBDTA FC_NOMEM-2\n");
4069 				rval = FC_NOMEM;
4070 				break;
4071 			}
4072 			ddi_rep_put8(buffer_xmt.acc_handle,
4073 			    (uint8_t *)cmd->pm_data_buf,
4074 			    (uint8_t *)buffer_xmt.bp,
4075 			    cmd->pm_data_len, DDI_DEV_AUTOINCR);
4076 
4077 			/* 22xx's adapter must be in loop mode for test. */
4078 			if (CFG_IST(ha, CFG_CTRL_2200)) {
4079 				bptr = &ha->init_ctrl_blk.cb.add_fw_opt[0];
4080 				if (ha->flags & POINT_TO_POINT ||
4081 				    (ha->task_daemon_flags & LOOP_DOWN &&
4082 				    *bptr & (BIT_6 | BIT_5 | BIT_4))) {
4083 					cnt = *bptr;
4084 					*bptr = (uint8_t)
4085 					    (*bptr & ~(BIT_6|BIT_5|BIT_4));
4086 					(void) ql_abort_isp(ha);
4087 					*bptr = (uint8_t)cnt;
4088 				}
4089 			}
4090 
4091 			/* Shutdown IP. */
4092 			if (pha->flags & IP_INITIALIZED) {
4093 				(void) ql_shutdown_ip(pha);
4094 			}
4095 
4096 			lb = (lbp_t *)cmd->pm_cmd_buf;
4097 			lb->transfer_count =
4098 			    (uint32_t)cmd->pm_data_len;
4099 			lb->transfer_segment_count = 0;
4100 			lb->receive_segment_count = 0;
4101 			lb->transfer_data_address =
4102 			    buffer_xmt.cookie.dmac_address;
4103 			lb->receive_data_address =
4104 			    buffer_rcv.cookie.dmac_address;
4105 
4106 			if ((lb->options & 7) == 2 &&
4107 			    pha->task_daemon_flags &
4108 			    (QL_LOOP_TRANSITION | LOOP_DOWN)) {
4109 				/* Loop must be up for external */
4110 				EL(ha, "failed, QL_DIAG_LPBDTA FC_TRAN_BUSY\n");
4111 				rval = FC_TRAN_BUSY;
4112 			} else if (ql_loop_back(ha, lb,
4113 			    buffer_xmt.cookie.dmac_notused,
4114 			    buffer_rcv.cookie.dmac_notused) == QL_SUCCESS) {
4115 				bzero((void *)cmd->pm_stat_buf,
4116 				    cmd->pm_stat_len);
4117 				ddi_rep_get8(buffer_rcv.acc_handle,
4118 				    (uint8_t *)cmd->pm_stat_buf,
4119 				    (uint8_t *)buffer_rcv.bp,
4120 				    cmd->pm_stat_len, DDI_DEV_AUTOINCR);
4121 			} else {
4122 				EL(ha, "failed, QL_DIAG_LPBDTA FC_FAILURE\n");
4123 				rval = FC_FAILURE;
4124 			}
4125 
4126 			ql_free_phys(ha, &buffer_xmt);
4127 			ql_free_phys(ha, &buffer_rcv);
4128 
4129 			/* Needed to recover the f/w */
4130 			(void) ql_abort_isp(ha);
4131 
4132 			/* Restart IP if it was shutdown. */
4133 			if (pha->flags & IP_ENABLED &&
4134 			    !(pha->flags & IP_INITIALIZED)) {
4135 				(void) ql_initialize_ip(pha);
4136 				ql_isp_rcvbuf(pha);
4137 			}
4138 
4139 			break;
4140 		case QL_DIAG_ECHO: {
4141 			/*
4142 			 * issue an echo command with a user supplied
4143 			 * data pattern and destination address
4144 			 */
4145 			echo_t		echo;		/* temp echo struct */
4146 
4147 			/* Setup echo cmd & adjust for platform */
4148 			opcode = QL_ECHO_CMD;
4149 			BIG_ENDIAN_32(&opcode);
4150 
4151 			/*
4152 			 * due to limitations in the ql
4153 			 * firmaware the echo data field is
4154 			 * limited to 220
4155 			 */
4156 			if ((cmd->pm_cmd_len > QL_ECHO_CMD_LENGTH) ||
4157 			    (cmd->pm_stat_len > QL_ECHO_CMD_LENGTH)) {
4158 				EL(ha, "failed, QL_DIAG_ECHO FC_TOOMANY, "
4159 				    "cmdl1=%lxh, statl2=%lxh\n",
4160 				    cmd->pm_cmd_len, cmd->pm_stat_len);
4161 				rval = FC_TOOMANY;
4162 				break;
4163 			}
4164 
4165 			/*
4166 			 * the input data buffer has the user
4167 			 * supplied data pattern.  The "echoed"
4168 			 * data will be DMAed into the output
4169 			 * data buffer.  Therefore the length
4170 			 * of the output buffer must be equal
4171 			 * to or greater then the input buffer
4172 			 * length
4173 			 */
4174 			if (cmd->pm_cmd_len > cmd->pm_stat_len) {
4175 				EL(ha, "failed, QL_DIAG_ECHO FC_TOOMANY-2,"
4176 				    " cmdl1=%lxh, statl2=%lxh\n",
4177 				    cmd->pm_cmd_len, cmd->pm_stat_len);
4178 				rval = FC_TOOMANY;
4179 				break;
4180 			}
4181 			/* add four bytes for the opcode */
4182 			echo.transfer_count = (uint32_t)(cmd->pm_cmd_len + 4);
4183 
4184 			/*
4185 			 * are we 32 or 64 bit addressed???
4186 			 * We need to get the appropriate
4187 			 * DMA and set the command options;
4188 			 * 64 bit (bit 6) or 32 bit
4189 			 * (no bit 6) addressing.
4190 			 * while we are at it lets ask for
4191 			 * real echo (bit 15)
4192 			 */
4193 			echo.options = BIT_15;
4194 			if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
4195 				echo.options = (uint16_t)
4196 				    (echo.options | BIT_6);
4197 			}
4198 
4199 			/*
4200 			 * Set up the DMA mappings for the
4201 			 * output and input data buffers.
4202 			 * First the output buffer
4203 			 */
4204 			if (ql_get_dma_mem(ha, &buffer_xmt,
4205 			    (uint32_t)(cmd->pm_data_len + 4),
4206 			    LITTLE_ENDIAN_DMA, MEM_DATA_ALIGN) != QL_SUCCESS) {
4207 				EL(ha, "failed, QL_DIAG_ECHO FC_NOMEM\n");
4208 				rval = FC_NOMEM;
4209 				break;
4210 			}
4211 			echo.transfer_data_address = buffer_xmt.cookie;
4212 
4213 			/* Next the input buffer */
4214 			if (ql_get_dma_mem(ha, &buffer_rcv,
4215 			    (uint32_t)(cmd->pm_data_len + 4),
4216 			    LITTLE_ENDIAN_DMA, MEM_DATA_ALIGN) != QL_SUCCESS) {
4217 				/*
4218 				 * since we could not allocate
4219 				 * DMA space for the input
4220 				 * buffer we need to clean up
4221 				 * by freeing the DMA space
4222 				 * we allocated for the output
4223 				 * buffer
4224 				 */
4225 				ql_free_phys(ha, &buffer_xmt);
4226 				EL(ha, "failed, QL_DIAG_ECHO FC_NOMEM-2\n");
4227 				rval = FC_NOMEM;
4228 				break;
4229 			}
4230 			echo.receive_data_address = buffer_rcv.cookie;
4231 
4232 			/*
4233 			 * copy the 4 byte ECHO op code to the
4234 			 * allocated DMA space
4235 			 */
4236 			ddi_rep_put8(buffer_xmt.acc_handle, (uint8_t *)&opcode,
4237 			    (uint8_t *)buffer_xmt.bp, 4, DDI_DEV_AUTOINCR);
4238 
4239 			/*
4240 			 * copy the user supplied data to the
4241 			 * allocated DMA space
4242 			 */
4243 			ddi_rep_put8(buffer_xmt.acc_handle,
4244 			    (uint8_t *)cmd->pm_cmd_buf,
4245 			    (uint8_t *)buffer_xmt.bp + 4, cmd->pm_cmd_len,
4246 			    DDI_DEV_AUTOINCR);
4247 
4248 			/* Shutdown IP. */
4249 			if (pha->flags & IP_INITIALIZED) {
4250 				(void) ql_shutdown_ip(pha);
4251 			}
4252 
4253 			/* send the echo */
4254 			if (ql_echo(ha, &echo) == QL_SUCCESS) {
4255 				ddi_rep_put8(buffer_rcv.acc_handle,
4256 				    (uint8_t *)buffer_rcv.bp + 4,
4257 				    (uint8_t *)cmd->pm_stat_buf,
4258 				    cmd->pm_stat_len, DDI_DEV_AUTOINCR);
4259 			} else {
4260 				EL(ha, "failed, QL_DIAG_ECHO FC_FAILURE\n");
4261 				rval = FC_FAILURE;
4262 			}
4263 
4264 			/* Restart IP if it was shutdown. */
4265 			if (pha->flags & IP_ENABLED &&
4266 			    !(pha->flags & IP_INITIALIZED)) {
4267 				(void) ql_initialize_ip(pha);
4268 				ql_isp_rcvbuf(pha);
4269 			}
4270 			/* free up our DMA buffers */
4271 			ql_free_phys(ha, &buffer_xmt);
4272 			ql_free_phys(ha, &buffer_rcv);
4273 			break;
4274 		}
4275 		default:
4276 			EL(ha, "unknown=%xh, FC_PORT_DIAG "
4277 			    "FC_INVALID_REQUEST\n", cmd->pm_cmd_flags);
4278 			rval = FC_INVALID_REQUEST;
4279 			break;
4280 		}
4281 		PORTMANAGE_UNLOCK(ha);
4282 		break;
4283 	case FC_PORT_LINK_STATE:
4284 		/* Check for name equal to null. */
4285 		for (index = 0; index < 8 && index < cmd->pm_cmd_len;
4286 		    index++) {
4287 			if (cmd->pm_cmd_buf[index] != 0) {
4288 				break;
4289 			}
4290 		}
4291 
4292 		/* If name not null. */
4293 		if (index < 8 && cmd->pm_cmd_len >= 8) {
4294 			/* Locate device queue. */
4295 			tq = NULL;
4296 			for (index = 0; index < DEVICE_HEAD_LIST_SIZE &&
4297 			    tq == NULL; index++) {
4298 				for (link = ha->dev[index].first; link != NULL;
4299 				    link = link->next) {
4300 					tq = link->base_address;
4301 
4302 					if (bcmp((void *)&tq->port_name[0],
4303 					    (void *)cmd->pm_cmd_buf, 8) == 0) {
4304 						break;
4305 					} else {
4306 						tq = NULL;
4307 					}
4308 				}
4309 			}
4310 
4311 			if (tq != NULL && VALID_DEVICE_ID(ha, tq->loop_id)) {
4312 				cmd->pm_stat_buf[0] = (int8_t)LSB(ha->state);
4313 				cmd->pm_stat_buf[1] = (int8_t)MSB(ha->state);
4314 			} else {
4315 				cnt = FC_PORT_SPEED_MASK(ha->state) |
4316 				    FC_STATE_OFFLINE;
4317 				cmd->pm_stat_buf[0] = (int8_t)LSB(cnt);
4318 				cmd->pm_stat_buf[1] = (int8_t)MSB(cnt);
4319 			}
4320 		} else {
4321 			cmd->pm_stat_buf[0] = (int8_t)LSB(ha->state);
4322 			cmd->pm_stat_buf[1] = (int8_t)MSB(ha->state);
4323 		}
4324 		break;
4325 	case FC_PORT_INITIALIZE:
4326 		if (cmd->pm_cmd_len >= 8) {
4327 			tq = NULL;
4328 			for (index = 0; index < DEVICE_HEAD_LIST_SIZE &&
4329 			    tq == NULL; index++) {
4330 				for (link = ha->dev[index].first; link != NULL;
4331 				    link = link->next) {
4332 					tq = link->base_address;
4333 
4334 					if (bcmp((void *)&tq->port_name[0],
4335 					    (void *)cmd->pm_cmd_buf, 8) == 0) {
4336 						if (!VALID_DEVICE_ID(ha,
4337 						    tq->loop_id)) {
4338 							tq = NULL;
4339 						}
4340 						break;
4341 					} else {
4342 						tq = NULL;
4343 					}
4344 				}
4345 			}
4346 
4347 			if (tq == NULL || ql_target_reset(ha, tq,
4348 			    ha->loop_reset_delay) != QL_SUCCESS) {
4349 				EL(ha, "failed, FC_PORT_INITIALIZE "
4350 				    "FC_FAILURE\n");
4351 				rval = FC_FAILURE;
4352 			}
4353 		} else {
4354 			EL(ha, "failed, FC_PORT_INITIALIZE FC_FAILURE-2, "
4355 			    "clen=%lxh\n", cmd->pm_cmd_len);
4356 
4357 			rval = FC_FAILURE;
4358 		}
4359 		break;
4360 	case FC_PORT_RLS:
4361 		if (cmd->pm_data_len < sizeof (fc_rls_acc_t)) {
4362 			EL(ha, "failed, buffer size passed: %lxh, "
4363 			    "req: %lxh\n", cmd->pm_data_len,
4364 			    (sizeof (fc_rls_acc_t)));
4365 			rval = FC_FAILURE;
4366 		} else if (LOOP_NOT_READY(pha)) {
4367 			EL(ha, "loop NOT ready\n");
4368 			bzero(cmd->pm_data_buf, cmd->pm_data_len);
4369 		} else if (ql_get_link_status(ha, ha->loop_id,
4370 		    cmd->pm_data_len, cmd->pm_data_buf, 0) != QL_SUCCESS) {
4371 			EL(ha, "failed, FC_PORT_RLS FC_FAILURE\n");
4372 			rval = FC_FAILURE;
4373 #ifdef _BIG_ENDIAN
4374 		} else {
4375 			fc_rls_acc_t		*rls;
4376 
4377 			rls = (fc_rls_acc_t *)cmd->pm_data_buf;
4378 			LITTLE_ENDIAN_32(&rls->rls_link_fail);
4379 			LITTLE_ENDIAN_32(&rls->rls_sync_loss);
4380 			LITTLE_ENDIAN_32(&rls->rls_sig_loss);
4381 			LITTLE_ENDIAN_32(&rls->rls_invalid_crc);
4382 #endif /* _BIG_ENDIAN */
4383 		}
4384 		break;
4385 	case FC_PORT_GET_NODE_ID:
4386 		if (ql_get_rnid_params(ha, cmd->pm_data_len,
4387 		    cmd->pm_data_buf) != QL_SUCCESS) {
4388 			EL(ha, "failed, FC_PORT_GET_NODE_ID FC_FAILURE\n");
4389 			rval = FC_FAILURE;
4390 		}
4391 		break;
4392 	case FC_PORT_SET_NODE_ID:
4393 		if (ql_set_rnid_params(ha, cmd->pm_data_len,
4394 		    cmd->pm_data_buf) != QL_SUCCESS) {
4395 			EL(ha, "failed, FC_PORT_SET_NODE_ID FC_FAILURE\n");
4396 			rval = FC_FAILURE;
4397 		}
4398 		break;
4399 	case FC_PORT_DOWNLOAD_FCODE:
4400 		PORTMANAGE_LOCK(ha);
4401 		if ((CFG_IST(ha, CFG_CTRL_2425)) == 0) {
4402 			rval = ql_load_flash(ha, (uint8_t *)cmd->pm_data_buf,
4403 			    (uint32_t)cmd->pm_data_len);
4404 		} else {
4405 			if (cmd->pm_data_buf[0] == 4 &&
4406 			    cmd->pm_data_buf[8] == 0 &&
4407 			    cmd->pm_data_buf[9] == 0x10 &&
4408 			    cmd->pm_data_buf[10] == 0 &&
4409 			    cmd->pm_data_buf[11] == 0) {
4410 				rval = ql_24xx_load_flash(ha,
4411 				    (uint8_t *)cmd->pm_data_buf,
4412 				    (uint32_t)cmd->pm_data_len,
4413 				    FLASH_24XX_FIRMWARE_ADDR);
4414 			} else {
4415 				rval = ql_24xx_load_flash(ha,
4416 				    (uint8_t *)cmd->pm_data_buf,
4417 				    (uint32_t)cmd->pm_data_len, 0);
4418 			}
4419 		}
4420 
4421 		if (rval != QL_SUCCESS) {
4422 			EL(ha, "failed, FC_PORT_DOWNLOAD_FCODE FC_FAILURE\n");
4423 			rval = FC_FAILURE;
4424 		} else {
4425 			rval = FC_SUCCESS;
4426 		}
4427 		ql_reset_chip(ha);
4428 		(void) ql_abort_isp(ha);
4429 		PORTMANAGE_UNLOCK(ha);
4430 		break;
4431 	default:
4432 		EL(ha, "unknown=%xh, FC_BADCMD\n", cmd->pm_cmd_code);
4433 		rval = FC_BADCMD;
4434 		break;
4435 	}
4436 
4437 	/* Wait for suspension to end. */
4438 	ql_awaken_task_daemon(ha, NULL, 0, DRIVER_STALL);
4439 	timer = 0;
4440 
4441 	while (timer++ < 3000 &&
4442 	    ha->task_daemon_flags & (QL_LOOP_TRANSITION | DRIVER_STALL)) {
4443 		ql_delay(ha, 10000);
4444 	}
4445 
4446 	ql_restart_queues(ha);
4447 
4448 	if (rval != FC_SUCCESS) {
4449 		EL(ha, "failed, rval = %xh\n", rval);
4450 	} else {
4451 		/*EMPTY*/
4452 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
4453 	}
4454 
4455 	return (rval);
4456 }
4457 
4458 static opaque_t
4459 ql_get_device(opaque_t fca_handle, fc_portid_t d_id)
4460 {
4461 	port_id_t		id;
4462 	ql_adapter_state_t	*ha;
4463 
4464 	id.r.rsvd_1 = 0;
4465 	id.b24 = d_id.port_id;
4466 
4467 	ha = ql_fca_handle_to_state(fca_handle);
4468 	if (ha == NULL) {
4469 		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
4470 		    (void *)fca_handle);
4471 		return (NULL);
4472 	}
4473 
4474 	return (ql_d_id_to_queue(ha, id));
4475 }
4476 
4477 /*
4478  * ql_notify
4479  *	Receive notifications from ULPs regarding particular actions
4480  *
4481  * Input:
4482  *	fca_handle = handle set up by ql_bind_port().
4483  *	cmd = flag indicating the action to take
4484  *
4485  * Output:
4486  *	FC_SUCCESS - action was taken successfully or no action was needed.
4487  *	FC_FAILURE - action was attempted and failed.
4488  *	FC_UNBOUND - the specified handle is not bound to a port.
4489  */
static int
ql_notify(opaque_t fca_handle, uint32_t cmd)
{
	ql_adapter_state_t		*ha;
	int				rval = FC_SUCCESS;
	tgt_cmd_t			*tgtcmd;
	notify_acknowledge_entry_t	*nack;

	/* Reject handles that were not issued by this FCA. */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(CE_CONT, "(%d): started cmd = %xh\n", ha->instance, cmd);

	switch (FC_NOTIFY_GET_FLAG(cmd)) {
	case FC_NOTIFY_RECOVERY_DONE:
		/*
		 * ULP recovery is complete; if a target-mode notify
		 * acknowledge was deferred, send it to the ISP now.
		 */
		QL_PRINT_3(CE_CONT, "(%d): got NOTIFY_RECOVERY cmd=%xh\n",
		    ha->instance, cmd);

		/* Atomically claim any pending N_ACK command. */
		mutex_enter(&ha->ql_nack_mtx);
		tgtcmd = ha->ql_nack;
		ha->ql_nack = NULL;
		mutex_exit(&ha->ql_nack_mtx);

		if (tgtcmd != NULL) {
			QL_PRINT_3(CE_CONT, "(%d): N_ACK pending\n",
			    ha->instance);

			/*
			 * NOTE(review): ql_req_pkt() returns a QL_* status
			 * that is stored in rval and ultimately returned as
			 * an FC_* code. Presumably QL_SUCCESS == FC_SUCCESS,
			 * but the failure values may not map onto FC_*
			 * codes -- confirm against ql_mbx.h/fc definitions.
			 */
			rval = ql_req_pkt(ha, (request_t **)&nack);
			if (rval == QL_SUCCESS) {
				ql_notify_acknowledge_iocb(ha, tgtcmd, nack);

				QL_PRINT_3(CE_CONT, "(%d): send notify_ack: "
				    "status=%xh flag=%xh\n", ha->instance,
				    tgtcmd->status, nack->flags_l);

				kmem_free(tgtcmd, sizeof (tgt_cmd_t));
				/* Issue command to ISP */
				ql_isp_cmd(ha);
			} else {
				/* No request packet; drop the deferred ack. */
				kmem_free(tgtcmd, sizeof (tgt_cmd_t));
			}
		}
		break;

	case FC_NOTIFY_RECOVERY_CLEANUP:
		/* Nothing to do for recovery-cleanup notification. */
		break;

	case FC_NOTIFY_TARGET_MODE:
		/* Already in target mode: nothing to change. */
		if (CFG_IST(ha, CFG_TARGET_MODE_ENABLE)) {
			break;
		}

		/*
		 * Enable target mode using the 24-bit hard address packed
		 * into the notification value, then re-initialize.
		 */
		ha->cfg_flags |= (CFG_ENABLE_TARGET_MODE |
		    CFG_ENABLE_HARD_ADDRESS);
		ha->port_hard_address.r.d_id[0] =
		    LSB(LSW(FC_NOTIFY_GET_VALUE(cmd)));
		ha->port_hard_address.r.d_id[1] =
		    MSB(LSW(FC_NOTIFY_GET_VALUE(cmd)));
		ha->port_hard_address.r.d_id[2] =
		    LSB(MSW(FC_NOTIFY_GET_VALUE(cmd)));
		QL_PRINT_3(CE_CONT, "(%d): Target mode set, hard address ="
		    " %xh\n", ha->instance, ha->port_hard_address.b24);
		rval = ql_initialize_adapter(ha);
		ql_awaken_task_daemon(ha, NULL, 0, 0);
		break;

	case FC_NOTIFY_NO_TARGET_MODE:
		/* Not in target mode: nothing to change. */
		if (!CFG_IST(ha, CFG_TARGET_MODE_ENABLE)) {
			break;
		}
		/* Disable target mode and re-initialize the adapter. */
		ha->cfg_flags &= ~(CFG_ENABLE_TARGET_MODE |
		    CFG_ENABLE_HARD_ADDRESS);
		QL_PRINT_3(CE_CONT, "(%d): Target mode cleared\n",
		    ha->instance);
		rval = ql_initialize_adapter(ha);
		ql_awaken_task_daemon(ha, NULL, 0, LOOP_DOWN);
		break;

	case FC_NOTIFY_THROTTLE:
		/* Informational only; log the new per-target limit. */
		cmn_err(CE_NOTE, "!%s(%d) max cmds per target %xh", QL_NAME,
		    ha->instance, FC_NOTIFY_GET_VALUE(cmd));
		break;

	default:
		/* Unrecognized notifications are silently ignored. */
		break;
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
4589 
4590 /* ************************************************************************ */
4591 /*			FCA Driver Local Support Functions.		    */
4592 /* ************************************************************************ */
4593 
4594 /*
4595  * ql_cmd_setup
4596  *	Verifies proper command.
4597  *
4598  * Input:
4599  *	fca_handle = handle setup by ql_bind_port().
4600  *	pkt = pointer to fc_packet.
4601  *	rval = pointer for return value.
4602  *
4603  * Returns:
4604  *	Adapter state pointer, NULL = failure.
4605  *
4606  * Context:
4607  *	Kernel context.
4608  */
static ql_adapter_state_t *
ql_cmd_setup(opaque_t fca_handle, fc_packet_t *pkt, int *rval)
{
	ql_adapter_state_t	*ha, *pha;
	ql_srb_t		*sp = (ql_srb_t *)pkt->pkt_fca_private;
	ql_tgt_t		*tq;
	port_id_t		d_id;

	/* Start each command with clean residual counts. */
	pkt->pkt_resp_resid = 0;
	pkt->pkt_data_resid = 0;

	/* check that the handle is assigned by this FCA */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		*rval = FC_UNBOUND;
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (NULL);
	}
	pha = ha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Panic/crash-dump I/O bypasses all of the checks below. */
	if (ddi_in_panic() || pkt->pkt_tran_flags & FC_TRAN_DUMPING) {
		return (ha);
	}

	/* The physical adapter must be online to accept commands. */
	if (!(pha->flags & ONLINE)) {
		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_HW_ERROR;
		*rval = FC_TRANSPORT_ERROR;
		EL(ha, "failed, not online hf=%xh\n", pha->flags);
		return (NULL);
	}

	/* Exit on loop down. */
	if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING) &&
	    pha->task_daemon_flags & LOOP_DOWN &&
	    pha->loop_down_timer <= pha->loop_down_abort_time) {
		pkt->pkt_state = FC_PKT_PORT_OFFLINE;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		*rval = FC_OFFLINE;
		EL(ha, "failed, loop down tdf=%xh\n", pha->task_daemon_flags);
		return (NULL);
	}

	/*
	 * For FCP commands, resolve (and cache in the packet) the
	 * target queue for the destination id, and refuse targets
	 * that are mid-RSCN or awaiting re-authentication.
	 */
	if (pkt->pkt_cmd_fhdr.r_ctl == R_CTL_COMMAND &&
	    pkt->pkt_cmd_fhdr.type == FC_TYPE_SCSI_FCP) {
		tq = (ql_tgt_t *)pkt->pkt_fca_device;
		if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id))) {
			d_id.r.rsvd_1 = 0;
			d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
			tq = ql_d_id_to_queue(ha, d_id);

			/* Cache the lookup for subsequent commands. */
			pkt->pkt_fca_device = (opaque_t)tq;
		}

		if (tq != NULL) {
			DEVICE_QUEUE_LOCK(tq);
			if (tq->flags & (TQF_RSCN_RCVD |
			    TQF_NEED_AUTHENTICATION)) {
				*rval = FC_DEVICE_BUSY;
				DEVICE_QUEUE_UNLOCK(tq);
				EL(ha, "failed, busy qf=%xh, d_id=%xh\n",
				    tq->flags, tq->d_id.b24);
				return (NULL);
			}
			DEVICE_QUEUE_UNLOCK(tq);
		}
	}

	/*
	 * Check DMA pointers.
	 */
	*rval = DDI_SUCCESS;
	if (pkt->pkt_cmd_acc != NULL && pkt->pkt_cmdlen) {
		QL_CLEAR_DMA_HANDLE(pkt->pkt_cmd_dma);
		*rval = ddi_check_dma_handle(pkt->pkt_cmd_dma);
		if (*rval == DDI_SUCCESS) {
			*rval = ddi_check_acc_handle(pkt->pkt_cmd_acc);
		}
	}

	if (pkt->pkt_resp_acc != NULL && *rval == DDI_SUCCESS &&
	    pkt->pkt_rsplen != 0) {
		QL_CLEAR_DMA_HANDLE(pkt->pkt_resp_dma);
		*rval = ddi_check_dma_handle(pkt->pkt_resp_dma);
		if (*rval == DDI_SUCCESS) {
			*rval = ddi_check_acc_handle(pkt->pkt_resp_acc);
		}
	}

	/*
	 * Minimum branch conditional; Change it with care.
	 * (The non-short-circuit '&' is deliberate so the whole
	 * condition evaluates without branching; every operand here
	 * is side-effect free.)
	 */
	if (((pkt->pkt_data_acc != NULL) & (*rval == DDI_SUCCESS) &
	    (pkt->pkt_datalen != 0)) != 0) {
		QL_CLEAR_DMA_HANDLE(pkt->pkt_data_dma);
		*rval = ddi_check_dma_handle(pkt->pkt_data_dma);
		if (*rval == DDI_SUCCESS) {
			*rval = ddi_check_acc_handle(pkt->pkt_data_acc);
		}
	}

	/* Any handle-check failure fails the packet with a DMA error. */
	if (*rval != DDI_SUCCESS) {
		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_DMA_ERROR;

		/* Do command callback. */
		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
			ql_awaken_task_daemon(ha, sp, 0, 0);
		}
		*rval = FC_BADPACKET;
		EL(ha, "failed, bad DMA pointers\n");
		return (NULL);
	}

	/* The SRB must carry this FCA's brand or the packet is bogus. */
	if (sp->magic_number != QL_FCA_BRAND) {
		*rval = FC_BADPACKET;
		EL(ha, "failed, magic number=%xh\n", sp->magic_number);
		return (NULL);
	}
	*rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (ha);
}
4737 
4738 /*
4739  * ql_els_plogi
4740  *	Issue a extended link service port login request.
4741  *
4742  * Input:
4743  *	ha = adapter state pointer.
4744  *	pkt = pointer to fc_packet.
4745  *
4746  * Returns:
4747  *	FC_SUCCESS - the packet was accepted for transport.
4748  *	FC_TRANSPORT_ERROR - a transport error occurred.
4749  *
4750  * Context:
4751  *	Kernel context.
4752  */
static int
ql_els_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ql_tgt_t		*tq = NULL;
	port_id_t		d_id;
	la_els_logi_t		acc;
	class_svc_param_t	*class3_param;
	int			ret;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* No logins while the adapter state is not online. */
	TASK_DAEMON_LOCK(ha);
	if (!(ha->task_daemon_flags & STATE_ONLINE)) {
		TASK_DAEMON_UNLOCK(ha);
		QL_PRINT_3(CE_CONT, "(%d): offline done\n", ha->instance);
		return (FC_OFFLINE);
	}
	TASK_DAEMON_UNLOCK(ha);

	bzero(&acc, sizeof (acc));
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;

	/*
	 * Ask the firmware to log into the destination port; on a
	 * loop-id collision, retry once so the firmware can assign
	 * a different loop ID.
	 */
	switch (ret = ql_login_port(ha, d_id)) {
	case QL_SUCCESS:
		tq = ql_d_id_to_queue(ha, d_id);
		break;

	case QL_LOOP_ID_USED:
		if ((ret = ql_login_port(ha, d_id)) == QL_SUCCESS) {
			tq = ql_d_id_to_queue(ha, d_id);
		}
		break;

	default:
		break;
	}

	if (ret != QL_SUCCESS) {
		/*
		 * Invalidate this entry so as to seek a fresh loop ID
		 * in case firmware reassigns it to something else
		 */
		tq = ql_d_id_to_queue(ha, d_id);
		if (tq && (ret != QL_MEMORY_ALLOC_FAILED)) {
			tq->loop_id = PORT_NO_LOOP_ID;
		}
	} else if (tq) {
		/* Login succeeded: refresh the device's port database. */
		(void) ql_get_port_database(ha, tq, PDF_ADISC);
	}

	if (tq != NULL && VALID_DEVICE_ID(ha, tq->loop_id) &&
	    (ret != QL_MEMORY_ALLOC_FAILED) && PD_PORT_LOGIN(tq)) {

		/* Build ACC. */
		acc.ls_code.ls_code = LA_ELS_ACC;
		acc.common_service.fcph_version = 0x2006;
		acc.common_service.cmn_features = 0x8800;
		/* Receive buffer size comes from the chip-specific ICB. */
		CFG_IST(ha, CFG_CTRL_2425) ?
		    (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
		    ha->init_ctrl_blk.cb24.max_frame_length[0],
		    ha->init_ctrl_blk.cb24.max_frame_length[1])) :
		    (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
		    ha->init_ctrl_blk.cb.max_frame_length[0],
		    ha->init_ctrl_blk.cb.max_frame_length[1]));
		acc.common_service.conc_sequences = 0xff;
		acc.common_service.relative_offset = 0x03;
		acc.common_service.e_d_tov = 0x7d0;

		/* Echo the remote port/node WWNs back in the ACC. */
		bcopy((void *)&tq->port_name[0],
		    (void *)&acc.nport_ww_name.raw_wwn[0], 8);
		bcopy((void *)&tq->node_name[0],
		    (void *)&acc.node_ww_name.raw_wwn[0], 8);

		class3_param = (class_svc_param_t *)&acc.class_3;
		class3_param->class_valid_svc_opt = 0x8000;
		class3_param->recipient_ctl = tq->class3_recipient_ctl;
		class3_param->rcv_data_size = tq->class3_rcv_data_size;
		class3_param->conc_sequences = tq->class3_conc_sequences;
		class3_param->open_sequences_per_exch =
		    tq->class3_open_sequences_per_exch;

		if ((ql_busy_plogi(ha, pkt, tq) == FC_TRAN_BUSY)) {
			/* Device busy: convert the built ACC into a RJT. */
			acc.ls_code.ls_code = LA_ELS_RJT;
			pkt->pkt_state = FC_PKT_TRAN_BSY;
			pkt->pkt_reason = FC_REASON_XCHG_BSY;
			rval = FC_TRAN_BUSY;
		} else {
			DEVICE_QUEUE_LOCK(tq);
			tq->logout_sent = 0;
			tq->flags &= ~TQF_NEED_AUTHENTICATION;
			if (CFG_IST(ha, CFG_CTRL_2425)) {
				/* Flag iIDMA negotiation on 24xx/25xx. */
				tq->flags |= TQF_IIDMA_NEEDED;
			}
			DEVICE_QUEUE_UNLOCK(tq);

			if (CFG_IST(ha, CFG_CTRL_2425)) {
				TASK_DAEMON_LOCK(ha);
				ha->task_daemon_flags |= TD_IIDMA_NEEDED;
				TASK_DAEMON_UNLOCK(ha);
			}

			pkt->pkt_state = FC_PKT_SUCCESS;
		}
	} else {
		/* Build RJT. */
		acc.ls_code.ls_code = LA_ELS_RJT;

		/* Map the firmware status onto transport state/reason. */
		switch (ret) {
		case QL_FUNCTION_TIMEOUT:
			pkt->pkt_state = FC_PKT_TIMEOUT;
			pkt->pkt_reason = FC_REASON_HW_ERROR;
			break;

		case QL_MEMORY_ALLOC_FAILED:
			pkt->pkt_state = FC_PKT_LOCAL_BSY;
			pkt->pkt_reason = FC_REASON_NOMEM;
			rval = FC_TRAN_BUSY;
			break;

		case QL_FABRIC_NOT_INITIALIZED:
			pkt->pkt_state = FC_PKT_FABRIC_BSY;
			pkt->pkt_reason = FC_REASON_NO_CONNECTION;
			rval = FC_TRAN_BUSY;
			break;

		default:
			pkt->pkt_state = FC_PKT_TRAN_ERROR;
			pkt->pkt_reason = FC_REASON_NO_CONNECTION;
			break;
		}

		EL(ha, "Plogi unsuccess for %xh state %xh reason %xh "
		    "ret %xh rval %xh\n", d_id.b24, pkt->pkt_state,
		    pkt->pkt_reason, ret, rval);
	}

	if (tq != NULL) {
		/* Login attempt finished; clear the in-progress flags. */
		DEVICE_QUEUE_LOCK(tq);
		tq->flags &= ~(TQF_PLOGI_PROGRS | TQF_QUEUE_SUSPENDED);
		if (rval == FC_TRAN_BUSY) {
			if (tq->d_id.b24 != BROADCAST_ADDR) {
				tq->flags |= TQF_NEED_AUTHENTICATION;
			}
		}
		DEVICE_QUEUE_UNLOCK(tq);
	}

	/* Copy the ACC/RJT payload into the caller's response buffer. */
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
4912 
4913 /*
4914  * ql_els_flogi
4915  *	Issue a extended link service fabric login request.
4916  *
4917  * Input:
4918  *	ha = adapter state pointer.
4919  *	pkt = pointer to fc_packet.
4920  *
4921  * Returns:
4922  *	FC_SUCCESS - the packet was accepted for transport.
4923  *	FC_TRANSPORT_ERROR - a transport error occurred.
4924  *
4925  * Context:
4926  *	Kernel context.
4927  */
4928 static int
4929 ql_els_flogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
4930 {
4931 	ql_tgt_t		*tq;
4932 	port_id_t		d_id;
4933 	la_els_logi_t		acc;
4934 	class_svc_param_t	*class3_param;
4935 	int			rval = FC_SUCCESS;
4936 
4937 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
4938 
4939 	bzero(&acc, sizeof (acc));
4940 	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
4941 
4942 	tq = ql_d_id_to_queue(ha, d_id);
4943 	if (tq != NULL) {
4944 		/* Build ACC. */
4945 		acc.ls_code.ls_code = LA_ELS_ACC;
4946 		acc.common_service.fcph_version = 0x2006;
4947 		acc.common_service.cmn_features = 0x1b00;
4948 		CFG_IST(ha, CFG_CTRL_2425) ?
4949 		    (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
4950 		    ha->init_ctrl_blk.cb24.max_frame_length[0],
4951 		    ha->init_ctrl_blk.cb24.max_frame_length[1])) :
4952 		    (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
4953 		    ha->init_ctrl_blk.cb.max_frame_length[0],
4954 		    ha->init_ctrl_blk.cb.max_frame_length[1]));
4955 		acc.common_service.conc_sequences = 0xff;
4956 		acc.common_service.relative_offset = 0x03;
4957 		acc.common_service.e_d_tov = 0x7d0;
4958 
4959 		bcopy((void *)&tq->port_name[0],
4960 		    (void *)&acc.nport_ww_name.raw_wwn[0], 8);
4961 		bcopy((void *)&tq->node_name[0],
4962 		    (void *)&acc.node_ww_name.raw_wwn[0], 8);
4963 
4964 		class3_param = (class_svc_param_t *)&acc.class_3;
4965 		class3_param->class_valid_svc_opt = 0x8800;
4966 		class3_param->recipient_ctl = tq->class3_recipient_ctl;
4967 		class3_param->rcv_data_size = tq->class3_rcv_data_size;
4968 		class3_param->conc_sequences = tq->class3_conc_sequences;
4969 		class3_param->open_sequences_per_exch =
4970 		    tq->class3_open_sequences_per_exch;
4971 
4972 		pkt->pkt_state = FC_PKT_SUCCESS;
4973 	} else {
4974 		/* Build RJT. */
4975 		acc.ls_code.ls_code = LA_ELS_RJT;
4976 
4977 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
4978 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
4979 	}
4980 
4981 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
4982 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
4983 
4984 	if (rval != FC_SUCCESS) {
4985 		EL(ha, "failed, rval = %xh\n", rval);
4986 	} else {
4987 		/*EMPTY*/
4988 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
4989 	}
4990 	return (rval);
4991 }
4992 
4993 /*
4994  * ql_els_logo
4995  *	Issue a extended link service logout request.
4996  *
4997  * Input:
4998  *	ha = adapter state pointer.
4999  *	pkt = pointer to fc_packet.
5000  *
5001  * Returns:
5002  *	FC_SUCCESS - the packet was accepted for transport.
5003  *	FC_TRANSPORT_ERROR - a transport error occurred.
5004  *
5005  * Context:
5006  *	Kernel context.
5007  */
static int
ql_els_logo(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	port_id_t	d_id;
	ql_tgt_t	*tq;
	la_els_logo_t	acc;
	int		rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	bzero(&acc, sizeof (acc));
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;

	tq = ql_d_id_to_queue(ha, d_id);
	if (tq) {
		DEVICE_QUEUE_LOCK(tq);
		/* Never log out the broadcast pseudo-target. */
		if (tq->d_id.b24 == BROADCAST_ADDR) {
			DEVICE_QUEUE_UNLOCK(tq);
			return (FC_SUCCESS);
		}

		/* Require a fresh login before new I/O to this port. */
		tq->flags |= TQF_NEED_AUTHENTICATION;

		do {
			/* Drop the lock while aborting and waiting. */
			DEVICE_QUEUE_UNLOCK(tq);
			(void) ql_abort_device(ha, tq, 1);

			/*
			 * Wait for commands to drain in F/W (doesn't
			 * take more than a few milliseconds)
			 */
			ql_delay(ha, 10000);

			DEVICE_QUEUE_LOCK(tq);
		} while (tq->outcnt);

		DEVICE_QUEUE_UNLOCK(tq);
	}

	if (ql_logout_port(ha, d_id) == QL_SUCCESS) {
		/* Build ACC. */
		acc.ls_code.ls_code = LA_ELS_ACC;

		pkt->pkt_state = FC_PKT_SUCCESS;
	} else {
		/* Build RJT. */
		acc.ls_code.ls_code = LA_ELS_RJT;

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
	}

	/* Copy the ACC/RJT payload out through the DDI access handle. */
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5071 
5072 /*
5073  * ql_els_prli
5074  *	Issue a extended link service process login request.
5075  *
5076  * Input:
5077  *	ha = adapter state pointer.
5078  *	pkt = pointer to fc_packet.
5079  *
5080  * Returns:
5081  *	FC_SUCCESS - the packet was accepted for transport.
5082  *	FC_TRANSPORT_ERROR - a transport error occurred.
5083  *
5084  * Context:
5085  *	Kernel context.
5086  */
5087 static int
5088 ql_els_prli(ql_adapter_state_t *ha, fc_packet_t *pkt)
5089 {
5090 	ql_tgt_t		*tq;
5091 	port_id_t		d_id;
5092 	la_els_prli_t		acc;
5093 	prli_svc_param_t	*param;
5094 	int			rval = FC_SUCCESS;
5095 
5096 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5097 
5098 	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5099 
5100 	tq = ql_d_id_to_queue(ha, d_id);
5101 	if (tq != NULL) {
5102 
5103 		/* Build ACC. */
5104 		bzero(&acc, sizeof (acc));
5105 		acc.ls_code = LA_ELS_ACC;
5106 		acc.page_length = 0x10;
5107 		acc.payload_length = tq->prli_payload_length;
5108 
5109 		param = (prli_svc_param_t *)&acc.service_params[0];
5110 		param->type = 0x08;
5111 		param->rsvd = 0x00;
5112 		param->process_assoc_flags = tq->prli_svc_param_word_0;
5113 		param->process_flags = tq->prli_svc_param_word_3;
5114 
5115 		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5116 		    (uint8_t *)pkt->pkt_resp, sizeof (acc),
5117 		    DDI_DEV_AUTOINCR);
5118 
5119 		pkt->pkt_state = FC_PKT_SUCCESS;
5120 	} else {
5121 		la_els_rjt_t rjt;
5122 
5123 		/* Build RJT. */
5124 		bzero(&rjt, sizeof (rjt));
5125 		rjt.ls_code.ls_code = LA_ELS_RJT;
5126 
5127 		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
5128 		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
5129 
5130 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5131 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5132 	}
5133 
5134 	if (rval != FC_SUCCESS) {
5135 		EL(ha, "failed, rval = %xh\n", rval);
5136 	} else {
5137 		/*EMPTY*/
5138 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5139 	}
5140 	return (rval);
5141 }
5142 
5143 /*
5144  * ql_els_prlo
5145  *	Issue a extended link service process logout request.
5146  *
5147  * Input:
5148  *	ha = adapter state pointer.
5149  *	pkt = pointer to fc_packet.
5150  *
5151  * Returns:
5152  *	FC_SUCCESS - the packet was accepted for transport.
5153  *	FC_TRANSPORT_ERROR - a transport error occurred.
5154  *
5155  * Context:
5156  *	Kernel context.
5157  */
5158 /* ARGSUSED */
5159 static int
5160 ql_els_prlo(ql_adapter_state_t *ha, fc_packet_t *pkt)
5161 {
5162 	la_els_prli_t	acc;
5163 	int		rval = FC_SUCCESS;
5164 
5165 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5166 
5167 	/* Build ACC. */
5168 	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&acc,
5169 	    (uint8_t *)pkt->pkt_cmd, sizeof (acc), DDI_DEV_AUTOINCR);
5170 
5171 	acc.ls_code = LA_ELS_ACC;
5172 	acc.service_params[2] = 1;
5173 
5174 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5175 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5176 
5177 	pkt->pkt_state = FC_PKT_SUCCESS;
5178 
5179 	if (rval != FC_SUCCESS) {
5180 		EL(ha, "failed, rval = %xh\n", rval);
5181 	} else {
5182 		/*EMPTY*/
5183 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5184 	}
5185 	return (rval);
5186 }
5187 
5188 /*
5189  * ql_els_adisc
5190  *	Issue a extended link service address discovery request.
5191  *
5192  * Input:
5193  *	ha = adapter state pointer.
5194  *	pkt = pointer to fc_packet.
5195  *
5196  * Returns:
5197  *	FC_SUCCESS - the packet was accepted for transport.
5198  *	FC_TRANSPORT_ERROR - a transport error occurred.
5199  *
5200  * Context:
5201  *	Kernel context.
5202  */
static int
ql_els_adisc(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ql_dev_id_list_t	*list;
	uint32_t		list_size;
	ql_link_t		*link;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;
	port_id_t		d_id;
	la_els_adisc_t		acc;
	uint16_t		index, loop_id;
	ql_mbx_data_t		mr;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	bzero(&acc, sizeof (acc));
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;

	/*
	 * MBC_GET_PORT_DATABASE causes ADISC to go out to
	 * the device from the firmware
	 */
	/* Locate the target queue hashed by AL_PA. */
	index = ql_alpa_to_index[d_id.b.al_pa];
	tq = NULL;
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			break;
		} else {
			tq = NULL;
		}
	}

	/*
	 * Target known but its loop ID is stale/invalid: rescan the
	 * firmware device ID list to recover the current loop ID.
	 */
	if ((tq != NULL) && (!VALID_DEVICE_ID(ha, tq->loop_id))) {
		list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
		list = (ql_dev_id_list_t *)kmem_zalloc(list_size, KM_SLEEP);

		if (list != NULL &&
		    ql_get_id_list(ha, (caddr_t)list, list_size, &mr) ==
		    QL_SUCCESS) {

			/* mr.mb[1] holds the entry count returned. */
			for (index = 0; index < mr.mb[1]; index++) {
				/* NOTE: d_id is reused as scan output here. */
				ql_dev_list(ha, list, index, &d_id, &loop_id);

				if (tq->d_id.b24 == d_id.b24) {
					tq->loop_id = loop_id;
					break;
				}
			}
		} else {
			cmn_err(CE_WARN, "!%s(%d) didn't get list for %xh",
			    QL_NAME, ha->instance, d_id.b24);
			tq = NULL;
		}
		/* Still no usable loop ID: give up on this target. */
		if ((tq != NULL) && (!VALID_DEVICE_ID(ha, tq->loop_id))) {
			cmn_err(CE_WARN, "!%s(%d) no loop_id for adisc %xh",
			    QL_NAME, ha->instance, tq->d_id.b24);
			tq = NULL;
		}

		if (list != NULL) {
			kmem_free(list, list_size);
		}
	}

	if ((tq != NULL) && (VALID_DEVICE_ID(ha, tq->loop_id)) &&
	    ql_get_port_database(ha, tq, PDF_ADISC) == QL_SUCCESS) {

		/* Build ACC. */

		DEVICE_QUEUE_LOCK(tq);
		tq->flags &= ~TQF_NEED_AUTHENTICATION;
		if (tq->prli_svc_param_word_3 & BIT_8) {
			for (link = tq->lun_queues.first; link != NULL;
			    link = link->next) {
				lq = link->base_address;

				if (lq->cmd.first != NULL) {
					/*
					 * NOTE(review): ql_next appears to
					 * drop the device queue lock, hence
					 * the re-lock here — confirm.
					 */
					ql_next(ha, lq);
					DEVICE_QUEUE_LOCK(tq);
				}
			}
		}
		DEVICE_QUEUE_UNLOCK(tq);

		acc.ls_code.ls_code = LA_ELS_ACC;
		acc.hard_addr.hard_addr = tq->hard_addr.b24;

		bcopy((void *)&tq->port_name[0],
		    (void *)&acc.port_wwn.raw_wwn[0], 8);
		bcopy((void *)&tq->node_name[0],
		    (void *)&acc.node_wwn.raw_wwn[0], 8);

		acc.nport_id.port_id = tq->d_id.b24;

		pkt->pkt_state = FC_PKT_SUCCESS;
	} else {
		/* Build RJT. */
		acc.ls_code.ls_code = LA_ELS_RJT;

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
	}

	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5319 
5320 /*
5321  * ql_els_linit
5322  *	Issue a extended link service loop initialize request.
5323  *
5324  * Input:
5325  *	ha = adapter state pointer.
5326  *	pkt = pointer to fc_packet.
5327  *
5328  * Returns:
5329  *	FC_SUCCESS - the packet was accepted for transport.
5330  *	FC_TRANSPORT_ERROR - a transport error occurred.
5331  *
5332  * Context:
5333  *	Kernel context.
5334  */
static int
ql_els_linit(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		cnt;
	conv_num_t		n;
	port_id_t		d_id;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	if (ha->topology & QL_SNS_CONNECTION) {
		fc_linit_req_t els;
		lfa_cmd_t lfa;

		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

		/* Setup LFA mailbox command data. */
		bzero((void *)&lfa, sizeof (lfa_cmd_t));

		lfa.resp_buffer_length[0] = 4;

		/* Response DMA address, converted to little-endian bytes. */
		cp = pkt->pkt_resp_cookie;
		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
			n.size64 = (uint64_t)cp->dmac_laddress;
			LITTLE_ENDIAN_64(&n.size64);
		} else {
			n.size32[0] = LSD(cp->dmac_laddress);
			LITTLE_ENDIAN_32(&n.size32[0]);
			n.size32[1] = MSD(cp->dmac_laddress);
			LITTLE_ENDIAN_32(&n.size32[1]);
		}

		/* Set buffer address. */
		for (cnt = 0; cnt < 8; cnt++) {
			lfa.resp_buffer_address[cnt] = n.size8[cnt];
		}

		/* Destination port ID as 3 little-endian address bytes. */
		lfa.subcommand_length[0] = 4;
		n.size32[0] = d_id.b24;
		LITTLE_ENDIAN_32(&n.size32[0]);
		lfa.addr[0] = n.size8[0];
		lfa.addr[1] = n.size8[1];
		lfa.addr[2] = n.size8[2];
		/* 0x70 selects the LINIT loop-fabric-address subcommand. */
		lfa.subcommand[1] = 0x70;
		lfa.payload[2] = els.func;
		lfa.payload[4] = els.lip_b3;
		lfa.payload[5] = els.lip_b4;

		if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
			pkt->pkt_state = FC_PKT_TRAN_ERROR;
		} else {
			pkt->pkt_state = FC_PKT_SUCCESS;
		}
	} else {
		fc_linit_resp_t rjt;

		/* Build RJT. */
		bzero(&rjt, sizeof (rjt));
		rjt.ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5413 
5414 /*
5415  * ql_els_lpc
5416  *	Issue a extended link service loop control request.
5417  *
5418  * Input:
5419  *	ha = adapter state pointer.
5420  *	pkt = pointer to fc_packet.
5421  *
5422  * Returns:
5423  *	FC_SUCCESS - the packet was accepted for transport.
5424  *	FC_TRANSPORT_ERROR - a transport error occurred.
5425  *
5426  * Context:
5427  *	Kernel context.
5428  */
static int
ql_els_lpc(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		cnt;
	conv_num_t		n;
	port_id_t		d_id;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	if (ha->topology & QL_SNS_CONNECTION) {
		ql_lpc_t els;
		lfa_cmd_t lfa;

		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

		/* Setup LFA mailbox command data. */
		bzero((void *)&lfa, sizeof (lfa_cmd_t));

		lfa.resp_buffer_length[0] = 4;

		/* Response DMA address, converted to little-endian bytes. */
		cp = pkt->pkt_resp_cookie;
		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
			n.size64 = (uint64_t)(cp->dmac_laddress);
			LITTLE_ENDIAN_64(&n.size64);
		} else {
			n.size32[0] = cp->dmac_address;
			LITTLE_ENDIAN_32(&n.size32[0]);
			n.size32[1] = 0;
		}

		/* Set buffer address. */
		for (cnt = 0; cnt < 8; cnt++) {
			lfa.resp_buffer_address[cnt] = n.size8[cnt];
		}

		/* Destination port ID as 3 little-endian address bytes. */
		lfa.subcommand_length[0] = 20;
		n.size32[0] = d_id.b24;
		LITTLE_ENDIAN_32(&n.size32[0]);
		lfa.addr[0] = n.size8[0];
		lfa.addr[1] = n.size8[1];
		lfa.addr[2] = n.size8[2];
		/* 0x71 selects the LPC loop-fabric-address subcommand. */
		lfa.subcommand[1] = 0x71;
		lfa.payload[4] = els.port_control;
		bcopy((void *)&els.lpb[0], (void *)&lfa.payload[6], 32);

		if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
			pkt->pkt_state = FC_PKT_TRAN_ERROR;
		} else {
			pkt->pkt_state = FC_PKT_SUCCESS;
		}
	} else {
		ql_lpc_resp_t rjt;

		/* Build RJT. */
		bzero(&rjt, sizeof (rjt));
		rjt.ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5505 
5506 /*
5507  * ql_els_lsts
5508  *	Issue a extended link service loop status request.
5509  *
5510  * Input:
5511  *	ha = adapter state pointer.
5512  *	pkt = pointer to fc_packet.
5513  *
5514  * Returns:
5515  *	FC_SUCCESS - the packet was accepted for transport.
5516  *	FC_TRANSPORT_ERROR - a transport error occurred.
5517  *
5518  * Context:
5519  *	Kernel context.
5520  */
static int
ql_els_lsts(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		cnt;
	conv_num_t		n;
	port_id_t		d_id;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	if (ha->topology & QL_SNS_CONNECTION) {
		fc_lsts_req_t els;
		lfa_cmd_t lfa;

		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

		/* Setup LFA mailbox command data. */
		bzero((void *)&lfa, sizeof (lfa_cmd_t));

		lfa.resp_buffer_length[0] = 84;

		/* Response DMA address, converted to little-endian bytes. */
		cp = pkt->pkt_resp_cookie;
		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
			n.size64 = cp->dmac_laddress;
			LITTLE_ENDIAN_64(&n.size64);
		} else {
			n.size32[0] = cp->dmac_address;
			LITTLE_ENDIAN_32(&n.size32[0]);
			n.size32[1] = 0;
		}

		/* Set buffer address. */
		for (cnt = 0; cnt < 8; cnt++) {
			lfa.resp_buffer_address[cnt] = n.size8[cnt];
		}

		/* Destination port ID as 3 little-endian address bytes. */
		lfa.subcommand_length[0] = 2;
		n.size32[0] = d_id.b24;
		LITTLE_ENDIAN_32(&n.size32[0]);
		lfa.addr[0] = n.size8[0];
		lfa.addr[1] = n.size8[1];
		lfa.addr[2] = n.size8[2];
		/* 0x72 selects the LSTS loop-fabric-address subcommand. */
		lfa.subcommand[1] = 0x72;

		if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
			pkt->pkt_state = FC_PKT_TRAN_ERROR;
		} else {
			pkt->pkt_state = FC_PKT_SUCCESS;
		}
	} else {
		fc_lsts_resp_t rjt;

		/* Build RJT. */
		bzero(&rjt, sizeof (rjt));
		rjt.lsts_ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5595 
5596 /*
5597  * ql_els_scr
5598  *	Issue a extended link service state change registration request.
5599  *
5600  * Input:
5601  *	ha = adapter state pointer.
5602  *	pkt = pointer to fc_packet.
5603  *
5604  * Returns:
5605  *	FC_SUCCESS - the packet was accepted for transport.
5606  *	FC_TRANSPORT_ERROR - a transport error occurred.
5607  *
5608  * Context:
5609  *	Kernel context.
5610  */
5611 static int
5612 ql_els_scr(ql_adapter_state_t *ha, fc_packet_t *pkt)
5613 {
5614 	fc_scr_resp_t	acc;
5615 	int		rval = FC_SUCCESS;
5616 
5617 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5618 
5619 	bzero(&acc, sizeof (acc));
5620 	if (ha->topology & QL_SNS_CONNECTION) {
5621 		fc_scr_req_t els;
5622 
5623 		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5624 		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5625 
5626 		if (ql_send_change_request(ha, els.scr_func) ==
5627 		    QL_SUCCESS) {
5628 			/* Build ACC. */
5629 			acc.scr_acc = LA_ELS_ACC;
5630 
5631 			pkt->pkt_state = FC_PKT_SUCCESS;
5632 		} else {
5633 			/* Build RJT. */
5634 			acc.scr_acc = LA_ELS_RJT;
5635 
5636 			pkt->pkt_state = FC_PKT_TRAN_ERROR;
5637 			pkt->pkt_reason = FC_REASON_HW_ERROR;
5638 		}
5639 	} else {
5640 		/* Build RJT. */
5641 		acc.scr_acc = LA_ELS_RJT;
5642 
5643 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5644 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5645 	}
5646 
5647 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5648 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5649 
5650 	if (rval != FC_SUCCESS) {
5651 		EL(ha, "failed, rval = %xh\n", rval);
5652 	} else {
5653 		/*EMPTY*/
5654 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5655 	}
5656 	return (rval);
5657 }
5658 
5659 /*
5660  * ql_els_rscn
5661  *	Issue a extended link service register state
5662  *	change notification request.
5663  *
5664  * Input:
5665  *	ha = adapter state pointer.
5666  *	pkt = pointer to fc_packet.
5667  *
5668  * Returns:
5669  *	FC_SUCCESS - the packet was accepted for transport.
5670  *	FC_TRANSPORT_ERROR - a transport error occurred.
5671  *
5672  * Context:
5673  *	Kernel context.
5674  */
5675 static int
5676 ql_els_rscn(ql_adapter_state_t *ha, fc_packet_t *pkt)
5677 {
5678 	ql_rscn_resp_t	acc;
5679 	int		rval = FC_SUCCESS;
5680 
5681 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5682 
5683 	bzero(&acc, sizeof (acc));
5684 	if (ha->topology & QL_SNS_CONNECTION) {
5685 		/* Build ACC. */
5686 		acc.scr_acc = LA_ELS_ACC;
5687 
5688 		pkt->pkt_state = FC_PKT_SUCCESS;
5689 	} else {
5690 		/* Build RJT. */
5691 		acc.scr_acc = LA_ELS_RJT;
5692 
5693 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5694 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5695 	}
5696 
5697 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5698 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5699 
5700 	if (rval != FC_SUCCESS) {
5701 		EL(ha, "failed, rval = %xh\n", rval);
5702 	} else {
5703 		/*EMPTY*/
5704 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5705 	}
5706 	return (rval);
5707 }
5708 
5709 /*
5710  * ql_els_farp_req
5711  *	Issue FC Address Resolution Protocol (FARP)
5712  *	extended link service request.
5713  *
5714  *	Note: not supported.
5715  *
5716  * Input:
5717  *	ha = adapter state pointer.
5718  *	pkt = pointer to fc_packet.
5719  *
5720  * Returns:
5721  *	FC_SUCCESS - the packet was accepted for transport.
5722  *	FC_TRANSPORT_ERROR - a transport error occurred.
5723  *
5724  * Context:
5725  *	Kernel context.
5726  */
5727 static int
5728 ql_els_farp_req(ql_adapter_state_t *ha, fc_packet_t *pkt)
5729 {
5730 	ql_acc_rjt_t	acc;
5731 	int		rval = FC_SUCCESS;
5732 
5733 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5734 
5735 	bzero(&acc, sizeof (acc));
5736 
5737 	/* Build ACC. */
5738 	acc.ls_code.ls_code = LA_ELS_ACC;
5739 
5740 	pkt->pkt_state = FC_PKT_SUCCESS;
5741 
5742 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5743 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5744 
5745 	if (rval != FC_SUCCESS) {
5746 		EL(ha, "failed, rval = %xh\n", rval);
5747 	} else {
5748 		/*EMPTY*/
5749 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5750 	}
5751 	return (rval);
5752 }
5753 
5754 /*
5755  * ql_els_farp_reply
5756  *	Issue FC Address Resolution Protocol (FARP)
5757  *	extended link service reply.
5758  *
5759  *	Note: not supported.
5760  *
5761  * Input:
5762  *	ha = adapter state pointer.
5763  *	pkt = pointer to fc_packet.
5764  *
5765  * Returns:
5766  *	FC_SUCCESS - the packet was accepted for transport.
5767  *	FC_TRANSPORT_ERROR - a transport error occurred.
5768  *
5769  * Context:
5770  *	Kernel context.
5771  */
5772 /* ARGSUSED */
5773 static int
5774 ql_els_farp_reply(ql_adapter_state_t *ha, fc_packet_t *pkt)
5775 {
5776 	ql_acc_rjt_t	acc;
5777 	int		rval = FC_SUCCESS;
5778 
5779 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5780 
5781 	bzero(&acc, sizeof (acc));
5782 
5783 	/* Build ACC. */
5784 	acc.ls_code.ls_code = LA_ELS_ACC;
5785 
5786 	pkt->pkt_state = FC_PKT_SUCCESS;
5787 
5788 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5789 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5790 
5791 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5792 
5793 	return (rval);
5794 }
5795 
5796 static int
5797 ql_els_rnid(ql_adapter_state_t *ha, fc_packet_t *pkt)
5798 {
5799 	uchar_t			*rnid_acc;
5800 	port_id_t		d_id;
5801 	ql_link_t		*link;
5802 	ql_tgt_t		*tq;
5803 	uint16_t		index;
5804 	la_els_rnid_acc_t	acc;
5805 	la_els_rnid_t		*req;
5806 	size_t			req_len;
5807 
5808 	req_len =  FCIO_RNID_MAX_DATA_LEN + sizeof (fc_rnid_hdr_t);
5809 	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5810 	index = ql_alpa_to_index[d_id.b.al_pa];
5811 
5812 	tq = NULL;
5813 	for (link = ha->dev[index].first; link != NULL; link = link->next) {
5814 		tq = link->base_address;
5815 		if (tq->d_id.b24 == d_id.b24) {
5816 			break;
5817 		} else {
5818 			tq = NULL;
5819 		}
5820 	}
5821 
5822 	/* Allocate memory for rnid status block */
5823 	rnid_acc = kmem_zalloc(req_len, KM_SLEEP);
5824 	ASSERT(rnid_acc != NULL);
5825 
5826 	bzero(&acc, sizeof (acc));
5827 
5828 	req = (la_els_rnid_t *)pkt->pkt_cmd;
5829 	if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id)) ||
5830 	    (ql_send_rnid_els(ha, tq->loop_id, req->data_format, req_len,
5831 	    (caddr_t)rnid_acc) != QL_SUCCESS)) {
5832 
5833 		kmem_free(rnid_acc, req_len);
5834 		acc.ls_code.ls_code = LA_ELS_RJT;
5835 
5836 		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5837 		    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5838 
5839 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5840 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5841 
5842 		return (FC_FAILURE);
5843 	}
5844 
5845 	acc.ls_code.ls_code = LA_ELS_ACC;
5846 	bcopy(rnid_acc, &acc.hdr, req_len);
5847 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5848 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5849 
5850 	kmem_free(rnid_acc, req_len);
5851 	pkt->pkt_state = FC_PKT_SUCCESS;
5852 
5853 	return (FC_SUCCESS);
5854 }
5855 
5856 static int
5857 ql_els_rls(ql_adapter_state_t *ha, fc_packet_t *pkt)
5858 {
5859 	fc_rls_acc_t		*rls_acc;
5860 	port_id_t		d_id;
5861 	ql_link_t		*link;
5862 	ql_tgt_t		*tq;
5863 	uint16_t		index;
5864 	la_els_rls_acc_t	acc;
5865 
5866 	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5867 	index = ql_alpa_to_index[d_id.b.al_pa];
5868 
5869 	tq = NULL;
5870 	for (link = ha->dev[index].first; link != NULL; link = link->next) {
5871 		tq = link->base_address;
5872 		if (tq->d_id.b24 == d_id.b24) {
5873 			break;
5874 		} else {
5875 			tq = NULL;
5876 		}
5877 	}
5878 
5879 	/* Allocate memory for link error status block */
5880 	rls_acc = kmem_zalloc(sizeof (*rls_acc), KM_SLEEP);
5881 	ASSERT(rls_acc != NULL);
5882 
5883 	bzero(&acc, sizeof (la_els_rls_acc_t));
5884 
5885 	if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id)) ||
5886 	    (ql_get_link_status(ha, tq->loop_id, sizeof (*rls_acc),
5887 	    (caddr_t)rls_acc, 0) != QL_SUCCESS)) {
5888 
5889 		kmem_free(rls_acc, sizeof (*rls_acc));
5890 		acc.ls_code.ls_code = LA_ELS_RJT;
5891 
5892 		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5893 		    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5894 
5895 		pkt->pkt_state = FC_PKT_TRAN_ERROR;
5896 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5897 
5898 		return (FC_FAILURE);
5899 	}
5900 
5901 	LITTLE_ENDIAN_32(&rls_acc->rls_link_fail);
5902 	LITTLE_ENDIAN_32(&rls_acc->rls_sync_loss);
5903 	LITTLE_ENDIAN_32(&rls_acc->rls_sig_loss);
5904 	LITTLE_ENDIAN_32(&rls_acc->rls_invalid_word);
5905 	LITTLE_ENDIAN_32(&rls_acc->rls_invalid_crc);
5906 
5907 	acc.ls_code.ls_code = LA_ELS_ACC;
5908 	acc.rls_link_params.rls_link_fail = rls_acc->rls_link_fail;
5909 	acc.rls_link_params.rls_sync_loss = rls_acc->rls_sync_loss;
5910 	acc.rls_link_params.rls_sig_loss  = rls_acc->rls_sig_loss;
5911 	acc.rls_link_params.rls_invalid_word = rls_acc->rls_invalid_word;
5912 	acc.rls_link_params.rls_invalid_crc = rls_acc->rls_invalid_crc;
5913 	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5914 	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5915 
5916 	kmem_free(rls_acc, sizeof (*rls_acc));
5917 	pkt->pkt_state = FC_PKT_SUCCESS;
5918 
5919 	return (FC_SUCCESS);
5920 }
5921 
/*
 * ql_busy_plogi
 *	Determine whether a PLOGI to the given target must be deferred:
 *	waits for the target's outstanding commands to drain and checks
 *	the task daemon callback queue for work still pending for it.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *	tq = target queue pointer.
 *
 * Returns:
 *	FC_SUCCESS - safe to proceed with the PLOGI.
 *	FC_TRAN_BUSY - commands/callbacks still pending; retry later.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_busy_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_tgt_t *tq)
{
	port_id_t	d_id;
	ql_srb_t	*sp;
	fc_unsol_buf_t  *ubp;
	ql_link_t	*link, *next_link;
	int		rval = FC_SUCCESS;
	int		cnt = 5;

	/*
	 * we need to ensure that q->outcnt == 0, otherwise
	 * any cmd completed with PKT_PORT_OFFLINE after PLOGI
	 * will confuse ulps.
	 */

	DEVICE_QUEUE_LOCK(tq);
	do {
		/*
		 * wait for the cmds to get drained. If they
		 * don't get drained then the transport will
		 * retry PLOGI after few secs.
		 */
		if (tq->outcnt != 0) {
			rval = FC_TRAN_BUSY;
			/* Drop the lock while delaying. */
			DEVICE_QUEUE_UNLOCK(tq);
			ql_delay(ha, 10000);
			DEVICE_QUEUE_LOCK(tq);
			cnt--;
			if (!cnt) {
				cmn_err(CE_NOTE, "!%s(%d) Plogi busy"
				    " for %xh outcount %xh", QL_NAME,
				    ha->instance, tq->d_id.b24, tq->outcnt);
			}
		} else {
			/* Queue drained; PLOGI may proceed. */
			rval = FC_SUCCESS;
			break;
		}
	} while (cnt > 0);
	DEVICE_QUEUE_UNLOCK(tq);

	/*
	 * return, if busy or if the plogi was asynchronous.
	 */
	if ((rval != FC_SUCCESS) ||
	    (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) &&
	    pkt->pkt_comp)) {
		return (rval);
	}

	/*
	 * Let us give daemon sufficient time and hopefully
	 * when transport retries PLOGI, it would have flushed
	 * callback queue.
	 */
	TASK_DAEMON_LOCK(ha);
	for (link = ha->callback_queue.first; link != NULL;
	    link = next_link) {
		next_link = link->next;
		sp = link->base_address;
		/* Unsolicited buffers carry the source ID instead. */
		if (sp->flags & SRB_UB_CALLBACK) {
			ubp = ha->ub_array[sp->handle];
			d_id.b24 = ubp->ub_frame.s_id;
		} else {
			d_id.b24 = sp->pkt->pkt_cmd_fhdr.d_id;
		}
		if (tq->d_id.b24 == d_id.b24) {
			/* Pending callback for this port; defer PLOGI. */
			cmn_err(CE_NOTE, "!%s(%d) Plogi busy for %xh", QL_NAME,
			    ha->instance, tq->d_id.b24);
			rval = FC_TRAN_BUSY;
			break;
		}
	}
	TASK_DAEMON_UNLOCK(ha);

	return (rval);
}
5999 
6000 /*
6001  * ql_login_port
6002  *	Logs in a device if not already logged in.
6003  *
6004  * Input:
6005  *	ha = adapter state pointer.
6006  *	d_id = 24 bit port ID.
6007  *	DEVICE_QUEUE_LOCK must be released.
6008  *
6009  * Returns:
6010  *	QL local function return status code.
6011  *
6012  * Context:
6013  *	Kernel context.
6014  */
static int
ql_login_port(ql_adapter_state_t *ha, port_id_t d_id)
{
	ql_adapter_state_t	*vha;
	ql_link_t		*link;
	uint16_t		index;
	ql_tgt_t		*tq, *tq2;
	uint16_t		loop_id, first_loop_id, last_loop_id;
	int			rval = QL_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Get head queue index. */
	index = ql_alpa_to_index[d_id.b.al_pa];

	/* Check for device already has a queue. */
	tq = NULL;
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			loop_id = tq->loop_id;
			break;
		} else {
			tq = NULL;
		}
	}

	/* Let's stop issuing any IO and unsolicited logo */
	if ((tq != NULL) && (!(ddi_in_panic()))) {
		DEVICE_QUEUE_LOCK(tq);
		tq->flags |= (TQF_QUEUE_SUSPENDED | TQF_PLOGI_PROGRS);
		tq->flags &= ~TQF_RSCN_RCVD;
		DEVICE_QUEUE_UNLOCK(tq);
	}
	/* A lost local-loop device retries with its pre-loss loop ID. */
	if ((tq != NULL) && (tq->loop_id & PORT_LOST_ID) &&
	    !(tq->flags & TQF_FABRIC_DEVICE)) {
		loop_id = (uint16_t)(tq->loop_id & ~PORT_LOST_ID);
	}

	/* Special case for Nameserver */
	if (d_id.b24 == 0xFFFFFC) {
		loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_2425) ?
		    SNS_24XX_HDL : SIMPLE_NAME_SERVER_LOOP_ID);
		if (tq == NULL) {
			ADAPTER_STATE_LOCK(ha);
			tq = ql_dev_init(ha, d_id, loop_id);
			ADAPTER_STATE_UNLOCK(ha);
			if (tq == NULL) {
				EL(ha, "failed=%xh, d_id=%xh\n",
				    QL_FUNCTION_FAILED, d_id.b24);
				return (QL_FUNCTION_FAILED);
			}
		}
		rval = ql_login_fabric_port(ha, tq, loop_id);
		if (rval == QL_SUCCESS) {
			tq->loop_id = loop_id;
			tq->flags |= TQF_FABRIC_DEVICE;
			(void) ql_get_port_database(ha, tq, PDF_NONE);
			/* A reachable name server implies a fabric. */
			ha->topology = (uint8_t)
			    (ha->topology | QL_SNS_CONNECTION);
		}
	/* Check for device already logged in. */
	} else if (tq != NULL && VALID_DEVICE_ID(ha, loop_id)) {
		if (tq->flags & TQF_FABRIC_DEVICE) {
			rval = ql_login_fabric_port(ha, tq, loop_id);
			/* Same port ID on same handle is still a success. */
			if (rval == QL_PORT_ID_USED) {
				rval = QL_SUCCESS;
			}
		} else if (LOCAL_LOOP_ID(loop_id)) {
			/* Initiators get no PLOGI from us. */
			rval = ql_login_lport(ha, tq, loop_id, (uint16_t)
			    (tq->flags & TQF_INITIATOR_DEVICE ?
			    LLF_NONE : LLF_PLOGI));
			if (rval == QL_SUCCESS) {
				DEVICE_QUEUE_LOCK(tq);
				tq->loop_id = loop_id;
				DEVICE_QUEUE_UNLOCK(tq);
			}
		}
	} else if (ha->topology & QL_SNS_CONNECTION) {
		/* Locate unused loop ID. */
		if (CFG_IST(ha, CFG_CTRL_2425)) {
			first_loop_id = 0;
			last_loop_id = LAST_N_PORT_HDL;
		} else if (ha->topology & QL_F_PORT) {
			first_loop_id = 0;
			last_loop_id = SNS_LAST_LOOP_ID;
		} else {
			first_loop_id = SNS_FIRST_LOOP_ID;
			last_loop_id = SNS_LAST_LOOP_ID;
		}

		/* Acquire adapter state lock. */
		ADAPTER_STATE_LOCK(ha);

		tq = ql_dev_init(ha, d_id, PORT_NO_LOOP_ID);
		if (tq == NULL) {
			EL(ha, "failed=%xh, d_id=%xh\n", QL_FUNCTION_FAILED,
			    d_id.b24);

			ADAPTER_STATE_UNLOCK(ha);

			return (QL_FUNCTION_FAILED);
		}

		/*
		 * Walk the candidate loop-ID space starting at the
		 * physical adapter's rotating free_loop_id cursor.
		 */
		rval = QL_FUNCTION_FAILED;
		loop_id = ha->pha->free_loop_id++;
		for (index = (uint16_t)(last_loop_id - first_loop_id); index;
		    index--) {
			/* Wrap the cursor back into the valid range. */
			if (loop_id < first_loop_id ||
			    loop_id > last_loop_id) {
				loop_id = first_loop_id;
				ha->pha->free_loop_id = (uint16_t)
				    (loop_id + 1);
			}

			/* Bypass if loop ID used. */
			for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
				tq2 = ql_loop_id_to_queue(vha, loop_id);
				if (tq2 != NULL && tq2 != tq) {
					break;
				}
			}
			if (vha != NULL || RESERVED_LOOP_ID(ha, loop_id) ||
			    loop_id == ha->loop_id) {
				loop_id = ha->pha->free_loop_id++;
				continue;
			}

			/* Drop the lock around the mailbox command. */
			ADAPTER_STATE_UNLOCK(ha);
			rval = ql_login_fabric_port(ha, tq, loop_id);

			/*
			 * If PORT_ID_USED is returned
			 * the login_fabric_port() updates
			 * with the correct loop ID
			 */
			switch (rval) {
			case QL_PORT_ID_USED:
				/*
				 * use f/w handle and try to
				 * login again.
				 */
				ADAPTER_STATE_LOCK(ha);
				ha->pha->free_loop_id--;
				ADAPTER_STATE_UNLOCK(ha);
				loop_id = tq->loop_id;
				break;
			case QL_SUCCESS:
				tq->flags |= TQF_FABRIC_DEVICE;
				(void) ql_get_port_database(ha,
				    tq, PDF_NONE);
				/* index = 1 terminates the search loop. */
				index = 1;
				break;

			case QL_LOOP_ID_USED:
				/* Handle collision; try the next one. */
				tq->loop_id = PORT_NO_LOOP_ID;
				loop_id = ha->pha->free_loop_id++;
				break;

			case QL_ALL_IDS_IN_USE:
				tq->loop_id = PORT_NO_LOOP_ID;
				index = 1;
				break;

			default:
				tq->loop_id = PORT_NO_LOOP_ID;
				index = 1;
				break;
			}

			ADAPTER_STATE_LOCK(ha);
		}

		ADAPTER_STATE_UNLOCK(ha);
	} else {
		rval = QL_FUNCTION_FAILED;
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh, d_id=%xh\n", rval, d_id.b24);
	} else {
		EL(ha, "d_id=%xh, loop_id=%xh, "
		    "wwpn=%02x%02x%02x%02x%02x%02x%02x%02xh\n", tq->d_id.b24,
		    tq->loop_id, tq->port_name[0], tq->port_name[1],
		    tq->port_name[2], tq->port_name[3], tq->port_name[4],
		    tq->port_name[5], tq->port_name[6], tq->port_name[7]);
	}
	return (rval);
}
6204 
6205 /*
6206  * ql_login_fabric_port
6207  *	Issue login fabric port mailbox command.
6208  *
6209  * Input:
6210  *	ha:		adapter state pointer.
6211  *	tq:		target queue pointer.
6212  *	loop_id:	FC Loop ID.
6213  *
6214  * Returns:
6215  *	ql local function return status code.
6216  *
6217  * Context:
6218  *	Kernel context.
6219  */
6220 static int
6221 ql_login_fabric_port(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t loop_id)
6222 {
6223 	int		rval;
6224 	int		index;
6225 	int		retry = 0;
6226 	port_id_t	d_id;
6227 	ql_tgt_t	*newq;
6228 	ql_mbx_data_t	mr;
6229 
6230 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6231 
6232 	/*
6233 	 * QL_PARAMETER_ERROR also means the firmware is
6234 	 * not able to allocate PCB entry due to resource
6235 	 * issues, or collision.
6236 	 */
6237 	do {
6238 		rval = ql_login_fport(ha, tq, loop_id, LFF_NONE, &mr);
6239 		if ((rval == QL_PARAMETER_ERROR) ||
6240 		    ((rval == QL_COMMAND_ERROR) && (mr.mb[1] == 2 ||
6241 		    mr.mb[1] == 3 || mr.mb[1] == 7 || mr.mb[1] == 0xd))) {
6242 			retry++;
6243 			drv_usecwait(10 * MILLISEC);
6244 		} else {
6245 			break;
6246 		}
6247 	} while (retry < 5);
6248 
6249 	switch (rval) {
6250 	case QL_SUCCESS:
6251 		tq->loop_id = loop_id;
6252 		break;
6253 
6254 	case QL_PORT_ID_USED:
6255 		/*
6256 		 * This Loop ID should NOT be in use in drivers
6257 		 */
6258 		newq = ql_loop_id_to_queue(ha, mr.mb[1]);
6259 
6260 		if (newq != NULL && newq != tq && tq->logout_sent == 0) {
6261 			cmn_err(CE_WARN, "ql_login_fabric_port(%d): logout of "
6262 			    "dup loop_id=%xh, d_id=%xh", ha->instance,
6263 			    newq->loop_id, newq->d_id.b24);
6264 			ql_send_logo(ha, newq, NULL);
6265 		}
6266 
6267 		tq->loop_id = mr.mb[1];
6268 		break;
6269 
6270 	case QL_LOOP_ID_USED:
6271 		d_id.b.al_pa = LSB(mr.mb[2]);
6272 		d_id.b.area = MSB(mr.mb[2]);
6273 		d_id.b.domain = LSB(mr.mb[1]);
6274 
6275 		newq = ql_d_id_to_queue(ha, d_id);
6276 		if (newq && (newq->loop_id != loop_id)) {
6277 			/*
6278 			 * This should NEVER ever happen; but this
6279 			 * code is needed to bail out when the worst
6280 			 * case happens - or as used to happen before
6281 			 */
6282 			ASSERT(newq->d_id.b24 == d_id.b24);
6283 
6284 			QL_PRINT_2(CE_CONT, "(%d,%d): Loop ID is now "
6285 			    "reassigned; old pairs: [%xh, %xh] and [%xh, %xh];"
6286 			    "new pairs: [%xh, unknown] and [%xh, %xh]\n",
6287 			    ha->instance, ha->vp_index, tq->d_id.b24, loop_id,
6288 			    newq->d_id.b24, newq->loop_id, tq->d_id.b24,
6289 			    newq->d_id.b24, loop_id);
6290 
6291 			if ((newq->d_id.b24 & 0xff) != (d_id.b24 & 0xff)) {
6292 				ADAPTER_STATE_LOCK(ha);
6293 
6294 				index = ql_alpa_to_index[newq->d_id.b.al_pa];
6295 				ql_add_link_b(&ha->dev[index], &newq->device);
6296 
6297 				newq->d_id.b24 = d_id.b24;
6298 
6299 				index = ql_alpa_to_index[d_id.b.al_pa];
6300 				ql_add_link_b(&ha->dev[index], &newq->device);
6301 
6302 				ADAPTER_STATE_UNLOCK(ha);
6303 			}
6304 
6305 			(void) ql_get_port_database(ha, newq, PDF_NONE);
6306 
6307 		}
6308 
6309 		/*
6310 		 * Invalidate the loop ID for the
6311 		 * us to obtain a new one.
6312 		 */
6313 		tq->loop_id = PORT_NO_LOOP_ID;
6314 		break;
6315 
6316 	case QL_ALL_IDS_IN_USE:
6317 		rval = QL_FUNCTION_FAILED;
6318 		EL(ha, "no loop id's available\n");
6319 		break;
6320 
6321 	default:
6322 		if (rval == QL_COMMAND_ERROR) {
6323 			switch (mr.mb[1]) {
6324 			case 2:
6325 			case 3:
6326 				rval = QL_MEMORY_ALLOC_FAILED;
6327 				break;
6328 
6329 			case 4:
6330 				rval = QL_FUNCTION_TIMEOUT;
6331 				break;
6332 			case 7:
6333 				rval = QL_FABRIC_NOT_INITIALIZED;
6334 				break;
6335 			default:
6336 				EL(ha, "cmd rtn; mb1=%xh\n", mr.mb[1]);
6337 				break;
6338 			}
6339 		} else {
6340 			cmn_err(CE_WARN, "%s(%d): login fabric port failed"
6341 			    " D_ID=%xh, rval=%xh, mb1=%xh", QL_NAME,
6342 			    ha->instance, tq->d_id.b24, rval, mr.mb[1]);
6343 		}
6344 		break;
6345 	}
6346 
6347 	if (rval != QL_SUCCESS && rval != QL_PORT_ID_USED &&
6348 	    rval != QL_LOOP_ID_USED) {
6349 		EL(ha, "failed=%xh\n", rval);
6350 	} else {
6351 		/*EMPTY*/
6352 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6353 	}
6354 	return (rval);
6355 }
6356 
6357 /*
6358  * ql_logout_port
6359  *	Logs out a device if possible.
6360  *
6361  * Input:
6362  *	ha:	adapter state pointer.
6363  *	d_id:	24 bit port ID.
6364  *
6365  * Returns:
6366  *	QL local function return status code.
6367  *
6368  * Context:
6369  *	Kernel context.
6370  */
6371 static int
6372 ql_logout_port(ql_adapter_state_t *ha, port_id_t d_id)
6373 {
6374 	ql_link_t	*link;
6375 	ql_tgt_t	*tq;
6376 	uint16_t	index;
6377 
6378 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6379 
6380 	/* Get head queue index. */
6381 	index = ql_alpa_to_index[d_id.b.al_pa];
6382 
6383 	/* Get device queue. */
6384 	tq = NULL;
6385 	for (link = ha->dev[index].first; link != NULL; link = link->next) {
6386 		tq = link->base_address;
6387 		if (tq->d_id.b24 == d_id.b24) {
6388 			break;
6389 		} else {
6390 			tq = NULL;
6391 		}
6392 	}
6393 
6394 	if (tq != NULL && tq->flags & TQF_FABRIC_DEVICE) {
6395 		(void) ql_logout_fabric_port(ha, tq);
6396 		tq->loop_id = PORT_NO_LOOP_ID;
6397 	}
6398 
6399 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6400 
6401 	return (QL_SUCCESS);
6402 }
6403 
6404 /*
6405  * ql_dev_init
6406  *	Initialize/allocate device queue.
6407  *
6408  * Input:
6409  *	ha:		adapter state pointer.
6410  *	d_id:		device destination ID
6411  *	loop_id:	device loop ID
6412  *	ADAPTER_STATE_LOCK must be already obtained.
6413  *
6414  * Returns:
6415  *	NULL = failure
6416  *
6417  * Context:
6418  *	Kernel context.
6419  */
6420 ql_tgt_t *
6421 ql_dev_init(ql_adapter_state_t *ha, port_id_t d_id, uint16_t loop_id)
6422 {
6423 	ql_link_t	*link;
6424 	uint16_t	index;
6425 	ql_tgt_t	*tq;
6426 
6427 	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh, loop_id=%xh\n",
6428 	    ha->instance, d_id.b24, loop_id);
6429 
6430 	index = ql_alpa_to_index[d_id.b.al_pa];
6431 
6432 	/* If device queue exists, set proper loop ID. */
6433 	tq = NULL;
6434 	for (link = ha->dev[index].first; link != NULL; link = link->next) {
6435 		tq = link->base_address;
6436 		if (tq->d_id.b24 == d_id.b24) {
6437 			tq->loop_id = loop_id;
6438 
6439 			/* Reset port down retry count. */
6440 			tq->port_down_retry_count = ha->port_down_retry_count;
6441 			tq->qfull_retry_count = ha->qfull_retry_count;
6442 
6443 			break;
6444 		} else {
6445 			tq = NULL;
6446 		}
6447 	}
6448 
6449 	/* If device does not have queue. */
6450 	if (tq == NULL) {
6451 		tq = (ql_tgt_t *)kmem_zalloc(sizeof (ql_tgt_t), KM_SLEEP);
6452 		if (tq != NULL) {
6453 			/*
6454 			 * mutex to protect the device queue,
6455 			 * does not block interrupts.
6456 			 */
6457 			mutex_init(&tq->mutex, NULL, MUTEX_DRIVER,
6458 			    (ha->iflags & IFLG_INTR_AIF) ?
6459 			    (void *)(uintptr_t)ha->intr_pri :
6460 			    (void *)(uintptr_t)ha->iblock_cookie);
6461 
6462 			tq->d_id.b24 = d_id.b24;
6463 			tq->loop_id = loop_id;
6464 			tq->device.base_address = tq;
6465 			tq->iidma_rate = IIDMA_RATE_INIT;
6466 
6467 			/* Reset port down retry count. */
6468 			tq->port_down_retry_count = ha->port_down_retry_count;
6469 			tq->qfull_retry_count = ha->qfull_retry_count;
6470 
6471 			/* Add device to device queue. */
6472 			ql_add_link_b(&ha->dev[index], &tq->device);
6473 		}
6474 	}
6475 
6476 	if (tq == NULL) {
6477 		EL(ha, "failed, d_id=%xh, loop_id=%xh\n", d_id.b24, loop_id);
6478 	} else {
6479 		/*EMPTY*/
6480 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6481 	}
6482 	return (tq);
6483 }
6484 
6485 /*
6486  * ql_dev_free
6487  *	Remove queue from device list and frees resources used by queue.
6488  *
6489  * Input:
6490  *	ha:	adapter state pointer.
6491  *	tq:	target queue pointer.
6492  *	ADAPTER_STATE_LOCK must be already obtained.
6493  *
6494  * Context:
6495  *	Kernel context.
6496  */
static void
ql_dev_free(ql_adapter_state_t *ha, ql_tgt_t *tq)
{
	ql_link_t	*link;
	uint16_t	index;
	ql_lun_t	*lq;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Bail out if any LUN queue still has commands queued. */
	for (link = tq->lun_queues.first; link != NULL; link = link->next) {
		lq = link->base_address;
		if (lq->cmd.first != NULL) {
			return;
		}
	}

	/* Only tear down the queue when nothing is outstanding. */
	if (tq->outcnt == 0) {
		/* Get head queue index. */
		index = ql_alpa_to_index[tq->d_id.b.al_pa];
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			if (link->base_address == tq) {
				ql_remove_link(&ha->dev[index], link);

				/*
				 * Free every LUN queue.  'link' is advanced
				 * before the removal because removing a node
				 * invalidates its next pointer.
				 */
				for (link = tq->lun_queues.first;
				    link != NULL; /* CSTYLE */) {
					lq = link->base_address;
					link = link->next;

					ql_remove_link(&tq->lun_queues,
					    &lq->link);
					kmem_free(lq, sizeof (ql_lun_t));
				}

				mutex_destroy(&tq->mutex);
				kmem_free(tq, sizeof (ql_tgt_t));
				break;
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
6540 
6541 /*
6542  * ql_lun_queue
 *	Allocate the LUN queue if it does not already exist.
6544  *
6545  * Input:
6546  *	ha:	adapter state pointer.
6547  *	tq:	target queue.
6548  *	lun:	LUN number.
6549  *
6550  * Returns:
6551  *	NULL = failure
6552  *
6553  * Context:
6554  *	Kernel context.
6555  */
6556 static ql_lun_t *
6557 ql_lun_queue(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t lun)
6558 {
6559 	ql_lun_t	*lq;
6560 	ql_link_t	*link;
6561 
6562 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6563 
6564 	/* Fast path. */
6565 	if (tq->last_lun_queue != NULL && tq->last_lun_queue->lun_no == lun) {
6566 		QL_PRINT_3(CE_CONT, "(%d): fast done\n", ha->instance);
6567 		return (tq->last_lun_queue);
6568 	}
6569 
6570 	if (lun >= MAX_LUNS) {
6571 		EL(ha, "Exceeded MAX_LUN=%d, lun=%d\n", MAX_LUNS, lun);
6572 		return (NULL);
6573 	}
6574 	/* If device queue exists, set proper loop ID. */
6575 	lq = NULL;
6576 	for (link = tq->lun_queues.first; link != NULL; link = link->next) {
6577 		lq = link->base_address;
6578 		if (lq->lun_no == lun) {
6579 			QL_PRINT_3(CE_CONT, "(%d): found done\n",
6580 			    ha->instance);
6581 			tq->last_lun_queue = lq;
6582 			return (lq);
6583 		}
6584 	}
6585 
6586 	/* If queue does exist. */
6587 	lq = (ql_lun_t *)kmem_zalloc(sizeof (ql_lun_t), KM_SLEEP);
6588 
6589 	/* Initialize LUN queue. */
6590 	if (lq != NULL) {
6591 		lq->link.base_address = lq;
6592 
6593 		lq->lun_no = lun;
6594 		lq->target_queue = tq;
6595 
6596 		DEVICE_QUEUE_LOCK(tq);
6597 		ql_add_link_b(&tq->lun_queues, &lq->link);
6598 		DEVICE_QUEUE_UNLOCK(tq);
6599 		tq->last_lun_queue = lq;
6600 	}
6601 
6602 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6603 
6604 	return (lq);
6605 }
6606 
6607 /*
6608  * ql_fcp_scsi_cmd
6609  *	Process fibre channel (FCP) SCSI protocol commands.
6610  *
6611  * Input:
6612  *	ha = adapter state pointer.
6613  *	pkt = pointer to fc_packet.
6614  *	sp = srb pointer.
6615  *
6616  * Returns:
6617  *	FC_SUCCESS - the packet was accepted for transport.
6618  *	FC_TRANSPORT_ERROR - a transport error occurred.
6619  *
6620  * Context:
6621  *	Kernel context.
6622  */
static int
ql_fcp_scsi_cmd(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_srb_t *sp)
{
	port_id_t	d_id;
	ql_tgt_t	*tq;
	uint64_t	*ptr;
	uint16_t	lun;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Use the cached device queue, else look it up by destination ID. */
	tq = (ql_tgt_t *)pkt->pkt_fca_device;
	if (tq == NULL) {
		d_id.r.rsvd_1 = 0;
		d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
		tq = ql_d_id_to_queue(ha, d_id);
	}

	/* Extract the LUN from the FCP command entity address. */
	sp->fcp = (struct fcp_cmd *)pkt->pkt_cmd;
	lun = CHAR_TO_SHORT(lobyte(sp->fcp->fcp_ent_addr.ent_addr_0),
	    hibyte(sp->fcp->fcp_ent_addr.ent_addr_0));

	if (tq != NULL &&
	    (sp->lun_queue = ql_lun_queue(ha, tq, lun)) != NULL) {

		/*
		 * zero out FCP response; 24 Bytes
		 */
		ptr = (uint64_t *)pkt->pkt_resp;
		*ptr++ = 0; *ptr++ = 0; *ptr++ = 0;

		/* Handle task management function. */
		if ((sp->fcp->fcp_cntl.cntl_kill_tsk |
		    sp->fcp->fcp_cntl.cntl_clr_aca |
		    sp->fcp->fcp_cntl.cntl_reset_tgt |
		    sp->fcp->fcp_cntl.cntl_reset_lun |
		    sp->fcp->fcp_cntl.cntl_clr_tsk |
		    sp->fcp->fcp_cntl.cntl_abort_tsk) != 0) {
			ql_task_mgmt(ha, tq, pkt, sp);
		} else {
			/* Account the I/O for the ioctl statistics. */
			ha->pha->xioctl->IosRequested++;
			ha->pha->xioctl->BytesRequested += (uint32_t)
			    sp->fcp->fcp_data_len;

			/*
			 * Setup for commands with data transfer
			 */
			sp->iocb = ha->fcp_cmd;
			if (sp->fcp->fcp_data_len != 0) {
				/*
				 * FCP data is bound to pkt_data_dma
				 */
				if (sp->fcp->fcp_cntl.cntl_write_data) {
					(void) ddi_dma_sync(pkt->pkt_data_dma,
					    0, 0, DDI_DMA_SYNC_FORDEV);
				}

				/* Setup IOCB count. */
				if (pkt->pkt_data_cookie_cnt > ha->cmd_segs) {
					uint32_t	cnt;

					/*
					 * Extra DMA cookies spill into
					 * continuation IOCBs; +2 when the
					 * division leaves a remainder.
					 */
					cnt = pkt->pkt_data_cookie_cnt -
					    ha->cmd_segs;
					sp->req_cnt = (uint16_t)
					    (cnt / ha->cmd_cont_segs);
					if (cnt % ha->cmd_cont_segs) {
						sp->req_cnt = (uint16_t)
						    (sp->req_cnt + 2);
					} else {
						sp->req_cnt++;
					}
				} else {
					sp->req_cnt = 1;
				}
			} else {
				sp->req_cnt = 1;
			}
			QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

			return (ql_start_cmd(ha, tq, pkt, sp));
		}
	} else {
		/* No device/LUN queue: reject the packet back to the ULP. */
		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;

		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp)
			ql_awaken_task_daemon(ha, sp, 0, 0);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (FC_SUCCESS);
}
6715 
6716 /*
6717  * ql_task_mgmt
6718  *	Task management function processor.
6719  *
6720  * Input:
6721  *	ha:	adapter state pointer.
6722  *	tq:	target queue pointer.
6723  *	pkt:	pointer to fc_packet.
6724  *	sp:	SRB pointer.
6725  *
6726  * Context:
6727  *	Kernel context.
6728  */
6729 static void
6730 ql_task_mgmt(ql_adapter_state_t *ha, ql_tgt_t *tq, fc_packet_t *pkt,
6731     ql_srb_t *sp)
6732 {
6733 	fcp_rsp_t		*fcpr;
6734 	struct fcp_rsp_info	*rsp;
6735 	uint16_t		lun;
6736 
6737 	ASSERT(pkt->pkt_cmd_dma == NULL && pkt->pkt_resp_dma == NULL);
6738 
6739 	fcpr = (fcp_rsp_t *)pkt->pkt_resp;
6740 	rsp = (struct fcp_rsp_info *)pkt->pkt_resp + sizeof (fcp_rsp_t);
6741 
6742 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6743 
6744 	bzero(fcpr, pkt->pkt_rsplen);
6745 
6746 	fcpr->fcp_u.fcp_status.rsp_len_set = 1;
6747 	fcpr->fcp_response_len = 8;
6748 	lun = CHAR_TO_SHORT(lobyte(sp->fcp->fcp_ent_addr.ent_addr_0),
6749 	    hibyte(sp->fcp->fcp_ent_addr.ent_addr_0));
6750 
6751 	if (sp->fcp->fcp_cntl.cntl_clr_aca) {
6752 		if (ql_clear_aca(ha, tq, lun) != QL_SUCCESS) {
6753 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
6754 		}
6755 	} else if (sp->fcp->fcp_cntl.cntl_reset_lun) {
6756 		if (ql_lun_reset(ha, tq, lun) != QL_SUCCESS) {
6757 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
6758 		}
6759 	} else if (sp->fcp->fcp_cntl.cntl_reset_tgt) {
6760 		if (ql_target_reset(ha, tq, ha->loop_reset_delay) !=
6761 		    QL_SUCCESS) {
6762 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
6763 		}
6764 	} else if (sp->fcp->fcp_cntl.cntl_clr_tsk) {
6765 		if (ql_clear_task_set(ha, tq, lun) != QL_SUCCESS) {
6766 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
6767 		}
6768 	} else if (sp->fcp->fcp_cntl.cntl_abort_tsk) {
6769 		if (ql_abort_task_set(ha, tq, lun) != QL_SUCCESS) {
6770 			rsp->rsp_code = FCP_TASK_MGMT_FAILED;
6771 		}
6772 	} else {
6773 		rsp->rsp_code = FCP_TASK_MGMT_NOT_SUPPTD;
6774 	}
6775 
6776 	pkt->pkt_state = FC_PKT_SUCCESS;
6777 
6778 	/* Do command callback. */
6779 	if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
6780 		ql_awaken_task_daemon(ha, sp, 0, 0);
6781 	}
6782 
6783 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6784 }
6785 
6786 /*
6787  * ql_fcp_ip_cmd
 *	Process fibre channel (FCP) Internet Protocol (IP) commands.
6789  *
6790  * Input:
6791  *	ha:	adapter state pointer.
6792  *	pkt:	pointer to fc_packet.
6793  *	sp:	SRB pointer.
6794  *
6795  * Returns:
6796  *	FC_SUCCESS - the packet was accepted for transport.
6797  *	FC_TRANSPORT_ERROR - a transport error occurred.
6798  *
6799  * Context:
6800  *	Kernel context.
6801  */
6802 static int
6803 ql_fcp_ip_cmd(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_srb_t *sp)
6804 {
6805 	port_id_t	d_id;
6806 	ql_tgt_t	*tq;
6807 
6808 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6809 
6810 	tq = (ql_tgt_t *)pkt->pkt_fca_device;
6811 	if (tq == NULL) {
6812 		d_id.r.rsvd_1 = 0;
6813 		d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
6814 		tq = ql_d_id_to_queue(ha, d_id);
6815 	}
6816 
6817 	if (tq != NULL && (sp->lun_queue = ql_lun_queue(ha, tq, 0)) != NULL) {
6818 		/*
6819 		 * IP data is bound to pkt_cmd_dma
6820 		 */
6821 		(void) ddi_dma_sync(pkt->pkt_cmd_dma,
6822 		    0, 0, DDI_DMA_SYNC_FORDEV);
6823 
6824 		/* Setup IOCB count. */
6825 		sp->iocb = ha->ip_cmd;
6826 		if (pkt->pkt_cmd_cookie_cnt > ha->cmd_segs) {
6827 			uint32_t	cnt;
6828 
6829 			cnt = pkt->pkt_cmd_cookie_cnt - ha->cmd_segs;
6830 			sp->req_cnt = (uint16_t)(cnt / ha->cmd_cont_segs);
6831 			if (cnt % ha->cmd_cont_segs) {
6832 				sp->req_cnt = (uint16_t)(sp->req_cnt + 2);
6833 			} else {
6834 				sp->req_cnt++;
6835 			}
6836 		} else {
6837 			sp->req_cnt = 1;
6838 		}
6839 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6840 
6841 		return (ql_start_cmd(ha, tq, pkt, sp));
6842 	} else {
6843 		pkt->pkt_state = FC_PKT_LOCAL_RJT;
6844 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
6845 
6846 		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp)
6847 			ql_awaken_task_daemon(ha, sp, 0, 0);
6848 	}
6849 
6850 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6851 
6852 	return (FC_SUCCESS);
6853 }
6854 
6855 /*
6856  * ql_fcp_data_rsp
6857  *	Process fibre channel protocol (FCP) data and response.
6858  *
6859  * Input:
6860  *	ha:	adapter state pointer.
6861  *	pkt:	pointer to fc_packet.
6862  *	sp:	SRB pointer.
6863  *
6864  * Returns:
6865  *	FC_SUCCESS - the packet was accepted for transport.
6866  *	FC_TRANSPORT_ERROR - a transport error occurred.
6867  *
6868  * Context:
6869  *	Kernel context.
6870  */
6871 static int
6872 ql_fcp_data_rsp(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_srb_t *sp)
6873 {
6874 	port_id_t	d_id;
6875 	ql_tgt_t	*tq;
6876 	uint16_t	lun;
6877 
6878 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6879 
6880 	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
6881 	tq = ql_d_id_to_queue(ha, d_id);
6882 
6883 	sp->fcp = (struct fcp_cmd *)pkt->pkt_cmd;
6884 	lun = CHAR_TO_SHORT(lobyte(sp->fcp->fcp_ent_addr.ent_addr_0),
6885 	    hibyte(sp->fcp->fcp_ent_addr.ent_addr_0));
6886 
6887 	if (tq != NULL &&
6888 	    (sp->lun_queue = ql_lun_queue(ha, tq, lun)) != NULL) {
6889 		sp->fcp = (struct fcp_cmd *)pkt->pkt_cmd;
6890 
6891 		/*
6892 		 * Setup for commands with data transfer
6893 		 */
6894 		if (pkt->pkt_cmdlen != 0 &&
6895 		    ((pkt->pkt_tran_type == FC_PKT_OUTBOUND) ||
6896 		    sp->flags & SRB_FCP_RSP_PKT)) {
6897 			(void) ddi_dma_sync(pkt->pkt_cmd_dma, 0, 0,
6898 			    DDI_DMA_SYNC_FORDEV);
6899 		}
6900 
6901 		/* Setup IOCB count. */
6902 		sp->iocb = ha->ctio_cmd;
6903 		sp->req_cnt = 1;
6904 
6905 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6906 
6907 		return (ql_start_cmd(ha, tq, pkt, sp));
6908 	} else {
6909 		pkt->pkt_state = FC_PKT_LOCAL_RJT;
6910 		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
6911 
6912 		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp)
6913 			ql_awaken_task_daemon(ha, sp, 0, 0);
6914 	}
6915 
6916 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6917 
6918 	return (FC_SUCCESS);
6919 }
6920 
6921 /*
6922  * ql_fc_services
6923  *	Process fibre channel services (name server).
6924  *
6925  * Input:
6926  *	ha:	adapter state pointer.
6927  *	pkt:	pointer to fc_packet.
6928  *
6929  * Returns:
6930  *	FC_SUCCESS - the packet was accepted for transport.
6931  *	FC_TRANSPORT_ERROR - a transport error occurred.
6932  *
6933  * Context:
6934  *	Kernel context.
6935  */
static int
ql_fc_services(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	uint32_t	cnt;
	fc_ct_header_t	hdr;
	la_els_rjt_t	rjt;
	port_id_t	d_id;
	ql_tgt_t	*tq;
	ql_srb_t	*sp;
	int		rval;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Copy the CT header out of the command DMA buffer. */
	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&hdr,
	    (uint8_t *)pkt->pkt_cmd, sizeof (hdr), DDI_DEV_AUTOINCR);

	bzero(&rjt, sizeof (rjt));

	/* Do some sanity checks */
	/* ct_aiusize is in 4-byte words and excludes the CT header. */
	cnt = (uint32_t)((uint32_t)(hdr.ct_aiusize * 4) +
	    sizeof (fc_ct_header_t));
	ASSERT(cnt <= (uint32_t)pkt->pkt_rsplen);
	if (cnt > (uint32_t)pkt->pkt_rsplen) {
		EL(ha, "FC_ELS_MALFORMED, cnt=%xh, size=%xh\n", cnt,
		    pkt->pkt_rsplen);
		return (FC_ELS_MALFORMED);
	}

	switch (hdr.ct_fcstype) {
	case FCSTYPE_DIRECTORY:
	case FCSTYPE_MGMTSERVICE:
		/* An FCA must make sure that the header is in big endian */
		ql_cthdr_endian(pkt->pkt_cmd_acc, pkt->pkt_cmd, B_FALSE);

		d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
		tq = ql_d_id_to_queue(ha, d_id);
		sp = (ql_srb_t *)pkt->pkt_fca_private;
		if (tq == NULL ||
		    (sp->lun_queue = ql_lun_queue(ha, tq, 0)) == NULL) {
			/* No path to the name/management server. */
			pkt->pkt_state = FC_PKT_LOCAL_RJT;
			pkt->pkt_reason = FC_REASON_NO_CONNECTION;
			rval = QL_SUCCESS;
			break;
		}

		/*
		 * Services data is bound to pkt_cmd_dma
		 */
		(void) ddi_dma_sync(pkt->pkt_cmd_dma, 0, 0,
		    DDI_DMA_SYNC_FORDEV);

		sp->flags |= SRB_MS_PKT;
		sp->retry_count = 32;

		/* Setup IOCB count. */
		/* Extra response cookies spill into continuation IOCBs. */
		sp->iocb = ha->ms_cmd;
		if (pkt->pkt_resp_cookie_cnt > MS_DATA_SEGMENTS) {
			cnt = pkt->pkt_resp_cookie_cnt - MS_DATA_SEGMENTS;
			sp->req_cnt =
			    (uint16_t)(cnt / CONT_TYPE_1_DATA_SEGMENTS);
			if (cnt % CONT_TYPE_1_DATA_SEGMENTS) {
				sp->req_cnt = (uint16_t)(sp->req_cnt + 2);
			} else {
				sp->req_cnt++;
			}
		} else {
			sp->req_cnt = 1;
		}
		rval = ql_start_cmd(ha, tq, pkt, sp);

		QL_PRINT_3(CE_CONT, "(%d): done, ql_start_cmd=%xh\n",
		    ha->instance, rval);

		return (rval);

	default:
		EL(ha, "unknown fcstype=%xh\n", hdr.ct_fcstype);
		rval = QL_FUNCTION_PARAMETER_ERROR;
		break;
	}

	if (rval != QL_SUCCESS) {

		/* Build RJT. */
		rjt.ls_code.ls_code = LA_ELS_RJT;
		rjt.reason = FC_REASON_CMD_UNSUPPORTED;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_UNSUPPORTED;
	}

	/* Do command callback. */
	if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
		ql_awaken_task_daemon(ha, (ql_srb_t *)pkt->pkt_fca_private,
		    0, 0);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (FC_SUCCESS);
}
7040 
7041 /*
7042  * ql_cthdr_endian
 *	Change endianness of the CT passthrough header and payload.
7044  *
7045  * Input:
7046  *	acc_handle:	DMA buffer access handle.
7047  *	ct_hdr:		Pointer to header.
7048  *	restore:	Restore first flag.
7049  *
7050  * Context:
7051  *	Interrupt or Kernel context, no mailbox commands allowed.
7052  */
void
ql_cthdr_endian(ddi_acc_handle_t acc_handle, caddr_t ct_hdr,
    boolean_t restore)
{
	uint8_t		i, *bp;
	fc_ct_header_t	hdr;
	uint32_t	*hdrp = (uint32_t *)&hdr;

	/* Pull the CT header out of the DMA buffer into a local copy. */
	ddi_rep_get8(acc_handle, (uint8_t *)&hdr,
	    (uint8_t *)ct_hdr, sizeof (hdr), DDI_DEV_AUTOINCR);

	/*
	 * On restore, byte-swap the local header first so the field
	 * values below are examined in host order.
	 */
	if (restore) {
		for (i = 0; i < ((sizeof (hdr)) / (sizeof (uint32_t))); i++) {
			*hdrp = BE_32(*hdrp);
			hdrp++;
		}
	}

	/* Directory-service payloads need per-command field swapping. */
	if (hdr.ct_fcstype == FCSTYPE_DIRECTORY) {
		bp = (uint8_t *)ct_hdr + sizeof (fc_ct_header_t);

		switch (hdr.ct_cmdrsp) {
		/* Commands whose payload begins with one 32-bit port ID. */
		case NS_GA_NXT:
		case NS_GPN_ID:
		case NS_GNN_ID:
		case NS_GCS_ID:
		case NS_GFT_ID:
		case NS_GSPN_ID:
		case NS_GPT_ID:
		case NS_GID_FT:
		case NS_GID_PT:
		case NS_RPN_ID:
		case NS_RNN_ID:
		case NS_RSPN_ID:
		case NS_DA_ID:
			BIG_ENDIAN_32(bp);
			break;
		/* Commands with two consecutive 32-bit fields. */
		case NS_RFT_ID:
		case NS_RCS_ID:
		case NS_RPT_ID:
			BIG_ENDIAN_32(bp);
			bp += 4;
			BIG_ENDIAN_32(bp);
			break;
		/* Commands whose payload starts with a 16-byte field. */
		case NS_GNN_IP:
		case NS_GIPA_IP:
			BIG_ENDIAN(bp, 16);
			break;
		/* 16-byte field at byte offset 8. */
		case NS_RIP_NN:
			bp += 8;
			BIG_ENDIAN(bp, 16);
			break;
		/* 64-bit field at byte offset 8. */
		case NS_RIPA_NN:
			bp += 8;
			BIG_ENDIAN_64(bp);
			break;
		default:
			break;
		}
	}

	/* When converting to wire order, swap the header copy last. */
	if (restore == B_FALSE) {
		for (i = 0; i < ((sizeof (hdr)) / (sizeof (uint32_t))); i++) {
			*hdrp = BE_32(*hdrp);
			hdrp++;
		}
	}

	/* Write the (possibly swapped) header back to the DMA buffer. */
	ddi_rep_put8(acc_handle, (uint8_t *)&hdr,
	    (uint8_t *)ct_hdr, sizeof (hdr), DDI_DEV_AUTOINCR);
}
7124 
7125 /*
7126  * ql_start_cmd
7127  *	Finishes starting fibre channel protocol (FCP) command.
7128  *
7129  * Input:
7130  *	ha:	adapter state pointer.
7131  *	tq:	target queue pointer.
7132  *	pkt:	pointer to fc_packet.
7133  *	sp:	SRB pointer.
7134  *
7135  * Context:
7136  *	Kernel context.
7137  */
static int
ql_start_cmd(ql_adapter_state_t *ha, ql_tgt_t *tq, fc_packet_t *pkt,
    ql_srb_t *sp)
{
	int		rval = FC_SUCCESS;
	time_t		poll_wait = 0;
	ql_lun_t	*lq = sp->lun_queue;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	sp->handle = 0;

	/* Set poll for finish. */
	if (pkt->pkt_tran_flags & FC_TRAN_NO_INTR) {
		sp->flags |= SRB_POLL;
		if (pkt->pkt_timeout == 0) {
			pkt->pkt_timeout = SCSI_POLL_TIMEOUT;
		}
	}

	/* Acquire device queue lock. */
	DEVICE_QUEUE_LOCK(tq);

	/*
	 * If we need authentication, report device busy to
	 * upper layers to retry later
	 */
	if (tq->flags & (TQF_RSCN_RCVD | TQF_NEED_AUTHENTICATION)) {
		DEVICE_QUEUE_UNLOCK(tq);
		EL(ha, "failed, FC_DEVICE_BUSY=%xh, d_id=%xh\n", tq->flags,
		    tq->d_id.b24);
		return (FC_DEVICE_BUSY);
	}

	/* Insert command onto watchdog queue. */
	if (!(pkt->pkt_tran_flags & FC_TRAN_DUMPING)) {
		ql_timeout_insert(ha, tq, sp);
	} else {
		/*
		 * Run dump requests in polled mode as kernel threads
		 * and interrupts may have been disabled.
		 */
		sp->flags |= SRB_POLL;
		sp->init_wdg_q_time = 0;
		sp->isp_timeout = 0;
	}

	/* If a polling command setup wait time. */
	if (sp->flags & SRB_POLL) {
		if (sp->flags & SRB_WATCHDOG_ENABLED) {
			poll_wait = (sp->wdg_q_time + 2) * WATCHDOG_TIME;
		} else {
			poll_wait = pkt->pkt_timeout;
		}
		ASSERT(poll_wait != 0);
	}

	/*
	 * With link-down reporting enabled and an abort timeout pending,
	 * fail the command immediately as port-unavailable.
	 */
	if (ha->pha->flags & COMMAND_ABORT_TIMEOUT &&
	    (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING))) {
		/* Set ending status. */
		sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;

		/* Call done routine to handle completions. */
		sp->cmd.next = NULL;
		DEVICE_QUEUE_UNLOCK(tq);
		ql_done(&sp->cmd);
	} else {
		if (ddi_in_panic() && (sp->flags & SRB_POLL)) {
			int do_lip = 0;

			ASSERT(ha->pha->outstanding_cmds[0] == NULL);

			DEVICE_QUEUE_UNLOCK(tq);

			/* Do the panic-time LIP at most once. */
			ADAPTER_STATE_LOCK(ha);
			if ((do_lip = ha->pha->lip_on_panic) == 0) {
				ha->pha->lip_on_panic++;
			}
			ADAPTER_STATE_UNLOCK(ha);

			if (!do_lip) {

				/*
				 * That Qlogic F/W performs PLOGI, PRLI, etc
				 * is helpful here. If a PLOGI fails for some
				 * reason, you would get CS_PORT_LOGGED_OUT
				 * or some such error; and we should get a
				 * careful polled mode login kicked off inside
				 * of this driver itself. You don't have FC
				 * transport's services as all threads are
				 * suspended, interrupts disabled, and so
				 * on. Right now we do re-login if the packet
				 * state isn't FC_PKT_SUCCESS.
				 */
				(void) ql_abort_isp(ha);
			}

			ql_start_iocb(ha, sp);
		} else {
			/* Add the command to the device queue */
			if (pkt->pkt_tran_flags & FC_TRAN_HI_PRIORITY) {
				ql_add_link_t(&lq->cmd, &sp->cmd);
			} else {
				ql_add_link_b(&lq->cmd, &sp->cmd);
			}

			sp->flags |= SRB_IN_DEVICE_QUEUE;

			/* Check whether next message can be processed */
			ql_next(ha, lq);
		}
	}

	/* If polling, wait for finish. */
	if (poll_wait) {
		ASSERT(sp->flags & SRB_POLL);

		if (ql_poll_cmd(ha, sp, poll_wait) != QL_SUCCESS) {
			int	res;

			/*
			 * Poll timed out; try to abort, and if the abort
			 * fails, pull the command off the device queue
			 * ourselves.
			 */
			res = ql_abort((opaque_t)ha, pkt, 0);
			if (res != FC_SUCCESS && res != FC_ABORTED) {
				ASSERT(res == FC_OFFLINE ||
				    res == FC_ABORT_FAILED);

				DEVICE_QUEUE_LOCK(tq);
				ql_remove_link(&lq->cmd, &sp->cmd);
				sp->flags &= ~SRB_IN_DEVICE_QUEUE;
				DEVICE_QUEUE_UNLOCK(tq);
			}
		}

		if (pkt->pkt_state != FC_PKT_SUCCESS) {
			EL(ha, "failed, FC_TRANSPORT_ERROR\n");
			rval = FC_TRANSPORT_ERROR;
		}

		ASSERT((sp->flags & (SRB_IN_DEVICE_QUEUE |
		    SRB_IN_TOKEN_ARRAY)) == 0);

		if (ddi_in_panic()) {
			ASSERT(ha->pha->outstanding_cmds[0] == NULL);
			if (pkt->pkt_state != FC_PKT_SUCCESS) {
				port_id_t d_id;

				/*
				 * successful LOGIN implies by design
				 * that PRLI also succeeded for disks
				 * Note also that there is no special
				 * mailbox command to send PRLI.
				 */
				d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
				(void) ql_login_port(ha, d_id);
			}
		}

		/*
		 * This should only happen during CPR dumping
		 */
		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) &&
		    pkt->pkt_comp) {
			ASSERT(pkt->pkt_tran_flags & FC_TRAN_DUMPING);
			sp->flags &= ~SRB_POLL;
			(*pkt->pkt_comp)(pkt);
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
7309 
7310 /*
7311  * ql_poll_cmd
7312  *	Polls commands for completion.
7313  *
7314  * Input:
7315  *	ha = adapter state pointer.
7316  *	sp = SRB command pointer.
7317  *	poll_wait = poll wait time in seconds.
7318  *
7319  * Returns:
7320  *	QL local function return status code.
7321  *
7322  * Context:
7323  *	Kernel context.
7324  */
/*
 * NOTE(review): poll_wait is documented as seconds, yet msecs_left is
 * initialized to poll_wait * 100 while each pass delays 10ms and burns
 * 10 units, giving an effective timeout of poll_wait / 10 seconds.
 * A true millisecond budget would be poll_wait * 1000 -- confirm the
 * units callers actually pass before changing.
 */
static int
ql_poll_cmd(ql_adapter_state_t *vha, ql_srb_t *sp, time_t poll_wait)
{
	int			rval = QL_SUCCESS;
	time_t			msecs_left = poll_wait * 100;	/* 10ms inc */
	ql_adapter_state_t	*ha = vha->pha;

	/* Spin until the ISR/done path clears SRB_POLL or we time out. */
	while (sp->flags & SRB_POLL) {

		/*
		 * If interrupts are off (panic, CPR dump) or the adapter
		 * has gone idle too long, service the hardware by hand.
		 */
		if ((ha->flags & INTERRUPTS_ENABLED) == 0 ||
		    ha->idle_timer >= 15 || ddi_in_panic()) {

			/* If waiting for restart, do it now. */
			if (ha->port_retry_timer != 0) {
				ADAPTER_STATE_LOCK(ha);
				ha->port_retry_timer = 0;
				ADAPTER_STATE_UNLOCK(ha);

				TASK_DAEMON_LOCK(ha);
				ha->task_daemon_flags |= PORT_RETRY_NEEDED;
				TASK_DAEMON_UNLOCK(ha);
			}

			/* Manually poll and claim any pending RISC interrupt. */
			if ((CFG_IST(ha, CFG_CTRL_2425) ?
			    RD32_IO_REG(ha, istatus) :
			    RD16_IO_REG(ha, istatus)) & RISC_INT) {
				(void) ql_isr((caddr_t)ha);
				INTR_LOCK(ha);
				ha->intr_claimed = TRUE;
				INTR_UNLOCK(ha);
			}

			/*
			 * Call task thread function in case the
			 * daemon is not running.
			 */
			TASK_DAEMON_LOCK(ha);

			if (!ddi_in_panic() && QL_DAEMON_NOT_ACTIVE(ha) &&
			    QL_TASK_PENDING(ha)) {
				ha->task_daemon_flags |= TASK_THREAD_CALLED;
				ql_task_thread(ha);
				ha->task_daemon_flags &= ~TASK_THREAD_CALLED;
			}

			TASK_DAEMON_UNLOCK(ha);
		}

		/* Out of budget for another 10ms wait: report timeout. */
		if (msecs_left < 10) {
			rval = QL_FUNCTION_TIMEOUT;
			break;
		}

		/*
		 * Polling interval is 10 milli seconds; Increasing
		 * the polling interval to seconds since disk IO
		 * timeout values are ~60 seconds is tempting enough,
		 * but CPR dump time increases, and so will the crash
		 * dump time; Don't toy with the settings without due
		 * consideration for all the scenarios that will be
		 * impacted.
		 */
		ql_delay(ha, 10000);
		msecs_left -= 10;
	}

	return (rval);
}
7393 
7394 /*
7395  * ql_next
7396  *	Retrieve and process next job in the device queue.
7397  *
7398  * Input:
7399  *	ha:	adapter state pointer.
7400  *	lq:	LUN queue pointer.
7401  *	DEVICE_QUEUE_LOCK must be already obtained.
7402  *
7403  * Output:
7404  *	Releases DEVICE_QUEUE_LOCK upon exit.
7405  *
7406  * Context:
7407  *	Interrupt or Kernel context, no mailbox commands allowed.
7408  */
7409 void
7410 ql_next(ql_adapter_state_t *vha, ql_lun_t *lq)
7411 {
7412 	ql_srb_t		*sp;
7413 	ql_link_t		*link;
7414 	ql_tgt_t		*tq = lq->target_queue;
7415 	ql_adapter_state_t	*ha = vha->pha;
7416 
7417 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7418 
7419 	if (ddi_in_panic()) {
7420 		DEVICE_QUEUE_UNLOCK(tq);
7421 		QL_PRINT_3(CE_CONT, "(%d): panic/active exit\n",
7422 		    ha->instance);
7423 		return;
7424 	}
7425 
7426 	while ((link = lq->cmd.first) != NULL) {
7427 		sp = link->base_address;
7428 
7429 		/* Exit if can not start commands. */
7430 		if (DRIVER_SUSPENDED(ha) ||
7431 		    (ha->flags & ONLINE) == 0 ||
7432 		    !VALID_DEVICE_ID(ha, tq->loop_id) ||
7433 		    sp->flags & SRB_ABORT ||
7434 		    tq->flags & (TQF_RSCN_RCVD | TQF_NEED_AUTHENTICATION |
7435 		    TQF_QUEUE_SUSPENDED)) {
7436 			EL(vha, "break, d_id=%xh, tdf=%xh, tqf=%xh, spf=%xh, "
7437 			    "haf=%xh, loop_id=%xh\n", tq->d_id.b24,
7438 			    ha->task_daemon_flags, tq->flags, sp->flags,
7439 			    ha->flags, tq->loop_id);
7440 			break;
7441 		}
7442 
7443 		/*
7444 		 * Find out the LUN number for untagged command use.
7445 		 * If there is an untagged command pending for the LUN,
7446 		 * we would not submit another untagged command
7447 		 * or if reached LUN execution throttle.
7448 		 */
7449 		if (sp->flags & SRB_FCP_CMD_PKT) {
7450 			if (lq->flags & LQF_UNTAGGED_PENDING ||
7451 			    lq->lun_outcnt >= ha->execution_throttle) {
7452 				QL_PRINT_8(CE_CONT, "(%d): break, d_id=%xh, "
7453 				    "lf=%xh, lun_outcnt=%xh\n", ha->instance,
7454 				    tq->d_id.b24, lq->flags, lq->lun_outcnt);
7455 				break;
7456 			}
7457 			if (sp->fcp->fcp_cntl.cntl_qtype ==
7458 			    FCP_QTYPE_UNTAGGED) {
7459 				/*
7460 				 * Set the untagged-flag for the LUN
7461 				 * so that no more untagged commands
7462 				 * can be submitted for this LUN.
7463 				 */
7464 				lq->flags |= LQF_UNTAGGED_PENDING;
7465 			}
7466 
7467 			/* Count command as sent. */
7468 			lq->lun_outcnt++;
7469 		}
7470 
7471 		/* Remove srb from device queue. */
7472 		ql_remove_link(&lq->cmd, &sp->cmd);
7473 		sp->flags &= ~SRB_IN_DEVICE_QUEUE;
7474 
7475 		tq->outcnt++;
7476 
7477 		ql_start_iocb(vha, sp);
7478 	}
7479 
7480 	/* Release device queue lock. */
7481 	DEVICE_QUEUE_UNLOCK(tq);
7482 
7483 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7484 }
7485 
7486 /*
7487  * ql_done
7488  *	Process completed commands.
7489  *
7490  * Input:
7491  *	link:	first command link in chain.
7492  *
7493  * Context:
7494  *	Interrupt or Kernel context, no mailbox commands allowed.
7495  */
void
ql_done(ql_link_t *link)
{
	ql_adapter_state_t	*ha;
	ql_link_t		*next_link;
	ql_srb_t		*sp;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;

	QL_PRINT_3(CE_CONT, "started\n");

	/* Walk the completion chain; each srb is processed independently. */
	for (; link != NULL; link = next_link) {
		next_link = link->next;
		sp = link->base_address;
		ha = sp->ha;

		if (sp->flags & SRB_UB_CALLBACK) {
			/*
			 * Unsolicited-buffer completion: return the buffer
			 * count to the pool and, if the firmware was holding
			 * it, replenish the ISP receive buffers.
			 */
			QL_UB_LOCK(ha);
			if (sp->flags & SRB_UB_IN_ISP) {
				if (ha->ub_outcnt != 0) {
					ha->ub_outcnt--;
				}
				QL_UB_UNLOCK(ha);
				ql_isp_rcvbuf(ha);
				QL_UB_LOCK(ha);
			}
			QL_UB_UNLOCK(ha);
			ql_awaken_task_daemon(ha, sp, 0, 0);
		} else {
			/* Free outstanding command slot. */
			if (sp->handle != 0) {
				ha->outstanding_cmds[
				    sp->handle & OSC_INDEX_MASK] = NULL;
				sp->handle = 0;
				sp->flags &= ~SRB_IN_TOKEN_ARRAY;
			}

			/* Acquire device queue lock. */
			lq = sp->lun_queue;
			tq = lq->target_queue;
			DEVICE_QUEUE_LOCK(tq);

			/* Decrement outstanding commands on device. */
			if (tq->outcnt != 0) {
				tq->outcnt--;
			}

			if (sp->flags & SRB_FCP_CMD_PKT) {
				if (sp->fcp->fcp_cntl.cntl_qtype ==
				    FCP_QTYPE_UNTAGGED) {
					/*
					 * Clear the flag for this LUN so that
					 * untagged commands can be submitted
					 * for it.
					 */
					lq->flags &= ~LQF_UNTAGGED_PENDING;
				}

				if (lq->lun_outcnt != 0) {
					lq->lun_outcnt--;
				}
			}

			/* Reset port down retry count on good completion. */
			if (sp->pkt->pkt_reason == CS_COMPLETE) {
				tq->port_down_retry_count =
				    ha->port_down_retry_count;
				tq->qfull_retry_count = ha->qfull_retry_count;
			}

			/*
			 * Place request back on top of target command queue.
			 * Retried only while the watchdog still has time left
			 * (wdg_q_time > 1) and, for non-MS packets, only when
			 * the target does not need re-authentication.
			 */
			if ((sp->flags & SRB_MS_PKT ||
			    !(tq->flags & TQF_NEED_AUTHENTICATION)) &&
			    sp->flags & SRB_RETRY &&
			    (sp->flags & SRB_WATCHDOG_ENABLED &&
			    sp->wdg_q_time > 1)) {
				sp->flags &= ~(SRB_ISP_STARTED |
				    SRB_ISP_COMPLETED | SRB_RETRY);

				/* Reset watchdog timer */
				sp->wdg_q_time = sp->init_wdg_q_time;

				/* Issue marker command on reset status. */
				if (!(ha->task_daemon_flags & LOOP_DOWN) &&
				    (sp->pkt->pkt_reason == CS_RESET ||
				    (CFG_IST(ha, CFG_CTRL_2425) &&
				    sp->pkt->pkt_reason == CS_ABORTED))) {
					(void) ql_marker(ha, tq->loop_id, 0,
					    MK_SYNC_ID);
				}

				/* Requeue at the head; ql_next drops the lock. */
				ql_add_link_t(&lq->cmd, &sp->cmd);
				sp->flags |= SRB_IN_DEVICE_QUEUE;
				ql_next(ha, lq);
			} else {
				/* Remove command from watchdog queue. */
				if (sp->flags & SRB_WATCHDOG_ENABLED) {
					ql_remove_link(&tq->wdg, &sp->wdg);
					sp->flags &= ~SRB_WATCHDOG_ENABLED;
				}

				/*
				 * Either path below releases the device
				 * queue lock (ql_next does it internally).
				 */
				if (lq->cmd.first != NULL) {
					ql_next(ha, lq);
				} else {
					/* Release LU queue specific lock. */
					DEVICE_QUEUE_UNLOCK(tq);
					if (ha->pha->pending_cmds.first !=
					    NULL) {
						ql_start_iocb(ha, NULL);
					}
				}

				/* Sync buffers if required.  */
				if (sp->flags & SRB_MS_PKT) {
					(void) ddi_dma_sync(
					    sp->pkt->pkt_resp_dma,
					    0, 0, DDI_DMA_SYNC_FORCPU);
				}

				/* Map ISP completion codes. */
				sp->pkt->pkt_expln = FC_EXPLN_NONE;
				sp->pkt->pkt_action = FC_ACTION_RETRYABLE;
				switch (sp->pkt->pkt_reason) {
				case CS_COMPLETE:
					sp->pkt->pkt_state = FC_PKT_SUCCESS;
					break;
				case CS_RESET:
					/* Issue marker command. */
					if (!(ha->task_daemon_flags &
					    LOOP_DOWN)) {
						(void) ql_marker(ha,
						    tq->loop_id, 0,
						    MK_SYNC_ID);
					}
					sp->pkt->pkt_state =
					    FC_PKT_PORT_OFFLINE;
					sp->pkt->pkt_reason =
					    FC_REASON_ABORTED;
					break;
				case CS_RESOUCE_UNAVAILABLE:
					sp->pkt->pkt_state = FC_PKT_LOCAL_BSY;
					sp->pkt->pkt_reason =
					    FC_REASON_PKT_BUSY;
					break;

				case CS_TIMEOUT:
					sp->pkt->pkt_state = FC_PKT_TIMEOUT;
					sp->pkt->pkt_reason =
					    FC_REASON_HW_ERROR;
					break;
				case CS_DATA_OVERRUN:
					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
					sp->pkt->pkt_reason =
					    FC_REASON_OVERRUN;
					break;
				case CS_PORT_UNAVAILABLE:
				case CS_PORT_LOGGED_OUT:
					sp->pkt->pkt_state =
					    FC_PKT_PORT_OFFLINE;
					sp->pkt->pkt_reason =
					    FC_REASON_LOGIN_REQUIRED;
					/* Tell transport a re-login is due. */
					ql_send_logo(ha, tq, NULL);
					break;
				case CS_PORT_CONFIG_CHG:
					sp->pkt->pkt_state =
					    FC_PKT_PORT_OFFLINE;
					sp->pkt->pkt_reason =
					    FC_REASON_OFFLINE;
					break;
				case CS_QUEUE_FULL:
					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
					sp->pkt->pkt_reason = FC_REASON_QFULL;
					break;

				case CS_ABORTED:
					DEVICE_QUEUE_LOCK(tq);
					if (tq->flags & (TQF_RSCN_RCVD |
					    TQF_NEED_AUTHENTICATION)) {
						sp->pkt->pkt_state =
						    FC_PKT_PORT_OFFLINE;
						sp->pkt->pkt_reason =
						    FC_REASON_LOGIN_REQUIRED;
					} else {
						sp->pkt->pkt_state =
						    FC_PKT_LOCAL_RJT;
						sp->pkt->pkt_reason =
						    FC_REASON_ABORTED;
					}
					DEVICE_QUEUE_UNLOCK(tq);
					break;

				case CS_TRANSPORT:
					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
					sp->pkt->pkt_reason =
					    FC_REASON_XCHG_DROPPED;
					EL(ha, "state=rjt, reason=dropped\n");
					break;

				case CS_FCP_RESPONSE_ERROR:
					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
					sp->pkt->pkt_reason =
					    FC_REASON_CRC_ERROR;
					EL(ha, "state=rjt, reason=crc\n");
					break;

				case CS_DATA_UNDERRUN:
					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
					sp->pkt->pkt_reason =
					    FC_REASON_UNDERRUN;
					break;
				case CS_DMA_ERROR:
				case CS_BAD_PAYLOAD:
				case CS_UNKNOWN:
				case CS_CMD_FAILED:
				default:
					sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
					sp->pkt->pkt_reason =
					    FC_REASON_HW_ERROR;
					break;
				}

				/* Now call the pkt completion callback */
				if (sp->flags & SRB_POLL) {
					sp->flags &= ~SRB_POLL;
				} else if (sp->pkt->pkt_comp) {
					if (sp->pkt->pkt_tran_flags &
					    FC_TRAN_IMMEDIATE_CB) {
						(*sp->pkt->pkt_comp)(sp->pkt);
					} else {
						ql_awaken_task_daemon(ha, sp,
						    0, 0);
					}
				}
			}
		}
	}

	QL_PRINT_3(CE_CONT, "done\n");
}
7735 
7736 /*
7737  * ql_awaken_task_daemon
7738  *	Adds command completion callback to callback queue and/or
7739  *	awakens task daemon thread.
7740  *
7741  * Input:
7742  *	ha:		adapter state pointer.
7743  *	sp:		srb pointer.
7744  *	set_flags:	task daemon flags to set.
7745  *	reset_flags:	task daemon flags to reset.
7746  *
7747  * Context:
7748  *	Interrupt or Kernel context, no mailbox commands allowed.
7749  */
void
ql_awaken_task_daemon(ql_adapter_state_t *vha, ql_srb_t *sp,
    uint32_t set_flags, uint32_t reset_flags)
{
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Acquire task daemon lock. */
	TASK_DAEMON_LOCK(ha);

	/* Don't request another ISP abort while one is already active. */
	if (set_flags & ISP_ABORT_NEEDED) {
		if (ha->task_daemon_flags & ABORT_ISP_ACTIVE) {
			set_flags &= ~ISP_ABORT_NEEDED;
		}
	}

	ha->task_daemon_flags |= set_flags;
	ha->task_daemon_flags &= ~reset_flags;

	if (QL_DAEMON_SUSPENDED(ha)) {
		/* Daemon can't run; do the work inline instead of queuing. */
		if (sp != NULL) {
			TASK_DAEMON_UNLOCK(ha);

			/* Do callback. */
			if (sp->flags & SRB_UB_CALLBACK) {
				ql_unsol_callback(sp);
			} else {
				(*sp->pkt->pkt_comp)(sp->pkt);
			}
		} else {
			/*
			 * No srb: run pending daemon work directly, but not
			 * from an interrupt thread and not if we are already
			 * nested inside ql_task_thread().
			 */
			if (!(curthread->t_flag & T_INTR_THREAD) &&
			    !(ha->task_daemon_flags & TASK_THREAD_CALLED)) {
				ha->task_daemon_flags |= TASK_THREAD_CALLED;
				ql_task_thread(ha);
				ha->task_daemon_flags &= ~TASK_THREAD_CALLED;
			}

			TASK_DAEMON_UNLOCK(ha);
		}
	} else {
		/* Queue the completion for the daemon and wake it if asleep. */
		if (sp != NULL) {
			ql_add_link_b(&ha->callback_queue, &sp->cmd);
		}

		if (ha->task_daemon_flags & TASK_DAEMON_SLEEPING_FLG) {
			cv_broadcast(&ha->cv_task_daemon);
		}
		TASK_DAEMON_UNLOCK(ha);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
7803 
7804 /*
7805  * ql_task_daemon
 *	Thread that is awakened by the driver when
 *	background work needs to be done.
7808  *
7809  * Input:
7810  *	arg = adapter state pointer.
7811  *
7812  * Context:
7813  *	Kernel context.
7814  */
static void
ql_task_daemon(void *arg)
{
	ql_adapter_state_t	*ha = (void *)arg;

	QL_PRINT_3(CE_CONT, "\n(%d): started\n", ha->instance);

	/* Register this thread with the CPR (suspend/resume) framework. */
	CALLB_CPR_INIT(&ha->cprinfo, &ha->task_daemon_mutex, callb_generic_cpr,
	    "ql_task_daemon");

	/* Acquire task daemon lock. */
	TASK_DAEMON_LOCK(ha);

	ha->task_daemon_flags |= TASK_DAEMON_ALIVE_FLG;

	/* Work loop: drain pending tasks, then sleep until awakened. */
	while ((ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) == 0) {
		ql_task_thread(ha);

		QL_PRINT_3(CE_CONT, "(%d): Going to sleep\n", ha->instance);

		/*
		 * Before we wait on the conditional variable, we
		 * need to check if STOP_FLG is set for us to terminate
		 */
		if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
			break;
		}

		/*LINTED [Solaris CALLB_CPR_SAFE_BEGIN Lint error]*/
		CALLB_CPR_SAFE_BEGIN(&ha->cprinfo);

		ha->task_daemon_flags |= TASK_DAEMON_SLEEPING_FLG;

		/* If killed, stop task daemon */
		if (cv_wait_sig(&ha->cv_task_daemon,
		    &ha->task_daemon_mutex) == 0) {
			ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
		}

		ha->task_daemon_flags &= ~TASK_DAEMON_SLEEPING_FLG;

		/*LINTED [Solaris CALLB_CPR_SAFE_END Lint error]*/
		CALLB_CPR_SAFE_END(&ha->cprinfo, &ha->task_daemon_mutex);

		QL_PRINT_3(CE_CONT, "(%d): Awakened\n", ha->instance);
	}

	ha->task_daemon_flags &= ~(TASK_DAEMON_STOP_FLG |
	    TASK_DAEMON_ALIVE_FLG);

	/* CALLB_CPR_EXIT drops task_daemon_mutex on our behalf. */
	/*LINTED [Solaris CALLB_CPR_EXIT Lint error]*/
	CALLB_CPR_EXIT(&ha->cprinfo);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	thread_exit();
}
7872 
7873 /*
7874  * ql_task_thread
7875  *	Thread run by daemon.
7876  *
7877  * Input:
7878  *	ha = adapter state pointer.
7879  *	TASK_DAEMON_LOCK must be acquired prior to call.
7880  *
7881  * Context:
7882  *	Kernel context.
7883  */
7884 static void
7885 ql_task_thread(ql_adapter_state_t *ha)
7886 {
7887 /*LINTED*/
7888 	int			loop_again;
7889 	ql_srb_t		*sp;
7890 	ql_head_t		*head;
7891 	ql_link_t		*link;
7892 	caddr_t			msg;
7893 	ql_adapter_state_t	*vha;
7894 
7895 	do {
7896 		QL_PRINT_3(CE_CONT, "(%d): task_daemon_flags=%xh\n",
7897 		    ha->instance, ha->task_daemon_flags);
7898 
7899 		loop_again = FALSE;
7900 
7901 		QL_PM_LOCK(ha);
7902 		if (ha->power_level != PM_LEVEL_D0 ||
7903 		    ha->flags & ADAPTER_SUSPENDED ||
7904 		    ha->task_daemon_flags & (TASK_DAEMON_STOP_FLG |
7905 		    DRIVER_STALL) ||
7906 		    (ha->flags & ONLINE) == 0) {
7907 			QL_PM_UNLOCK(ha);
7908 			ha->task_daemon_flags |= TASK_DAEMON_STALLED_FLG;
7909 			break;
7910 		}
7911 		QL_PM_UNLOCK(ha);
7912 
7913 		ha->task_daemon_flags &= ~TASK_DAEMON_STALLED_FLG;
7914 
7915 		if (ha->task_daemon_flags & ISP_ABORT_NEEDED) {
7916 			TASK_DAEMON_UNLOCK(ha);
7917 			ql_port_state(ha, FC_STATE_OFFLINE, FC_STATE_CHANGE);
7918 			TASK_DAEMON_LOCK(ha);
7919 			loop_again = TRUE;
7920 		}
7921 
7922 		/* Idle Check. */
7923 		if (ha->task_daemon_flags & TASK_DAEMON_IDLE_CHK_FLG) {
7924 			ha->task_daemon_flags &= ~TASK_DAEMON_IDLE_CHK_FLG;
7925 			if (!(ha->task_daemon_flags & QL_SUSPENDED)) {
7926 				TASK_DAEMON_UNLOCK(ha);
7927 				ql_idle_check(ha);
7928 				TASK_DAEMON_LOCK(ha);
7929 				loop_again = TRUE;
7930 			}
7931 		}
7932 
7933 		/* Crystal+ port#0 bypass transition */
7934 		if (ha->task_daemon_flags & HANDLE_PORT_BYPASS_CHANGE) {
7935 			ha->task_daemon_flags &= ~HANDLE_PORT_BYPASS_CHANGE;
7936 			TASK_DAEMON_UNLOCK(ha);
7937 			(void) ql_initiate_lip(ha);
7938 			TASK_DAEMON_LOCK(ha);
7939 			loop_again = TRUE;
7940 		}
7941 
7942 		/* Abort queues needed. */
7943 		if (ha->task_daemon_flags & ABORT_QUEUES_NEEDED) {
7944 			ha->task_daemon_flags &= ~ABORT_QUEUES_NEEDED;
7945 			TASK_DAEMON_UNLOCK(ha);
7946 			ql_abort_queues(ha);
7947 			TASK_DAEMON_LOCK(ha);
7948 		}
7949 
7950 		/* Not suspended, awaken waiting routines. */
7951 		if (!(ha->task_daemon_flags & QL_SUSPENDED) &&
7952 		    ha->task_daemon_flags & SUSPENDED_WAKEUP_FLG) {
7953 			ha->task_daemon_flags &= ~SUSPENDED_WAKEUP_FLG;
7954 			cv_broadcast(&ha->cv_dr_suspended);
7955 			loop_again = TRUE;
7956 		}
7957 
7958 		/* Handle RSCN changes. */
7959 		for (vha = ha; vha != NULL; vha = vha->vp_next) {
7960 			if (vha->task_daemon_flags & RSCN_UPDATE_NEEDED) {
7961 				vha->task_daemon_flags &= ~RSCN_UPDATE_NEEDED;
7962 				TASK_DAEMON_UNLOCK(ha);
7963 				(void) ql_handle_rscn_update(vha);
7964 				TASK_DAEMON_LOCK(ha);
7965 				loop_again = TRUE;
7966 			}
7967 		}
7968 
7969 		/* Handle state changes. */
7970 		for (vha = ha; vha != NULL; vha = vha->vp_next) {
7971 			if (vha->task_daemon_flags & FC_STATE_CHANGE &&
7972 			    !(ha->task_daemon_flags &
7973 			    TASK_DAEMON_POWERING_DOWN)) {
7974 				/* Report state change. */
7975 				EL(vha, "state change = %xh\n", vha->state);
7976 				vha->task_daemon_flags &= ~FC_STATE_CHANGE;
7977 
7978 				if (vha->task_daemon_flags &
7979 				    COMMAND_WAIT_NEEDED) {
7980 					vha->task_daemon_flags &=
7981 					    ~COMMAND_WAIT_NEEDED;
7982 					if (!(ha->task_daemon_flags &
7983 					    COMMAND_WAIT_ACTIVE)) {
7984 						ha->task_daemon_flags |=
7985 						    COMMAND_WAIT_ACTIVE;
7986 						TASK_DAEMON_UNLOCK(ha);
7987 						ql_cmd_wait(ha);
7988 						TASK_DAEMON_LOCK(ha);
7989 						ha->task_daemon_flags &=
7990 						    ~COMMAND_WAIT_ACTIVE;
7991 					}
7992 				}
7993 
7994 				msg = NULL;
7995 				if (FC_PORT_STATE_MASK(vha->state) ==
7996 				    FC_STATE_OFFLINE) {
7997 					if (vha->task_daemon_flags &
7998 					    STATE_ONLINE) {
7999 						if (ha->topology &
8000 						    QL_LOOP_CONNECTION) {
8001 							msg = "Loop OFFLINE";
8002 						} else {
8003 							msg = "Link OFFLINE";
8004 						}
8005 					}
8006 					vha->task_daemon_flags &=
8007 					    ~STATE_ONLINE;
8008 				} else if (FC_PORT_STATE_MASK(vha->state) ==
8009 				    FC_STATE_LOOP) {
8010 					if (!(vha->task_daemon_flags &
8011 					    STATE_ONLINE)) {
8012 						msg = "Loop ONLINE";
8013 					}
8014 					vha->task_daemon_flags |= STATE_ONLINE;
8015 				} else if (FC_PORT_STATE_MASK(vha->state) ==
8016 				    FC_STATE_ONLINE) {
8017 					if (!(vha->task_daemon_flags &
8018 					    STATE_ONLINE)) {
8019 						msg = "Link ONLINE";
8020 					}
8021 					vha->task_daemon_flags |= STATE_ONLINE;
8022 				} else {
8023 					msg = "Unknown Link state";
8024 				}
8025 
8026 				if (msg != NULL) {
8027 					cmn_err(CE_NOTE, "!Qlogic %s(%d,%d): "
8028 					    "%s", QL_NAME, ha->instance,
8029 					    vha->vp_index, msg);
8030 				}
8031 
8032 				if (vha->flags & FCA_BOUND) {
8033 					QL_PRINT_10(CE_CONT, "(%d,%d): statec_"
8034 					    "cb state=%xh\n", ha->instance,
8035 					    vha->vp_index, vha->state);
8036 					TASK_DAEMON_UNLOCK(ha);
8037 					(vha->bind_info.port_statec_cb)
8038 					    (vha->bind_info.port_handle,
8039 					    vha->state);
8040 					TASK_DAEMON_LOCK(ha);
8041 				}
8042 				loop_again = TRUE;
8043 			}
8044 		}
8045 
8046 		if (ha->task_daemon_flags & LIP_RESET_PENDING &&
8047 		    !(ha->task_daemon_flags & TASK_DAEMON_POWERING_DOWN)) {
8048 			EL(ha, "processing LIP reset\n");
8049 			ha->task_daemon_flags &= ~LIP_RESET_PENDING;
8050 			TASK_DAEMON_UNLOCK(ha);
8051 			for (vha = ha; vha != NULL; vha = vha->vp_next) {
8052 				if (vha->flags & FCA_BOUND) {
8053 					QL_PRINT_10(CE_CONT, "(%d,%d): statec_"
8054 					    "cb reset\n", ha->instance,
8055 					    vha->vp_index);
8056 					(vha->bind_info.port_statec_cb)
8057 					    (vha->bind_info.port_handle,
8058 					    FC_STATE_TARGET_PORT_RESET);
8059 				}
8060 			}
8061 			TASK_DAEMON_LOCK(ha);
8062 			loop_again = TRUE;
8063 		}
8064 
8065 		if (QL_IS_SET(ha->task_daemon_flags, NEED_UNSOLICITED_BUFFERS |
8066 		    FIRMWARE_UP)) {
8067 			/*
8068 			 * The firmware needs more unsolicited
8069 			 * buffers. We cannot allocate any new
8070 			 * buffers unless the ULP module requests
8071 			 * for new buffers. All we can do here is
8072 			 * to give received buffers from the pool
8073 			 * that is already allocated
8074 			 */
8075 			ha->task_daemon_flags &= ~NEED_UNSOLICITED_BUFFERS;
8076 			TASK_DAEMON_UNLOCK(ha);
8077 			ql_isp_rcvbuf(ha);
8078 			TASK_DAEMON_LOCK(ha);
8079 			loop_again = TRUE;
8080 		}
8081 
8082 		if (ha->task_daemon_flags & ISP_ABORT_NEEDED) {
8083 			TASK_DAEMON_UNLOCK(ha);
8084 			(void) ql_abort_isp(ha);
8085 			TASK_DAEMON_LOCK(ha);
8086 			loop_again = TRUE;
8087 		}
8088 
8089 		if (!(ha->task_daemon_flags & (LOOP_DOWN | DRIVER_STALL |
8090 		    COMMAND_WAIT_NEEDED))) {
8091 			if (QL_IS_SET(ha->task_daemon_flags,
8092 			    RESET_MARKER_NEEDED | FIRMWARE_UP)) {
8093 				ha->task_daemon_flags &= ~RESET_MARKER_NEEDED;
8094 				if (!(ha->task_daemon_flags & RESET_ACTIVE)) {
8095 					ha->task_daemon_flags |= RESET_ACTIVE;
8096 					TASK_DAEMON_UNLOCK(ha);
8097 					for (vha = ha; vha != NULL;
8098 					    vha = vha->vp_next) {
8099 						ql_rst_aen(vha);
8100 					}
8101 					TASK_DAEMON_LOCK(ha);
8102 					ha->task_daemon_flags &= ~RESET_ACTIVE;
8103 					loop_again = TRUE;
8104 				}
8105 			}
8106 
8107 			if (QL_IS_SET(ha->task_daemon_flags,
8108 			    LOOP_RESYNC_NEEDED | FIRMWARE_UP)) {
8109 				if (!(ha->task_daemon_flags &
8110 				    LOOP_RESYNC_ACTIVE)) {
8111 					ha->task_daemon_flags |=
8112 					    LOOP_RESYNC_ACTIVE;
8113 					TASK_DAEMON_UNLOCK(ha);
8114 					(void) ql_loop_resync(ha);
8115 					TASK_DAEMON_LOCK(ha);
8116 					loop_again = TRUE;
8117 				}
8118 			}
8119 		}
8120 
8121 		/* Port retry needed. */
8122 		if (ha->task_daemon_flags & PORT_RETRY_NEEDED) {
8123 			ha->task_daemon_flags &= ~PORT_RETRY_NEEDED;
8124 			ADAPTER_STATE_LOCK(ha);
8125 			ha->port_retry_timer = 0;
8126 			ADAPTER_STATE_UNLOCK(ha);
8127 
8128 			TASK_DAEMON_UNLOCK(ha);
8129 			ql_restart_queues(ha);
8130 			TASK_DAEMON_LOCK(ha);
8131 			loop_again = B_TRUE;
8132 		}
8133 
8134 		/* iiDMA setting needed? */
8135 		if (ha->task_daemon_flags & TD_IIDMA_NEEDED) {
8136 			ha->task_daemon_flags &= ~TD_IIDMA_NEEDED;
8137 
8138 			TASK_DAEMON_UNLOCK(ha);
8139 			ql_iidma(ha);
8140 			TASK_DAEMON_LOCK(ha);
8141 			loop_again = B_TRUE;
8142 		}
8143 
8144 		head = &ha->callback_queue;
8145 		if (head->first != NULL) {
8146 			sp = head->first->base_address;
8147 			link = &sp->cmd;
8148 
8149 			/* Dequeue command. */
8150 			ql_remove_link(head, link);
8151 
8152 			/* Release task daemon lock. */
8153 			TASK_DAEMON_UNLOCK(ha);
8154 
8155 			ASSERT((sp->flags & (SRB_IN_DEVICE_QUEUE |
8156 			    SRB_IN_TOKEN_ARRAY)) == 0);
8157 
8158 			/* Do callback. */
8159 			if (sp->flags & SRB_UB_CALLBACK) {
8160 				ql_unsol_callback(sp);
8161 			} else {
8162 				(*sp->pkt->pkt_comp)(sp->pkt);
8163 			}
8164 
8165 			/* Acquire task daemon lock. */
8166 			TASK_DAEMON_LOCK(ha);
8167 
8168 			loop_again = TRUE;
8169 		}
8170 
8171 	} while (loop_again);
8172 }
8173 
8174 /*
8175  * ql_idle_check
8176  *	Test for adapter is alive and well.
8177  *
8178  * Input:
8179  *	ha:	adapter state pointer.
8180  *
8181  * Context:
8182  *	Kernel context.
8183  */
8184 static void
8185 ql_idle_check(ql_adapter_state_t *ha)
8186 {
8187 	ddi_devstate_t	state;
8188 	int		rval;
8189 	ql_mbx_data_t	mr;
8190 
8191 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8192 
8193 	/* Firmware Ready Test. */
8194 	rval = ql_get_firmware_state(ha, &mr);
8195 	if (!(ha->task_daemon_flags & QL_SUSPENDED) &&
8196 	    (rval != QL_SUCCESS || mr.mb[1] != FSTATE_READY)) {
8197 		EL(ha, "failed, Firmware Ready Test = %xh\n", rval);
8198 		state = ddi_get_devstate(ha->dip);
8199 		if (state == DDI_DEVSTATE_UP) {
8200 			/*EMPTY*/
8201 			ddi_dev_report_fault(ha->dip, DDI_SERVICE_DEGRADED,
8202 			    DDI_DEVICE_FAULT, "Firmware Ready Test failed");
8203 		}
8204 		TASK_DAEMON_LOCK(ha);
8205 		if (!(ha->task_daemon_flags & ABORT_ISP_ACTIVE)) {
8206 			EL(ha, "fstate_ready, isp_abort_needed\n");
8207 			ha->task_daemon_flags |= ISP_ABORT_NEEDED;
8208 		}
8209 		TASK_DAEMON_UNLOCK(ha);
8210 	}
8211 
8212 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8213 }
8214 
8215 /*
8216  * ql_unsol_callback
8217  *	Handle unsolicited buffer callbacks.
8218  *
8219  * Input:
8220  *	ha = adapter state pointer.
8221  *	sp = srb pointer.
8222  *
8223  * Context:
8224  *	Kernel context.
8225  */
static void
ql_unsol_callback(ql_srb_t *sp)
{
	fc_affected_id_t	*af;
	fc_unsol_buf_t		*ubp;
	uchar_t			r_ctl;
	uchar_t			ls_code;
	ql_tgt_t		*tq;
	ql_adapter_state_t	*ha = sp->ha, *pha = sp->ha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	ubp = ha->ub_array[sp->handle];
	r_ctl = ubp->ub_frame.r_ctl;
	ls_code = ubp->ub_buffer[0];

	if (sp->lun_queue == NULL) {
		tq = NULL;
	} else {
		tq = sp->lun_queue->target_queue;
	}

	QL_UB_LOCK(ha);
	if (sp->flags & SRB_UB_FREE_REQUESTED ||
	    pha->task_daemon_flags & TASK_DAEMON_POWERING_DOWN) {
		/* Buffer being freed or adapter powering down: return it. */
		sp->flags &= ~(SRB_UB_IN_ISP | SRB_UB_CALLBACK |
		    SRB_UB_RSCN | SRB_UB_FCP | SRB_UB_ACQUIRED);
		sp->flags |= SRB_UB_IN_FCA;
		QL_UB_UNLOCK(ha);
		return;
	}

	/* Process RSCN */
	if (sp->flags & SRB_UB_RSCN) {
		int sendup = 1;

		/*
		 * Defer RSCN posting until commands return
		 */
		QL_UB_UNLOCK(ha);

		af = (fc_affected_id_t *)((caddr_t)ubp->ub_buffer + 4);

		/* Abort outstanding commands */
		sendup = ql_process_rscn(ha, af);
		if (sendup == 0) {

			/*
			 * Re-queue this callback so the RSCN is posted
			 * on a later pass, after outstanding IO drains.
			 */
			TASK_DAEMON_LOCK(ha);
			ql_add_link_b(&pha->callback_queue, &sp->cmd);
			TASK_DAEMON_UNLOCK(ha);

			/*
			 * Wait for commands to drain in F/W (doesn't take
			 * more than a few milliseconds)
			 */
			ql_delay(ha, 10000);

			QL_PRINT_2(CE_CONT, "(%d,%d): done rscn_sendup=0, "
			    "fmt=%xh, d_id=%xh\n", ha->instance, ha->vp_index,
			    af->aff_format, af->aff_d_id);
			return;
		}

		QL_UB_LOCK(ha);

		EL(ha, "sending unsol rscn, fmt=%xh, d_id=%xh to transport\n",
		    af->aff_format, af->aff_d_id);
	}

	/* Process UNSOL LOGO */
	if ((r_ctl == R_CTL_ELS_REQ) && (ls_code == LA_ELS_LOGO)) {
		QL_UB_UNLOCK(ha);

		/* Re-queue the callback while the device still has IO out. */
		if (tq && (ql_process_logo_for_device(ha, tq) == 0)) {
			TASK_DAEMON_LOCK(ha);
			ql_add_link_b(&pha->callback_queue, &sp->cmd);
			TASK_DAEMON_UNLOCK(ha);
			QL_PRINT_2(CE_CONT, "(%d,%d): logo_sendup=0, d_id=%xh"
			    "\n", ha->instance, ha->vp_index, tq->d_id.b24);
			return;
		}

		QL_UB_LOCK(ha);
		EL(ha, "sending unsol logout for %xh to transport\n",
		    ubp->ub_frame.s_id);
	}

	sp->flags &= ~(SRB_UB_IN_FCA | SRB_UB_IN_ISP | SRB_UB_RSCN |
	    SRB_UB_FCP);

	/* Make DMA'd IP/SNAP payload visible to the CPU before handoff. */
	if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
		(void) ddi_dma_sync(sp->ub_buffer.dma_handle, 0,
		    ubp->ub_bufsize, DDI_DMA_SYNC_FORCPU);
	}
	QL_UB_UNLOCK(ha);

	/* Hand the unsolicited buffer up to the FC transport. */
	(ha->bind_info.port_unsol_cb)(ha->bind_info.port_handle,
	    ubp, sp->ub_type);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
8327 
8328 /*
8329  * ql_send_logo
8330  *
8331  * Input:
8332  *	ha:	adapter state pointer.
8333  *	tq:	target queue pointer.
8334  *	done_q:	done queue pointer.
8335  *
8336  * Context:
8337  *	Interrupt or Kernel context, no mailbox commands allowed.
8338  */
/*
 * Builds a synthetic unsolicited LOGO (source id set to the target's
 * d_id) and posts it to done_q, or to the task daemon when done_q is
 * NULL, so the FC transport sees the port as logged out.
 */
void
ql_send_logo(ql_adapter_state_t *vha, ql_tgt_t *tq, ql_head_t *done_q)
{
	fc_unsol_buf_t		*ubp;
	ql_srb_t		*sp;
	la_els_logo_t		*payload;
	ql_adapter_state_t	*ha = vha->pha;

	/* Skip unassigned and broadcast destination ids. */
	if ((tq->d_id.b24 == 0) || (tq->d_id.b24 == 0xffffff)) {
		return;
	}

	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
	    tq->d_id.b24);

	if ((tq->flags & (TQF_RSCN_RCVD | TQF_PLOGI_PROGRS)) == 0 &&
	    tq->logout_sent == 0 && (ha->task_daemon_flags & LOOP_DOWN) == 0) {

		/* Locate a buffer to use. */
		ubp = ql_get_unsolicited_buffer(vha, FC_TYPE_EXTENDED_LS);
		if (ubp == NULL) {
			EL(vha, "Failed, get_unsolicited_buffer\n");
			return;
		}

		/* Mark target as needing re-login before more commands. */
		DEVICE_QUEUE_LOCK(tq);
		tq->flags |= TQF_NEED_AUTHENTICATION;
		tq->logout_sent++;
		DEVICE_QUEUE_UNLOCK(tq);

		EL(vha, "Received LOGO from = %xh\n", tq->d_id.b24);

		sp = ubp->ub_fca_private;

		/* Set header. */
		ubp->ub_frame.d_id = vha->d_id.b24;
		ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
		ubp->ub_frame.s_id = tq->d_id.b24;
		ubp->ub_frame.rsvd = 0;
		ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
		    F_CTL_SEQ_INITIATIVE;
		ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
		ubp->ub_frame.seq_cnt = 0;
		ubp->ub_frame.df_ctl = 0;
		ubp->ub_frame.seq_id = 0;
		ubp->ub_frame.rx_id = 0xffff;
		ubp->ub_frame.ox_id = 0xffff;

		/* set payload. */
		payload = (la_els_logo_t *)ubp->ub_buffer;
		bzero(payload, sizeof (la_els_logo_t));
		/* Make sure ls_code in payload is always big endian */
		ubp->ub_buffer[0] = LA_ELS_LOGO;
		ubp->ub_buffer[1] = 0;
		ubp->ub_buffer[2] = 0;
		ubp->ub_buffer[3] = 0;
		bcopy(&vha->loginparams.node_ww_name.raw_wwn[0],
		    &payload->nport_ww_name.raw_wwn[0], 8);
		payload->nport_id.port_id = tq->d_id.b24;

		QL_UB_LOCK(ha);
		sp->flags |= SRB_UB_CALLBACK;
		QL_UB_UNLOCK(ha);
		/* Attach a LUN queue so completion handling has a target. */
		if (tq->lun_queues.first != NULL) {
			sp->lun_queue = (tq->lun_queues.first)->base_address;
		} else {
			sp->lun_queue = ql_lun_queue(vha, tq, 0);
		}
		/* Deliver via caller's done queue or the task daemon. */
		if (done_q) {
			ql_add_link_b(done_q, &sp->cmd);
		} else {
			ql_awaken_task_daemon(ha, sp, 0, 0);
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
8416 
8417 static int
8418 ql_process_logo_for_device(ql_adapter_state_t *ha, ql_tgt_t *tq)
8419 {
8420 	port_id_t	d_id;
8421 	ql_srb_t	*sp;
8422 	ql_link_t	*link;
8423 	int		sendup = 1;
8424 
8425 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8426 
8427 	DEVICE_QUEUE_LOCK(tq);
8428 	if (tq->outcnt) {
8429 		DEVICE_QUEUE_UNLOCK(tq);
8430 		sendup = 0;
8431 		(void) ql_abort_device(ha, tq, 1);
8432 		ql_delay(ha, 10000);
8433 	} else {
8434 		DEVICE_QUEUE_UNLOCK(tq);
8435 		TASK_DAEMON_LOCK(ha);
8436 
8437 		for (link = ha->pha->callback_queue.first; link != NULL;
8438 		    link = link->next) {
8439 			sp = link->base_address;
8440 			if (sp->flags & SRB_UB_CALLBACK) {
8441 				continue;
8442 			}
8443 			d_id.b24 = sp->pkt->pkt_cmd_fhdr.d_id;
8444 
8445 			if (tq->d_id.b24 == d_id.b24) {
8446 				sendup = 0;
8447 				break;
8448 			}
8449 		}
8450 
8451 		TASK_DAEMON_UNLOCK(ha);
8452 	}
8453 
8454 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8455 
8456 	return (sendup);
8457 }
8458 
8459 static int
8460 ql_send_plogi(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_head_t *done_q)
8461 {
8462 	fc_unsol_buf_t		*ubp;
8463 	ql_srb_t		*sp;
8464 	la_els_logi_t		*payload;
8465 	class_svc_param_t	*class3_param;
8466 
8467 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8468 
8469 	if ((tq->flags & TQF_RSCN_RCVD) || (ha->task_daemon_flags &
8470 	    LOOP_DOWN)) {
8471 		EL(ha, "Failed, tqf=%xh\n", tq->flags);
8472 		return (QL_FUNCTION_FAILED);
8473 	}
8474 
8475 	/* Locate a buffer to use. */
8476 	ubp = ql_get_unsolicited_buffer(ha, FC_TYPE_EXTENDED_LS);
8477 	if (ubp == NULL) {
8478 		EL(ha, "Failed\n");
8479 		return (QL_FUNCTION_FAILED);
8480 	}
8481 
8482 	QL_PRINT_3(CE_CONT, "(%d): Received LOGO from = %xh\n",
8483 	    ha->instance, tq->d_id.b24);
8484 
8485 	sp = ubp->ub_fca_private;
8486 
8487 	/* Set header. */
8488 	ubp->ub_frame.d_id = ha->d_id.b24;
8489 	ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
8490 	ubp->ub_frame.s_id = tq->d_id.b24;
8491 	ubp->ub_frame.rsvd = 0;
8492 	ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
8493 	    F_CTL_SEQ_INITIATIVE;
8494 	ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
8495 	ubp->ub_frame.seq_cnt = 0;
8496 	ubp->ub_frame.df_ctl = 0;
8497 	ubp->ub_frame.seq_id = 0;
8498 	ubp->ub_frame.rx_id = 0xffff;
8499 	ubp->ub_frame.ox_id = 0xffff;
8500 
8501 	/* set payload. */
8502 	payload = (la_els_logi_t *)ubp->ub_buffer;
8503 	bzero(payload, sizeof (payload));
8504 
8505 	payload->ls_code.ls_code = LA_ELS_PLOGI;
8506 	payload->common_service.fcph_version = 0x2006;
8507 	payload->common_service.cmn_features = 0x8800;
8508 
8509 	CFG_IST(ha, CFG_CTRL_2425) ?
8510 	    (payload->common_service.rx_bufsize = CHAR_TO_SHORT(
8511 	    ha->init_ctrl_blk.cb24.max_frame_length[0],
8512 	    ha->init_ctrl_blk.cb24.max_frame_length[1])) :
8513 	    (payload->common_service.rx_bufsize = CHAR_TO_SHORT(
8514 	    ha->init_ctrl_blk.cb.max_frame_length[0],
8515 	    ha->init_ctrl_blk.cb.max_frame_length[1]));
8516 
8517 	payload->common_service.conc_sequences = 0xff;
8518 	payload->common_service.relative_offset = 0x03;
8519 	payload->common_service.e_d_tov = 0x7d0;
8520 
8521 	bcopy((void *)&tq->port_name[0],
8522 	    (void *)&payload->nport_ww_name.raw_wwn[0], 8);
8523 
8524 	bcopy((void *)&tq->node_name[0],
8525 	    (void *)&payload->node_ww_name.raw_wwn[0], 8);
8526 
8527 	class3_param = (class_svc_param_t *)&payload->class_3;
8528 	class3_param->class_valid_svc_opt = 0x8000;
8529 	class3_param->recipient_ctl = tq->class3_recipient_ctl;
8530 	class3_param->rcv_data_size = tq->class3_rcv_data_size;
8531 	class3_param->conc_sequences = tq->class3_conc_sequences;
8532 	class3_param->open_sequences_per_exch =
8533 	    tq->class3_open_sequences_per_exch;
8534 
8535 	QL_UB_LOCK(ha);
8536 	sp->flags |= SRB_UB_CALLBACK;
8537 	QL_UB_UNLOCK(ha);
8538 
8539 	if (done_q) {
8540 		ql_add_link_b(done_q, &sp->cmd);
8541 	} else {
8542 		ql_awaken_task_daemon(ha, sp, 0, 0);
8543 	}
8544 
8545 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8546 
8547 	return (QL_SUCCESS);
8548 }
8549 
8550 /*
8551  * Abort outstanding commands in the Firmware, clear internally
8552  * queued commands in the driver, Synchronize the target with
8553  * the Firmware
8554  */
int
ql_abort_device(ql_adapter_state_t *ha, ql_tgt_t *tq, int drain)
{
	ql_link_t	*link, *link2;
	ql_lun_t	*lq;
	int		rval = QL_SUCCESS;
	ql_srb_t	*sp;
	ql_head_t	done_q = { NULL, NULL };

	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);

	/*
	 * First clear, internally queued commands
	 */
	DEVICE_QUEUE_LOCK(tq);
	for (link = tq->lun_queues.first; link != NULL; link = link->next) {
		lq = link->base_address;

		/* Advance link2 before unlinking so the walk survives it. */
		link2 = lq->cmd.first;
		while (link2 != NULL) {
			sp = link2->base_address;
			link2 = link2->next;

			/* Skip commands already being aborted. */
			if (sp->flags & SRB_ABORT) {
				continue;
			}

			/* Remove srb from device command queue. */
			ql_remove_link(&lq->cmd, &sp->cmd);
			sp->flags &= ~SRB_IN_DEVICE_QUEUE;

			/* Set ending status. */
			sp->pkt->pkt_reason = CS_ABORTED;

			/* Call done routine to handle completions. */
			ql_add_link_b(&done_q, &sp->cmd);
		}
	}
	DEVICE_QUEUE_UNLOCK(tq);

	/* Complete the aborted commands outside the device queue lock. */
	if (done_q.first != NULL) {
		ql_done(done_q.first);
	}

	/* Then abort whatever is still outstanding in the firmware. */
	if (drain && VALID_TARGET_ID(ha, tq->loop_id) && PD_PORT_LOGIN(tq)) {
		rval = ql_abort_target(ha, tq, 0);
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh, d_id=%xh\n", rval, tq->d_id.b24);
	} else {
		/*EMPTY*/
		QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance,
		    ha->vp_index);
	}

	return (rval);
}
8613 
8614 /*
8615  * ql_rcv_rscn_els
8616  *	Processes received RSCN extended link service.
8617  *
8618  * Input:
8619  *	ha:	adapter state pointer.
8620  *	mb:	array containing input mailbox registers.
8621  *	done_q:	done queue pointer.
8622  *
8623  * Context:
8624  *	Interrupt or Kernel context, no mailbox commands allowed.
8625  */
void
ql_rcv_rscn_els(ql_adapter_state_t *ha, uint16_t *mb, ql_head_t *done_q)
{
	fc_unsol_buf_t		*ubp;
	ql_srb_t		*sp;
	fc_rscn_t		*rn;
	fc_affected_id_t	*af;
	port_id_t		d_id;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Locate a buffer to use. */
	ubp = ql_get_unsolicited_buffer(ha, FC_TYPE_EXTENDED_LS);
	if (ubp != NULL) {
		sp = ubp->ub_fca_private;

		/* Set header; the RSCN appears from the fabric controller. */
		ubp->ub_frame.d_id = ha->d_id.b24;
		ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
		ubp->ub_frame.s_id = FS_FABRIC_CONTROLLER;
		ubp->ub_frame.rsvd = 0;
		ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
		    F_CTL_SEQ_INITIATIVE;
		ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
		ubp->ub_frame.seq_cnt = 0;
		ubp->ub_frame.df_ctl = 0;
		ubp->ub_frame.seq_id = 0;
		ubp->ub_frame.rx_id = 0xffff;
		ubp->ub_frame.ox_id = 0xffff;

		/* set payload. */
		rn = (fc_rscn_t *)ubp->ub_buffer;
		af = (fc_affected_id_t *)((caddr_t)ubp->ub_buffer + 4);

		rn->rscn_code = LA_ELS_RSCN;
		rn->rscn_len = 4;
		rn->rscn_payload_len = 8;
		/* Affected d_id and format arrive in mailbox registers 1-2. */
		d_id.b.al_pa = LSB(mb[2]);
		d_id.b.area = MSB(mb[2]);
		d_id.b.domain =	LSB(mb[1]);
		af->aff_d_id = d_id.b24;
		af->aff_format = MSB(mb[1]);

		EL(ha, "LA_ELS_RSCN fmt=%xh, d_id=%xh\n", af->aff_format,
		    af->aff_d_id);

		/* Flag the affected target queues before notifying. */
		ql_update_rscn(ha, af);

		QL_UB_LOCK(ha);
		sp->flags |= SRB_UB_CALLBACK | SRB_UB_RSCN;
		QL_UB_UNLOCK(ha);
		ql_add_link_b(done_q, &sp->cmd);
	}

	if (ubp == NULL) {
		EL(ha, "Failed, get_unsolicited_buffer\n");
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
}
8687 
8688 /*
8689  * ql_update_rscn
8690  *	Update devices from received RSCN.
8691  *
8692  * Input:
8693  *	ha:	adapter state pointer.
8694  *	af:	pointer to RSCN data.
8695  *
8696  * Context:
8697  *	Interrupt or Kernel context, no mailbox commands allowed.
8698  */
8699 static void
8700 ql_update_rscn(ql_adapter_state_t *ha, fc_affected_id_t *af)
8701 {
8702 	ql_link_t	*link;
8703 	uint16_t	index;
8704 	ql_tgt_t	*tq;
8705 
8706 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8707 
8708 	if (af->aff_format == FC_RSCN_PORT_ADDRESS) {
8709 		port_id_t d_id;
8710 
8711 		d_id.r.rsvd_1 = 0;
8712 		d_id.b24 = af->aff_d_id;
8713 
8714 		tq = ql_d_id_to_queue(ha, d_id);
8715 		if (tq) {
8716 			EL(ha, "SD_RSCN_RCVD %xh RPA\n", d_id.b24);
8717 			DEVICE_QUEUE_LOCK(tq);
8718 			tq->flags |= TQF_RSCN_RCVD;
8719 			DEVICE_QUEUE_UNLOCK(tq);
8720 		}
8721 		QL_PRINT_3(CE_CONT, "(%d): FC_RSCN_PORT_ADDRESS done\n",
8722 		    ha->instance);
8723 
8724 		return;
8725 	}
8726 
8727 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
8728 		for (link = ha->dev[index].first; link != NULL;
8729 		    link = link->next) {
8730 			tq = link->base_address;
8731 
8732 			switch (af->aff_format) {
8733 			case FC_RSCN_FABRIC_ADDRESS:
8734 				if (!RESERVED_LOOP_ID(ha, tq->loop_id)) {
8735 					EL(ha, "SD_RSCN_RCVD %xh RFA\n",
8736 					    tq->d_id.b24);
8737 					DEVICE_QUEUE_LOCK(tq);
8738 					tq->flags |= TQF_RSCN_RCVD;
8739 					DEVICE_QUEUE_UNLOCK(tq);
8740 				}
8741 				break;
8742 
8743 			case FC_RSCN_AREA_ADDRESS:
8744 				if ((tq->d_id.b24 & 0xffff00) == af->aff_d_id) {
8745 					EL(ha, "SD_RSCN_RCVD %xh RAA\n",
8746 					    tq->d_id.b24);
8747 					DEVICE_QUEUE_LOCK(tq);
8748 					tq->flags |= TQF_RSCN_RCVD;
8749 					DEVICE_QUEUE_UNLOCK(tq);
8750 				}
8751 				break;
8752 
8753 			case FC_RSCN_DOMAIN_ADDRESS:
8754 				if ((tq->d_id.b24 & 0xff0000) == af->aff_d_id) {
8755 					EL(ha, "SD_RSCN_RCVD %xh RDA\n",
8756 					    tq->d_id.b24);
8757 					DEVICE_QUEUE_LOCK(tq);
8758 					tq->flags |= TQF_RSCN_RCVD;
8759 					DEVICE_QUEUE_UNLOCK(tq);
8760 				}
8761 				break;
8762 
8763 			default:
8764 				break;
8765 			}
8766 		}
8767 	}
8768 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8769 }
8770 
8771 /*
8772  * ql_process_rscn
8773  *
8774  * Input:
8775  *	ha:	adapter state pointer.
8776  *	af:	RSCN payload pointer.
8777  *
8778  * Context:
8779  *	Kernel context.
8780  */
static int
ql_process_rscn(ql_adapter_state_t *ha, fc_affected_id_t *af)
{
	int		sendit;
	int		sendup = 1;	/* 1 = pass the RSCN up to transport */
	ql_link_t	*link;
	uint16_t	index;
	ql_tgt_t	*tq;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Single port affected: process just that one queue. */
	if (af->aff_format == FC_RSCN_PORT_ADDRESS) {
		port_id_t d_id;

		d_id.r.rsvd_1 = 0;
		d_id.b24 = af->aff_d_id;

		tq = ql_d_id_to_queue(ha, d_id);
		if (tq) {
			sendup = ql_process_rscn_for_device(ha, tq);
		}

		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

		return (sendup);
	}

	/*
	 * Area/domain/fabric format: process every matching device.
	 * Once any device suppresses the RSCN (sendit == 0), sendup
	 * stays 0 for the rest of the scan.
	 */
	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {

			tq = link->base_address;
			if (tq == NULL) {
				continue;
			}

			switch (af->aff_format) {
			case FC_RSCN_FABRIC_ADDRESS:
				if (!RESERVED_LOOP_ID(ha, tq->loop_id)) {
					sendit = ql_process_rscn_for_device(
					    ha, tq);
					if (sendup) {
						sendup = sendit;
					}
				}
				break;

			case FC_RSCN_AREA_ADDRESS:
				if ((tq->d_id.b24 & 0xffff00) ==
				    af->aff_d_id) {
					sendit = ql_process_rscn_for_device(
					    ha, tq);

					if (sendup) {
						sendup = sendit;
					}
				}
				break;

			case FC_RSCN_DOMAIN_ADDRESS:
				if ((tq->d_id.b24 & 0xff0000) ==
				    af->aff_d_id) {
					sendit = ql_process_rscn_for_device(
					    ha, tq);

					if (sendup) {
						sendup = sendit;
					}
				}
				break;

			default:
				break;
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (sendup);
}
8862 
8863 /*
8864  * ql_process_rscn_for_device
8865  *
8866  * Input:
8867  *	ha:	adapter state pointer.
8868  *	tq:	target queue pointer.
8869  *
8870  * Context:
8871  *	Kernel context.
8872  */
static int
ql_process_rscn_for_device(ql_adapter_state_t *ha, ql_tgt_t *tq)
{
	int sendup = 1;	/* 1 = pass the RSCN up to the transport */

	DEVICE_QUEUE_LOCK(tq);

	/*
	 * Let FCP-2 compliant devices continue I/Os
	 * with their low level recoveries.
	 *
	 * NOTE(review): BIT_8 of PRLI service parameter word 3 is
	 * presumably the retry (FCP-2) capability bit — confirm.
	 */
	if (((tq->flags & TQF_INITIATOR_DEVICE) == 0) &&
	    (tq->prli_svc_param_word_3 & BIT_8)) {
		/*
		 * Cause ADISC to go out
		 */
		DEVICE_QUEUE_UNLOCK(tq);

		/* Lock is dropped across the port database refresh. */
		(void) ql_get_port_database(ha, tq, PDF_NONE);

		DEVICE_QUEUE_LOCK(tq);
		tq->flags &= ~TQF_RSCN_RCVD;

	} else if (tq->loop_id != PORT_NO_LOOP_ID) {
		if (tq->d_id.b24 != BROADCAST_ADDR) {
			tq->flags |= TQF_NEED_AUTHENTICATION;
		}

		DEVICE_QUEUE_UNLOCK(tq);

		/* Flush driver/firmware queues before deciding. */
		(void) ql_abort_device(ha, tq, 1);

		DEVICE_QUEUE_LOCK(tq);

		/* Commands still outstanding suppress the RSCN for now. */
		if (tq->outcnt) {
			sendup = 0;
		} else {
			tq->flags &= ~TQF_RSCN_RCVD;
		}
	} else {
		/* No loop id assigned: nothing to abort, just clear flag. */
		tq->flags &= ~TQF_RSCN_RCVD;
	}

	if (sendup) {
		if (tq->d_id.b24 != BROADCAST_ADDR) {
			tq->flags |= TQF_NEED_AUTHENTICATION;
		}
	}

	DEVICE_QUEUE_UNLOCK(tq);

	return (sendup);
}
8926 
/*
 * ql_handle_rscn_update
 *	Scans the firmware d_id list for devices that appeared after an
 *	RSCN, creates device queues for the new ones and sends up a
 *	fabricated PLOGI for each newly logged-in device.
 *
 * Input:
 *	ha:	adapter state pointer.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_handle_rscn_update(ql_adapter_state_t *ha)
{
	int			rval;
	ql_tgt_t		*tq;
	uint16_t		index, loop_id;
	ql_dev_id_list_t	*list;
	uint32_t		list_size;
	port_id_t		d_id;
	ql_mbx_data_t		mr;
	ql_head_t		done_q = { NULL, NULL };

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
	list = kmem_zalloc(list_size, KM_SLEEP);
	/* NOTE(review): KM_SLEEP never returns NULL; check is defensive. */
	if (list == NULL) {
		rval = QL_MEMORY_ALLOC_FAILED;
		EL(ha, "kmem_zalloc failed=%xh\n", rval);
		return (rval);
	}

	/*
	 * Get data from RISC code d_id list to init each device queue.
	 */
	rval = ql_get_id_list(ha, (caddr_t)list, list_size, &mr);
	if (rval != QL_SUCCESS) {
		kmem_free(list, list_size);
		EL(ha, "get_id_list failed=%xh\n", rval);
		return (rval);
	}

	/* Acquire adapter state lock. */
	ADAPTER_STATE_LOCK(ha);

	/* Check for new devices; mr.mb[1] holds the entry count. */
	for (index = 0; index < mr.mb[1]; index++) {
		ql_dev_list(ha, list, index, &d_id, &loop_id);

		if (VALID_DEVICE_ID(ha, loop_id)) {
			d_id.r.rsvd_1 = 0;

			/* Already known devices need no further work. */
			tq = ql_d_id_to_queue(ha, d_id);
			if (tq != NULL) {
				continue;
			}

			tq = ql_dev_init(ha, d_id, loop_id);

			/* Test for fabric device. */
			if (d_id.b.domain != ha->d_id.b.domain ||
			    d_id.b.area != ha->d_id.b.area) {
				tq->flags |= TQF_FABRIC_DEVICE;
			}

			/* State lock is dropped for the mailbox command. */
			ADAPTER_STATE_UNLOCK(ha);
			if (ql_get_port_database(ha, tq, PDF_NONE) !=
			    QL_SUCCESS) {
				tq->loop_id = PORT_NO_LOOP_ID;
			}
			ADAPTER_STATE_LOCK(ha);

			/*
			 * Send up a PLOGI about the new device
			 */
			if (VALID_DEVICE_ID(ha, tq->loop_id)) {
				(void) ql_send_plogi(ha, tq, &done_q);
			}
		}
	}

	/* Release adapter state lock. */
	ADAPTER_STATE_UNLOCK(ha);

	/* Complete the fabricated PLOGIs outside the state lock. */
	if (done_q.first != NULL) {
		ql_done(done_q.first);
	}

	kmem_free(list, list_size);

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}

	return (rval);
}
9016 
9017 /*
9018  * ql_free_unsolicited_buffer
9019  *	Frees allocated buffer.
9020  *
9021  * Input:
9022  *	ha = adapter state pointer.
9023  *	index = buffer array index.
9024  *	ADAPTER_STATE_LOCK must be already obtained.
9025  *
9026  * Context:
9027  *	Kernel context.
9028  */
static void
ql_free_unsolicited_buffer(ql_adapter_state_t *ha, fc_unsol_buf_t *ubp)
{
	ql_srb_t	*sp;
	int		status;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	sp = ubp->ub_fca_private;
	if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
		/* Disconnect IP from system buffers. */
		if (ha->flags & IP_INITIALIZED) {
			/* State lock is dropped across the shutdown call. */
			ADAPTER_STATE_UNLOCK(ha);
			status = ql_shutdown_ip(ha);
			ADAPTER_STATE_LOCK(ha);
			if (status != QL_SUCCESS) {
				cmn_err(CE_WARN,
				    "!Qlogic %s(%d): Failed to shutdown IP",
				    QL_NAME, ha->instance);
				/*
				 * NOTE(review): buffer, srb and the
				 * ub_allocated count are left untouched on
				 * this path — presumably still owned by the
				 * IP code; confirm.
				 */
				return;
			}

			ha->flags &= ~IP_ENABLED;
		}

		/* IP buffers are DMA memory; others came from kmem. */
		ql_free_phys(ha, &sp->ub_buffer);
	} else {
		kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
	}

	kmem_free(sp, sizeof (ql_srb_t));
	kmem_free(ubp, sizeof (fc_unsol_buf_t));

	/* Maintain the adapter's allocated-buffer count. */
	if (ha->ub_allocated != 0) {
		ha->ub_allocated--;
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
9068 
9069 /*
9070  * ql_get_unsolicited_buffer
9071  *	Locates a free unsolicited buffer.
9072  *
9073  * Input:
9074  *	ha = adapter state pointer.
9075  *	type = buffer type.
9076  *
9077  * Returns:
9078  *	Unsolicited buffer pointer.
9079  *
9080  * Context:
9081  *	Interrupt or Kernel context, no mailbox commands allowed.
9082  */
9083 fc_unsol_buf_t *
9084 ql_get_unsolicited_buffer(ql_adapter_state_t *ha, uint32_t type)
9085 {
9086 	fc_unsol_buf_t	*ubp;
9087 	ql_srb_t	*sp;
9088 	uint16_t	index;
9089 
9090 	/* Locate a buffer to use. */
9091 	ubp = NULL;
9092 
9093 	QL_UB_LOCK(ha);
9094 	for (index = 0; index < QL_UB_LIMIT; index++) {
9095 		ubp = ha->ub_array[index];
9096 		if (ubp != NULL) {
9097 			sp = ubp->ub_fca_private;
9098 			if ((sp->ub_type == type) &&
9099 			    (sp->flags & SRB_UB_IN_FCA) &&
9100 			    (!(sp->flags & (SRB_UB_CALLBACK |
9101 			    SRB_UB_FREE_REQUESTED | SRB_UB_ACQUIRED)))) {
9102 				sp->flags |= SRB_UB_ACQUIRED;
9103 				ubp->ub_resp_flags = 0;
9104 				break;
9105 			}
9106 			ubp = NULL;
9107 		}
9108 	}
9109 	QL_UB_UNLOCK(ha);
9110 
9111 	if (ubp) {
9112 		ubp->ub_resp_token = NULL;
9113 		ubp->ub_class = FC_TRAN_CLASS3;
9114 	}
9115 
9116 	return (ubp);
9117 }
9118 
9119 /*
9120  * ql_ub_frame_hdr
9121  *	Processes received unsolicited buffers from ISP.
9122  *
9123  * Input:
9124  *	ha:	adapter state pointer.
9125  *	tq:	target queue pointer.
9126  *	index:	unsolicited buffer array index.
9127  *	done_q:	done queue pointer.
9128  *
9129  * Returns:
9130  *	ql local function return status code.
9131  *
9132  * Context:
9133  *	Interrupt or Kernel context, no mailbox commands allowed.
9134  */
int
ql_ub_frame_hdr(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t index,
    ql_head_t *done_q)
{
	fc_unsol_buf_t	*ubp;
	ql_srb_t	*sp;
	uint16_t	loop_id;
	int		rval = QL_FUNCTION_FAILED;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	QL_UB_LOCK(ha);
	if (index >= QL_UB_LIMIT || (ubp = ha->ub_array[index]) == NULL) {
		EL(ha, "Invalid buffer index=%xh\n", index);
		QL_UB_UNLOCK(ha);
		return (rval);
	}

	sp = ubp->ub_fca_private;
	/* A free was requested while the buffer was in the ISP; reclaim it. */
	if (sp->flags & SRB_UB_FREE_REQUESTED) {
		EL(ha, "buffer freed index=%xh\n", index);
		sp->flags &= ~(SRB_UB_IN_ISP | SRB_UB_CALLBACK |
		    SRB_UB_RSCN | SRB_UB_FCP | SRB_UB_ACQUIRED);

		sp->flags |= SRB_UB_IN_FCA;

		QL_UB_UNLOCK(ha);
		return (rval);
	}

	/* Only IP buffers legitimately residing in the ISP are processed. */
	if ((sp->handle == index) &&
	    (sp->flags & SRB_UB_IN_ISP) &&
	    (sp->ub_type == FC_TYPE_IS8802_SNAP) &&
	    (!(sp->flags & SRB_UB_ACQUIRED))) {
		/* set broadcast D_ID */
		loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_2425) ?
		    BROADCAST_24XX_HDL : IP_BROADCAST_LOOP_ID);
		if (tq->ub_loop_id == loop_id) {
			if (ha->topology & QL_FL_PORT) {
				ubp->ub_frame.d_id = 0x000000;
			} else {
				ubp->ub_frame.d_id = 0xffffff;
			}
		} else {
			ubp->ub_frame.d_id = ha->d_id.b24;
		}
		/* Rebuild the FC frame header for the transport. */
		ubp->ub_frame.r_ctl = R_CTL_UNSOL_DATA;
		ubp->ub_frame.rsvd = 0;
		ubp->ub_frame.s_id = tq->d_id.b24;
		ubp->ub_frame.type = FC_TYPE_IS8802_SNAP;
		ubp->ub_frame.seq_cnt = tq->ub_seq_cnt;
		ubp->ub_frame.df_ctl = 0;
		ubp->ub_frame.seq_id = tq->ub_seq_id;
		ubp->ub_frame.rx_id = 0xffff;
		ubp->ub_frame.ox_id = 0xffff;
		/* Smaller of buffer size and remaining sequence length. */
		ubp->ub_bufsize = sp->ub_size < tq->ub_sequence_length ?
		    sp->ub_size : tq->ub_sequence_length;
		ubp->ub_frame.ro = tq->ub_frame_ro;

		/* Advance the per-target sequence reassembly state. */
		tq->ub_sequence_length = (uint16_t)
		    (tq->ub_sequence_length - ubp->ub_bufsize);
		tq->ub_frame_ro += ubp->ub_bufsize;
		tq->ub_seq_cnt++;

		if (tq->ub_seq_cnt == tq->ub_total_seg_cnt) {
			if (tq->ub_seq_cnt == 1) {
				/* Single-frame sequence. */
				ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
				    F_CTL_FIRST_SEQ | F_CTL_END_SEQ;
			} else {
				/* Last frame of a multi-frame sequence. */
				ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
				    F_CTL_END_SEQ;
			}
			tq->ub_total_seg_cnt = 0;
		} else if (tq->ub_seq_cnt == 1) {
			/*
			 * First frame of a multi-frame sequence.
			 * NOTE(review): df_ctl 0x20 presumably marks a
			 * network header present — confirm against FC-FS.
			 */
			ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
			    F_CTL_FIRST_SEQ;
			ubp->ub_frame.df_ctl = 0x20;
		}

		QL_PRINT_3(CE_CONT, "(%d): ub_frame.d_id=%xh\n",
		    ha->instance, ubp->ub_frame.d_id);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.s_id=%xh\n",
		    ha->instance, ubp->ub_frame.s_id);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.seq_cnt=%xh\n",
		    ha->instance, ubp->ub_frame.seq_cnt);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.seq_id=%xh\n",
		    ha->instance, ubp->ub_frame.seq_id);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.ro=%xh\n",
		    ha->instance, ubp->ub_frame.ro);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.f_ctl=%xh\n",
		    ha->instance, ubp->ub_frame.f_ctl);
		QL_PRINT_3(CE_CONT, "(%d): ub_bufsize=%xh\n",
		    ha->instance, ubp->ub_bufsize);
		QL_DUMP_3(ubp->ub_buffer, 8,
		    ubp->ub_bufsize < 64 ? ubp->ub_bufsize : 64);

		/* Hand the buffer to the transport via the done queue. */
		sp->flags |= SRB_UB_CALLBACK | SRB_UB_ACQUIRED;
		ql_add_link_b(done_q, &sp->cmd);
		rval = QL_SUCCESS;
	} else {
		/* Diagnose exactly why the buffer was rejected. */
		if (sp->handle != index) {
			EL(ha, "Bad index=%xh, expect=%xh\n", index,
			    sp->handle);
		}
		if ((sp->flags & SRB_UB_IN_ISP) == 0) {
			EL(ha, "buffer was already in driver, index=%xh\n",
			    index);
		}
		if ((sp->ub_type == FC_TYPE_IS8802_SNAP) == 0) {
			EL(ha, "buffer was not an IP buffer, index=%xh\n",
			    index);
		}
		if (sp->flags & SRB_UB_ACQUIRED) {
			EL(ha, "buffer was being used by driver, index=%xh\n",
			    index);
		}
	}
	QL_UB_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
9258 
9259 /*
9260  * ql_timer
9261  *	One second timer function.
9262  *
9263  * Input:
9264  *	ql_hba.first = first link in adapter list.
9265  *
9266  * Context:
9267  *	Interrupt context, no mailbox commands allowed.
9268  */
static void
ql_timer(void *arg)
{
	ql_link_t		*link;
	uint32_t		set_flags;
	uint32_t		reset_flags;
	ql_adapter_state_t	*ha = NULL, *vha;

	QL_PRINT_6(CE_CONT, "started\n");

	/* Acquire global state lock. */
	GLOBAL_STATE_LOCK();
	/* NULL timeout id means the timer is being stopped; bail out. */
	if (ql_timer_timeout_id == NULL) {
		/* Release global state lock. */
		GLOBAL_STATE_UNLOCK();
		return;
	}

	for (link = ql_hba.first; link != NULL; link = link->next) {
		ha = link->base_address;

		/* Skip adapter if suspended or stalled. */
		ADAPTER_STATE_LOCK(ha);
		if (ha->flags & ADAPTER_SUSPENDED ||
		    ha->task_daemon_flags & DRIVER_STALL) {
			ADAPTER_STATE_UNLOCK(ha);
			continue;
		}
		ha->flags |= ADAPTER_TIMER_BUSY;
		ADAPTER_STATE_UNLOCK(ha);

		/* Skip adapters that are powered down. */
		QL_PM_LOCK(ha);
		if (ha->power_level != PM_LEVEL_D0) {
			QL_PM_UNLOCK(ha);

			ADAPTER_STATE_LOCK(ha);
			ha->flags &= ~ADAPTER_TIMER_BUSY;
			ADAPTER_STATE_UNLOCK(ha);
			continue;
		}
		ha->busy++;
		QL_PM_UNLOCK(ha);

		set_flags = 0;
		reset_flags = 0;

		/* Port retry timer handler. */
		if (LOOP_READY(ha)) {
			ADAPTER_STATE_LOCK(ha);
			if (ha->port_retry_timer != 0) {
				ha->port_retry_timer--;
				if (ha->port_retry_timer == 0) {
					set_flags |= PORT_RETRY_NEEDED;
				}
			}
			ADAPTER_STATE_UNLOCK(ha);
		}

		/* Loop down timer handler. */
		if (LOOP_RECONFIGURE(ha) == 0) {
			if (ha->loop_down_timer > LOOP_DOWN_TIMER_END) {
				ha->loop_down_timer--;
				if (ha->loop_down_timer == LOOP_DOWN_RESET) {
					/* Optionally capture a fw dump. */
					if (CFG_IST(ha,
					    CFG_DUMP_LOOP_OFFLINE_TIMEOUT)) {
						(void) ql_binary_fw_dump(ha,
						    TRUE);
					}
					EL(ha, "loop_down_reset, "
					    "isp_abort_needed\n");
					set_flags |= ISP_ABORT_NEEDED;
				}
			}
			if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) {
				/* Command abort time handler. */
				if (ha->loop_down_timer ==
				    ha->loop_down_abort_time) {
					ADAPTER_STATE_LOCK(ha);
					ha->flags |= COMMAND_ABORT_TIMEOUT;
					ADAPTER_STATE_UNLOCK(ha);
					set_flags |= ABORT_QUEUES_NEEDED;
					EL(ha, "loop_down_abort_time, "
					    "abort_queues_needed\n");
				}

				/* Watchdog timer handler. */
				if (ha->watchdog_timer == 0) {
					ha->watchdog_timer = WATCHDOG_TIME;
				} else if (LOOP_READY(ha)) {
					ha->watchdog_timer--;
					if (ha->watchdog_timer == 0) {
						/* Run on each vport too. */
						for (vha = ha; vha != NULL;
						    vha = vha->vp_next) {
							ql_watchdog(vha,
							    &set_flags,
							    &reset_flags);
						}
						ha->watchdog_timer =
						    WATCHDOG_TIME;
					}
				}
			}
		}

		/* Idle timer handler. */
		if (!DRIVER_SUSPENDED(ha)) {
			if (++ha->idle_timer >= IDLE_CHECK_TIMER) {
#if defined(QL_DEBUG_LEVEL_6) || !defined(QL_DEBUG_LEVEL_3)
				set_flags |= TASK_DAEMON_IDLE_CHK_FLG;
#endif
				ha->idle_timer = 0;
			}
		}
		/* Kick the task daemon once with everything accumulated. */
		if (set_flags != 0 || reset_flags != 0) {
			ql_awaken_task_daemon(ha, NULL, set_flags,
			    reset_flags);
		}

		if (ha->xioctl->ledstate.BeaconState == BEACON_ON) {
			ql_blink_led(ha);
		}

		/* Update the IO stats */
		if (ha->xioctl->IOInputByteCnt >= 0x100000) {
			/* Fold whole megabytes into the MByte counter. */
			ha->xioctl->IOInputMByteCnt +=
			    (ha->xioctl->IOInputByteCnt / 0x100000);
			ha->xioctl->IOInputByteCnt %= 0x100000;
		}

		if (ha->xioctl->IOOutputByteCnt >= 0x100000) {
			ha->xioctl->IOOutputMByteCnt +=
			    (ha->xioctl->IOOutputByteCnt / 0x100000);
			ha->xioctl->IOOutputByteCnt %= 0x100000;
		}

		ADAPTER_STATE_LOCK(ha);
		ha->flags &= ~ADAPTER_TIMER_BUSY;
		ADAPTER_STATE_UNLOCK(ha);

		QL_PM_LOCK(ha);
		ha->busy--;
		QL_PM_UNLOCK(ha);
	}

	/* Restart timer, if not being stopped. */
	if (ql_timer_timeout_id != NULL) {
		ql_timer_timeout_id = timeout(ql_timer, arg, ql_timer_ticks);
	}

	/* Release global state lock. */
	GLOBAL_STATE_UNLOCK();

	QL_PRINT_6(CE_CONT, "done\n");
}
9423 
9424 /*
9425  * ql_timeout_insert
9426  *	Function used to insert a command block onto the
9427  *	watchdog timer queue.
9428  *
9429  *	Note: Must insure that pkt_time is not zero
9430  *			before calling ql_timeout_insert.
9431  *
9432  * Input:
9433  *	ha:	adapter state pointer.
9434  *	tq:	target queue pointer.
9435  *	sp:	SRB pointer.
9436  *	DEVICE_QUEUE_LOCK must be already obtained.
9437  *
9438  * Context:
9439  *	Kernel context.
9440  */
9441 /* ARGSUSED */
9442 static void
9443 ql_timeout_insert(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_srb_t *sp)
9444 {
9445 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9446 
9447 	if (sp->pkt->pkt_timeout != 0 && sp->pkt->pkt_timeout < 0x10000) {
9448 		/* Make sure timeout >= 2 * R_A_TOV */
9449 		sp->isp_timeout = (uint16_t)
9450 		    (sp->pkt->pkt_timeout < ha->r_a_tov ? ha->r_a_tov :
9451 		    sp->pkt->pkt_timeout);
9452 
9453 		/*
9454 		 * The WATCHDOG_TIME must be rounded up + 1.  As an example,
9455 		 * consider a 1 second timeout. If the WATCHDOG_TIME is 1, it
9456 		 * will expire in the next watchdog call, which could be in
9457 		 * 1 microsecond.
9458 		 *
9459 		 * Add 6 more to insure watchdog does not timeout at the same
9460 		 * time as ISP RISC code timeout.
9461 		 */
9462 		sp->wdg_q_time = (sp->isp_timeout + WATCHDOG_TIME - 1) /
9463 		    WATCHDOG_TIME;
9464 		sp->wdg_q_time += 6;
9465 
9466 		/* Save initial time for resetting watchdog time. */
9467 		sp->init_wdg_q_time = sp->wdg_q_time;
9468 
9469 		/* Insert command onto watchdog queue. */
9470 		ql_add_link_b(&tq->wdg, &sp->wdg);
9471 
9472 		sp->flags |= SRB_WATCHDOG_ENABLED;
9473 	} else {
9474 		sp->isp_timeout = 0;
9475 		sp->wdg_q_time = 0;
9476 		sp->init_wdg_q_time = 0;
9477 	}
9478 
9479 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9480 }
9481 
9482 /*
9483  * ql_watchdog
9484  *	Timeout handler that runs in interrupt context. The
9485  *	ql_adapter_state_t * argument is the parameter set up when the
9486  *	timeout was initialized (state structure pointer).
9487  *	Function used to update timeout values and if timeout
9488  *	has occurred command will be aborted.
9489  *
9490  * Input:
9491  *	ha:		adapter state pointer.
9492  *	set_flags:	task daemon flags to set.
9493  *	reset_flags:	task daemon flags to reset.
9494  *
9495  * Context:
9496  *	Interrupt context, no mailbox commands allowed.
9497  */
9498 static void
9499 ql_watchdog(ql_adapter_state_t *ha, uint32_t *set_flags, uint32_t *reset_flags)
9500 {
9501 	ql_srb_t	*sp;
9502 	ql_link_t	*link;
9503 	ql_link_t	*next_cmd;
9504 	ql_link_t	*next_device;
9505 	ql_tgt_t	*tq;
9506 	ql_lun_t	*lq;
9507 	uint16_t	index;
9508 	int		q_sane;
9509 
9510 	QL_PRINT_6(CE_CONT, "(%d): started\n", ha->instance);
9511 
9512 	/* Loop through all targets. */
9513 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
9514 		for (link = ha->dev[index].first; link != NULL;
9515 		    link = next_device) {
9516 			tq = link->base_address;
9517 
9518 			/* Try to acquire device queue lock. */
9519 			if (TRY_DEVICE_QUEUE_LOCK(tq) == 0) {
9520 				next_device = NULL;
9521 				continue;
9522 			}
9523 
9524 			next_device = link->next;
9525 
9526 			if (!(CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) &&
9527 			    (tq->port_down_retry_count == 0)) {
9528 				/* Release device queue lock. */
9529 				DEVICE_QUEUE_UNLOCK(tq);
9530 				continue;
9531 			}
9532 
9533 			/* Find out if this device is in a sane state. */
9534 			if (tq->flags & (TQF_RSCN_RCVD |
9535 			    TQF_NEED_AUTHENTICATION | TQF_QUEUE_SUSPENDED)) {
9536 				q_sane = 0;
9537 			} else {
9538 				q_sane = 1;
9539 			}
9540 			/* Loop through commands on watchdog queue. */
9541 			for (link = tq->wdg.first; link != NULL;
9542 			    link = next_cmd) {
9543 				next_cmd = link->next;
9544 				sp = link->base_address;
9545 				lq = sp->lun_queue;
9546 
9547 				/*
9548 				 * For SCSI commands, if everything seems to
9549 				 * be going fine and this packet is stuck
9550 				 * because of throttling at LUN or target
9551 				 * level then do not decrement the
9552 				 * sp->wdg_q_time
9553 				 */
9554 				if (ha->task_daemon_flags & STATE_ONLINE &&
9555 				    (sp->flags & SRB_ISP_STARTED) == 0 &&
9556 				    q_sane && sp->flags & SRB_FCP_CMD_PKT &&
9557 				    lq->lun_outcnt >= ha->execution_throttle) {
9558 					continue;
9559 				}
9560 
9561 				if (sp->wdg_q_time != 0) {
9562 					sp->wdg_q_time--;
9563 
9564 					/* Timeout? */
9565 					if (sp->wdg_q_time != 0) {
9566 						continue;
9567 					}
9568 
9569 					ql_remove_link(&tq->wdg, &sp->wdg);
9570 					sp->flags &= ~SRB_WATCHDOG_ENABLED;
9571 
9572 					if (sp->flags & SRB_ISP_STARTED) {
9573 						ql_cmd_timeout(ha, tq, sp,
9574 						    set_flags, reset_flags);
9575 
9576 						DEVICE_QUEUE_UNLOCK(tq);
9577 						tq = NULL;
9578 						next_cmd = NULL;
9579 						next_device = NULL;
9580 						index = DEVICE_HEAD_LIST_SIZE;
9581 					} else {
9582 						ql_cmd_timeout(ha, tq, sp,
9583 						    set_flags, reset_flags);
9584 					}
9585 				}
9586 			}
9587 
9588 			/* Release device queue lock. */
9589 			if (tq != NULL) {
9590 				DEVICE_QUEUE_UNLOCK(tq);
9591 			}
9592 		}
9593 	}
9594 
9595 	QL_PRINT_6(CE_CONT, "(%d): done\n", ha->instance);
9596 }
9597 
9598 /*
9599  * ql_cmd_timeout
9600  *	Command timeout handler.
9601  *
9602  * Input:
9603  *	ha:		adapter state pointer.
9604  *	tq:		target queue pointer.
9605  *	sp:		SRB pointer.
9606  *	set_flags:	task daemon flags to set.
9607  *	reset_flags:	task daemon flags to reset.
9608  *
9609  * Context:
9610  *	Interrupt context, no mailbox commands allowed.
9611  */
9612 /* ARGSUSED */
9613 static void
9614 ql_cmd_timeout(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_srb_t *sp,
9615     uint32_t *set_flags, uint32_t *reset_flags)
9616 {
9617 
9618 	if (!(sp->flags & SRB_ISP_STARTED)) {
9619 
9620 		EL(ha, "command timed out in driver = %ph\n", (void *)sp);
9621 
9622 		REQUEST_RING_LOCK(ha);
9623 
9624 		/* if it's on a queue */
9625 		if (sp->cmd.head) {
9626 			/*
9627 			 * The pending_cmds que needs to be
9628 			 * protected by the ring lock
9629 			 */
9630 			ql_remove_link(sp->cmd.head, &sp->cmd);
9631 		}
9632 		sp->flags &= ~SRB_IN_DEVICE_QUEUE;
9633 
9634 		/* Release device queue lock. */
9635 		REQUEST_RING_UNLOCK(ha);
9636 		DEVICE_QUEUE_UNLOCK(tq);
9637 
9638 		/* Set timeout status */
9639 		sp->pkt->pkt_reason = CS_TIMEOUT;
9640 
9641 		/* Ensure no retry */
9642 		sp->flags &= ~SRB_RETRY;
9643 
9644 		/* Call done routine to handle completion. */
9645 		ql_done(&sp->cmd);
9646 
9647 		DEVICE_QUEUE_LOCK(tq);
9648 	} else {
9649 		EL(ha, "command timed out in isp=%ph, osc=%ph, index=%xh, "
9650 		    "isp_abort_needed\n", (void *)sp,
9651 		    (void *)ha->outstanding_cmds[sp->handle & OSC_INDEX_MASK],
9652 		    sp->handle & OSC_INDEX_MASK);
9653 
9654 		/* Release device queue lock. */
9655 		DEVICE_QUEUE_UNLOCK(tq);
9656 
9657 		INTR_LOCK(ha);
9658 		ha->pha->xioctl->ControllerErrorCount++;
9659 		INTR_UNLOCK(ha);
9660 
9661 		/* Set ISP needs to be reset */
9662 		sp->flags |= SRB_COMMAND_TIMEOUT;
9663 
9664 		if (CFG_IST(ha, CFG_DUMP_DRIVER_COMMAND_TIMEOUT)) {
9665 			(void) ql_binary_fw_dump(ha, TRUE);
9666 		}
9667 
9668 		*set_flags |= ISP_ABORT_NEEDED;
9669 
9670 		DEVICE_QUEUE_LOCK(tq);
9671 	}
9672 }
9673 
9674 /*
9675  * ql_rst_aen
9676  *	Processes asynchronous reset.
9677  *
9678  * Input:
9679  *	ha = adapter state pointer.
9680  *
9681  * Context:
9682  *	Kernel context.
9683  */
9684 static void
9685 ql_rst_aen(ql_adapter_state_t *ha)
9686 {
9687 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9688 
9689 	/* Issue marker command. */
9690 	(void) ql_marker(ha, 0, 0, MK_SYNC_ALL);
9691 
9692 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9693 }
9694 
9695 /*
9696  * ql_cmd_wait
9697  *	Stall driver until all outstanding commands are returned.
9698  *
9699  * Input:
9700  *	ha = adapter state pointer.
9701  *
9702  * Context:
9703  *	Kernel context.
9704  */
9705 void
9706 ql_cmd_wait(ql_adapter_state_t *ha)
9707 {
9708 	uint16_t		index;
9709 	ql_link_t		*link;
9710 	ql_tgt_t		*tq;
9711 	ql_adapter_state_t	*vha;
9712 
9713 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9714 
9715 	/* Wait for all outstanding commands to be returned. */
9716 	(void) ql_wait_outstanding(ha);
9717 
9718 	/*
9719 	 * clear out internally queued commands
9720 	 */
9721 	for (vha = ha; vha != NULL; vha = vha->vp_next) {
9722 		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
9723 			for (link = vha->dev[index].first; link != NULL;
9724 			    link = link->next) {
9725 				tq = link->base_address;
9726 				if (tq &&
9727 				    !(tq->prli_svc_param_word_3 & BIT_8)) {
9728 					(void) ql_abort_device(vha, tq, 0);
9729 				}
9730 			}
9731 		}
9732 	}
9733 
9734 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9735 }
9736 
9737 /*
9738  * ql_wait_outstanding
9739  *	Wait for all outstanding commands to complete.
9740  *
9741  * Input:
9742  *	ha = adapter state pointer.
9743  *
9744  * Returns:
9745  *	index - the index for ql_srb into outstanding_cmds.
9746  *
9747  * Context:
9748  *	Kernel context.
9749  */
9750 static uint16_t
9751 ql_wait_outstanding(ql_adapter_state_t *ha)
9752 {
9753 	ql_srb_t	*sp;
9754 	uint16_t	index, count;
9755 
9756 	count = 3000;
9757 	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
9758 		if (ha->pha->pending_cmds.first != NULL) {
9759 			ql_start_iocb(ha, NULL);
9760 			index = 1;
9761 		}
9762 		if ((sp = ha->pha->outstanding_cmds[index]) != NULL &&
9763 		    (sp->flags & SRB_COMMAND_TIMEOUT) == 0) {
9764 			if (count-- != 0) {
9765 				ql_delay(ha, 10000);
9766 				index = 0;
9767 			} else {
9768 				EL(ha, "failed, sp=%ph\n", (void *)sp);
9769 				break;
9770 			}
9771 		}
9772 	}
9773 
9774 	return (index);
9775 }
9776 
9777 /*
9778  * ql_restart_queues
9779  *	Restart device queues.
9780  *
9781  * Input:
9782  *	ha = adapter state pointer.
9783  *	DEVICE_QUEUE_LOCK must be released.
9784  *
9785  * Context:
9786  *	Interrupt or Kernel context, no mailbox commands allowed.
9787  */
9788 static void
9789 ql_restart_queues(ql_adapter_state_t *ha)
9790 {
9791 	ql_link_t		*link, *link2;
9792 	ql_tgt_t		*tq;
9793 	ql_lun_t		*lq;
9794 	uint16_t		index;
9795 	ql_adapter_state_t	*vha;
9796 
9797 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9798 
9799 	for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
9800 		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
9801 			for (link = vha->dev[index].first; link != NULL;
9802 			    link = link->next) {
9803 				tq = link->base_address;
9804 
9805 				/* Acquire device queue lock. */
9806 				DEVICE_QUEUE_LOCK(tq);
9807 
9808 				tq->flags &= ~TQF_QUEUE_SUSPENDED;
9809 
9810 				for (link2 = tq->lun_queues.first;
9811 				    link2 != NULL; link2 = link2->next) {
9812 					lq = link2->base_address;
9813 
9814 					if (lq->cmd.first != NULL) {
9815 						ql_next(vha, lq);
9816 						DEVICE_QUEUE_LOCK(tq);
9817 					}
9818 				}
9819 
9820 				/* Release device queue lock. */
9821 				DEVICE_QUEUE_UNLOCK(tq);
9822 			}
9823 		}
9824 	}
9825 
9826 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9827 }
9828 
9829 
9830 /*
9831  * ql_iidma
9832  *	Setup iiDMA parameters to firmware
9833  *
9834  * Input:
9835  *	ha = adapter state pointer.
9836  *	DEVICE_QUEUE_LOCK must be released.
9837  *
9838  * Context:
9839  *	Interrupt or Kernel context, no mailbox commands allowed.
9840  */
9841 static void
9842 ql_iidma(ql_adapter_state_t *ha)
9843 {
9844 	ql_link_t	*link;
9845 	ql_tgt_t	*tq;
9846 	uint16_t	index;
9847 	char		buf[256];
9848 	uint32_t	data;
9849 
9850 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9851 
9852 	if ((CFG_IST(ha, CFG_CTRL_2425)) == 0) {
9853 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9854 		return;
9855 	}
9856 
9857 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
9858 		for (link = ha->dev[index].first; link != NULL;
9859 		    link = link->next) {
9860 			tq = link->base_address;
9861 
9862 			/* Acquire device queue lock. */
9863 			DEVICE_QUEUE_LOCK(tq);
9864 
9865 			if ((tq->flags & TQF_IIDMA_NEEDED) == 0) {
9866 				DEVICE_QUEUE_UNLOCK(tq);
9867 				continue;
9868 			}
9869 
9870 			tq->flags &= ~TQF_IIDMA_NEEDED;
9871 
9872 			if ((tq->loop_id > LAST_N_PORT_HDL) ||
9873 			    (tq->iidma_rate == IIDMA_RATE_NDEF)) {
9874 				DEVICE_QUEUE_UNLOCK(tq);
9875 				continue;
9876 			}
9877 
9878 			/* Get the iiDMA persistent data */
9879 			if (tq->iidma_rate == IIDMA_RATE_INIT) {
9880 				(void) sprintf(buf,
9881 				    "iidma-rate-%02x%02x%02x%02x%02x"
9882 				    "%02x%02x%02x", tq->port_name[0],
9883 				    tq->port_name[1], tq->port_name[2],
9884 				    tq->port_name[3], tq->port_name[4],
9885 				    tq->port_name[5], tq->port_name[6],
9886 				    tq->port_name[7]);
9887 
9888 				if ((data = ql_get_prop(ha, buf)) ==
9889 				    0xffffffff) {
9890 					tq->iidma_rate = IIDMA_RATE_NDEF;
9891 				} else {
9892 					switch (data) {
9893 					case IIDMA_RATE_1GB:
9894 					case IIDMA_RATE_2GB:
9895 					case IIDMA_RATE_4GB:
9896 						tq->iidma_rate = data;
9897 						break;
9898 					case IIDMA_RATE_8GB:
9899 						if (CFG_IST(ha,
9900 						    CFG_CTRL_25XX)) {
9901 							tq->iidma_rate = data;
9902 						} else {
9903 							tq->iidma_rate =
9904 							    IIDMA_RATE_4GB;
9905 						}
9906 						break;
9907 					default:
9908 						EL(ha, "invalid data for "
9909 						    "parameter: %s: %xh\n",
9910 						    buf, data);
9911 						tq->iidma_rate =
9912 						    IIDMA_RATE_NDEF;
9913 						break;
9914 					}
9915 				}
9916 			}
9917 
9918 			/* Set the firmware's iiDMA rate */
9919 			if (tq->iidma_rate <= IIDMA_RATE_MAX) {
9920 				data = ql_iidma_rate(ha, tq->loop_id,
9921 				    &tq->iidma_rate, EXT_IIDMA_MODE_SET);
9922 				if (data != QL_SUCCESS) {
9923 					EL(ha, "mbx failed: %xh\n", data);
9924 				}
9925 			}
9926 
9927 			/* Release device queue lock. */
9928 			DEVICE_QUEUE_UNLOCK(tq);
9929 		}
9930 	}
9931 
9932 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9933 }
9934 
9935 /*
9936  * ql_abort_queues
9937  *	Abort all commands on device queues.
9938  *
9939  * Input:
9940  *	ha = adapter state pointer.
9941  *
9942  * Context:
9943  *	Interrupt or Kernel context, no mailbox commands allowed.
9944  */
9945 static void
9946 ql_abort_queues(ql_adapter_state_t *ha)
9947 {
9948 	ql_link_t		*link, *link1, *link2;
9949 	ql_tgt_t		*tq;
9950 	ql_lun_t		*lq;
9951 	ql_srb_t		*sp;
9952 	uint16_t		index;
9953 	ql_adapter_state_t	*vha;
9954 
9955 	QL_PRINT_10(CE_CONT, "(%d): started\n", ha->instance);
9956 
9957 	/* Return all commands in outstanding command list. */
9958 	INTR_LOCK(ha);
9959 
9960 	/* Place all commands in outstanding cmd list on device queue. */
9961 	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
9962 		if (ha->pending_cmds.first != NULL) {
9963 			INTR_UNLOCK(ha);
9964 			ql_start_iocb(ha, NULL);
9965 			/* Delay for system */
9966 			ql_delay(ha, 10000);
9967 			INTR_LOCK(ha);
9968 			index = 1;
9969 		}
9970 		sp = ha->outstanding_cmds[index];
9971 		if (sp != NULL) {
9972 			ha->outstanding_cmds[index] = NULL;
9973 			sp->handle = 0;
9974 			sp->flags &= ~SRB_IN_TOKEN_ARRAY;
9975 
9976 			INTR_UNLOCK(ha);
9977 
9978 			/* Set ending status. */
9979 			sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;
9980 			sp->flags |= SRB_ISP_COMPLETED;
9981 
9982 			/* Call done routine to handle completions. */
9983 			sp->cmd.next = NULL;
9984 			ql_done(&sp->cmd);
9985 
9986 			INTR_LOCK(ha);
9987 		}
9988 	}
9989 	INTR_UNLOCK(ha);
9990 
9991 	for (vha = ha; vha != NULL; vha = vha->vp_next) {
9992 		QL_PRINT_10(CE_CONT, "(%d,%d): abort instance\n",
9993 		    vha->instance, vha->vp_index);
9994 		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
9995 			for (link = vha->dev[index].first; link != NULL;
9996 			    link = link->next) {
9997 				tq = link->base_address;
9998 
9999 				/*
10000 				 * Set port unavailable status for
10001 				 * all commands on device queue.
10002 				 */
10003 				DEVICE_QUEUE_LOCK(tq);
10004 
10005 				for (link1 = tq->lun_queues.first;
10006 				    link1 != NULL; link1 = link1->next) {
10007 					lq = link1->base_address;
10008 
10009 					link2 = lq->cmd.first;
10010 					while (link2 != NULL) {
10011 						sp = link2->base_address;
10012 
10013 						if (sp->flags & SRB_ABORT) {
10014 							link2 = link2->next;
10015 							continue;
10016 						}
10017 
10018 						/* Rem srb from dev cmd q. */
10019 						ql_remove_link(&lq->cmd,
10020 						    &sp->cmd);
10021 						sp->flags &=
10022 						    ~SRB_IN_DEVICE_QUEUE;
10023 
10024 						/* Release device queue lock */
10025 						DEVICE_QUEUE_UNLOCK(tq);
10026 
10027 						/* Set ending status. */
10028 						sp->pkt->pkt_reason =
10029 						    CS_PORT_UNAVAILABLE;
10030 
10031 						/*
10032 						 * Call done routine to handle
10033 						 * completions.
10034 						 */
10035 						ql_done(&sp->cmd);
10036 
10037 						/* Delay for system */
10038 						ql_delay(ha, 10000);
10039 
10040 						/* Acquire device queue lock */
10041 						DEVICE_QUEUE_LOCK(tq);
10042 						link2 = lq->cmd.first;
10043 					}
10044 				}
10045 				/* Release device queue lock. */
10046 				DEVICE_QUEUE_UNLOCK(tq);
10047 			}
10048 		}
10049 	}
10050 
10051 	QL_PRINT_10(CE_CONT, "(%d): done\n", ha->instance);
10052 }
10053 
10054 /*
10055  * ql_loop_resync
10056  *	Resync with fibre channel devices.
10057  *
10058  * Input:
10059  *	ha = adapter state pointer.
10060  *	DEVICE_QUEUE_LOCK must be released.
10061  *
10062  * Returns:
10063  *	ql local function return status code.
10064  *
10065  * Context:
10066  *	Kernel context.
10067  */
10068 static int
10069 ql_loop_resync(ql_adapter_state_t *ha)
10070 {
10071 	int rval;
10072 
10073 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10074 
10075 	if (ha->flags & IP_INITIALIZED) {
10076 		(void) ql_shutdown_ip(ha);
10077 	}
10078 
10079 	rval = ql_fw_ready(ha, 10);
10080 
10081 	TASK_DAEMON_LOCK(ha);
10082 	ha->task_daemon_flags &= ~LOOP_RESYNC_ACTIVE;
10083 	TASK_DAEMON_UNLOCK(ha);
10084 
10085 	/* Set loop online, if it really is. */
10086 	if (rval == QL_SUCCESS) {
10087 		ql_loop_online(ha);
10088 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10089 	} else {
10090 		EL(ha, "failed, rval = %xh\n", rval);
10091 	}
10092 
10093 	return (rval);
10094 }
10095 
10096 /*
10097  * ql_loop_online
10098  *	Set loop online status if it really is online.
10099  *
10100  * Input:
10101  *	ha = adapter state pointer.
10102  *	DEVICE_QUEUE_LOCK must be released.
10103  *
10104  * Context:
10105  *	Kernel context.
10106  */
10107 void
10108 ql_loop_online(ql_adapter_state_t *ha)
10109 {
10110 	ql_adapter_state_t	*vha;
10111 
10112 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10113 
10114 	/* Inform the FC Transport that the hardware is online. */
10115 	for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
10116 		if (!(vha->task_daemon_flags &
10117 		    (LOOP_RESYNC_NEEDED | LOOP_DOWN))) {
10118 			/* Restart IP if it was shutdown. */
10119 			if (vha->vp_index == 0 && vha->flags & IP_ENABLED &&
10120 			    !(vha->flags & IP_INITIALIZED)) {
10121 				(void) ql_initialize_ip(vha);
10122 				ql_isp_rcvbuf(vha);
10123 			}
10124 
10125 			if (FC_PORT_STATE_MASK(vha->state) != FC_STATE_LOOP &&
10126 			    FC_PORT_STATE_MASK(vha->state) !=
10127 			    FC_STATE_ONLINE) {
10128 				vha->state = FC_PORT_SPEED_MASK(vha->state);
10129 				if (vha->topology & QL_LOOP_CONNECTION) {
10130 					vha->state |= FC_STATE_LOOP;
10131 				} else {
10132 					vha->state |= FC_STATE_ONLINE;
10133 				}
10134 				TASK_DAEMON_LOCK(ha);
10135 				vha->task_daemon_flags |= FC_STATE_CHANGE;
10136 				TASK_DAEMON_UNLOCK(ha);
10137 			}
10138 		}
10139 	}
10140 
10141 	ql_awaken_task_daemon(ha, NULL, 0, 0);
10142 
10143 	/* Restart device queues that may have been stopped. */
10144 	ql_restart_queues(ha);
10145 
10146 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10147 }
10148 
10149 /*
10150  * ql_fca_handle_to_state
10151  *	Verifies handle to be correct.
10152  *
10153  * Input:
10154  *	fca_handle = pointer to state structure.
10155  *
10156  * Returns:
10157  *	NULL = failure
10158  *
10159  * Context:
10160  *	Kernel context.
10161  */
10162 static ql_adapter_state_t *
10163 ql_fca_handle_to_state(opaque_t fca_handle)
10164 {
10165 #ifdef	QL_DEBUG_ROUTINES
10166 	ql_link_t		*link;
10167 	ql_adapter_state_t	*ha = NULL;
10168 	ql_adapter_state_t	*vha = NULL;
10169 
10170 	for (link = ql_hba.first; link != NULL; link = link->next) {
10171 		ha = link->base_address;
10172 		for (vha = ha->vp_next; vha != NULL; vha = vha->vp_next) {
10173 			if ((opaque_t)vha == fca_handle) {
10174 				ha = vha;
10175 				break;
10176 			}
10177 		}
10178 		if ((opaque_t)ha == fca_handle) {
10179 			break;
10180 		} else {
10181 			ha = NULL;
10182 		}
10183 	}
10184 
10185 	if (ha == NULL) {
10186 		/*EMPTY*/
10187 		QL_PRINT_2(CE_CONT, "failed\n");
10188 	}
10189 
10190 	ASSERT(ha != NULL);
10191 #endif /* QL_DEBUG_ROUTINES */
10192 
10193 	return ((ql_adapter_state_t *)fca_handle);
10194 }
10195 
10196 /*
10197  * ql_d_id_to_queue
10198  *	Locate device queue that matches destination ID.
10199  *
10200  * Input:
10201  *	ha = adapter state pointer.
10202  *	d_id = destination ID
10203  *
10204  * Returns:
10205  *	NULL = failure
10206  *
10207  * Context:
10208  *	Interrupt or Kernel context, no mailbox commands allowed.
10209  */
10210 ql_tgt_t *
10211 ql_d_id_to_queue(ql_adapter_state_t *ha, port_id_t d_id)
10212 {
10213 	uint16_t	index;
10214 	ql_tgt_t	*tq;
10215 	ql_link_t	*link;
10216 
10217 	/* Get head queue index. */
10218 	index = ql_alpa_to_index[d_id.b.al_pa];
10219 
10220 	for (link = ha->dev[index].first; link != NULL; link = link->next) {
10221 		tq = link->base_address;
10222 		if (tq->d_id.b24 == d_id.b24 &&
10223 		    VALID_DEVICE_ID(ha, tq->loop_id)) {
10224 			return (tq);
10225 		}
10226 	}
10227 
10228 	return (NULL);
10229 }
10230 
10231 /*
10232  * ql_loop_id_to_queue
10233  *	Locate device queue that matches loop ID.
10234  *
10235  * Input:
10236  *	ha:		adapter state pointer.
10237  *	loop_id:	destination ID
10238  *
10239  * Returns:
10240  *	NULL = failure
10241  *
10242  * Context:
10243  *	Interrupt or Kernel context, no mailbox commands allowed.
10244  */
10245 ql_tgt_t *
10246 ql_loop_id_to_queue(ql_adapter_state_t *ha, uint16_t loop_id)
10247 {
10248 	uint16_t	index;
10249 	ql_tgt_t	*tq;
10250 	ql_link_t	*link;
10251 
10252 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10253 		for (link = ha->dev[index].first; link != NULL;
10254 		    link = link->next) {
10255 			tq = link->base_address;
10256 			if (tq->loop_id == loop_id) {
10257 				return (tq);
10258 			}
10259 		}
10260 	}
10261 
10262 	return (NULL);
10263 }
10264 
10265 /*
10266  * ql_kstat_update
10267  *	Updates kernel statistics.
10268  *
10269  * Input:
10270  *	ksp - driver kernel statistics structure pointer.
10271  *	rw - function to perform
10272  *
10273  * Returns:
10274  *	0 or EACCES
10275  *
10276  * Context:
10277  *	Kernel context.
10278  */
10279 /* ARGSUSED */
10280 static int
10281 ql_kstat_update(kstat_t *ksp, int rw)
10282 {
10283 	int			rval;
10284 
10285 	QL_PRINT_3(CE_CONT, "started\n");
10286 
10287 	if (rw == KSTAT_WRITE) {
10288 		rval = EACCES;
10289 	} else {
10290 		rval = 0;
10291 	}
10292 
10293 	if (rval != 0) {
10294 		/*EMPTY*/
10295 		QL_PRINT_2(CE_CONT, "failed, rval = %xh\n", rval);
10296 	} else {
10297 		/*EMPTY*/
10298 		QL_PRINT_3(CE_CONT, "done\n");
10299 	}
10300 	return (rval);
10301 }
10302 
10303 /*
10304  * ql_load_flash
10305  *	Loads flash.
10306  *
10307  * Input:
10308  *	ha:	adapter state pointer.
10309  *	dp:	data pointer.
10310  *	size:	data length.
10311  *
10312  * Returns:
10313  *	ql local function return status code.
10314  *
10315  * Context:
10316  *	Kernel context.
10317  */
10318 int
10319 ql_load_flash(ql_adapter_state_t *ha, uint8_t *dp, uint32_t size)
10320 {
10321 	uint32_t	cnt;
10322 	int		rval;
10323 	uint32_t	size_to_offset;
10324 	uint32_t	size_to_compare;
10325 	int		erase_all;
10326 
10327 	if (CFG_IST(ha, CFG_CTRL_2425)) {
10328 		return (ql_24xx_load_flash(ha, dp, size, 0));
10329 	}
10330 
10331 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10332 
10333 	size_to_compare = 0x20000;
10334 	size_to_offset = 0;
10335 	erase_all = 0;
10336 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
10337 		if (size == 0x80000) {
10338 			/* Request to flash the entire chip. */
10339 			size_to_compare = 0x80000;
10340 			erase_all = 1;
10341 		} else {
10342 			size_to_compare = 0x40000;
10343 			if (ql_flash_sbus_fpga) {
10344 				size_to_offset = 0x40000;
10345 			}
10346 		}
10347 	}
10348 	if (size > size_to_compare) {
10349 		rval = QL_FUNCTION_PARAMETER_ERROR;
10350 		EL(ha, "failed=%xh\n", rval);
10351 		return (rval);
10352 	}
10353 
10354 	GLOBAL_HW_LOCK();
10355 
10356 	/* Enable Flash Read/Write. */
10357 	ql_flash_enable(ha);
10358 
10359 	/* Erase flash prior to write. */
10360 	rval = ql_erase_flash(ha, erase_all);
10361 
10362 	if (rval == QL_SUCCESS) {
10363 		/* Write data to flash. */
10364 		for (cnt = 0; cnt < size; cnt++) {
10365 			/* Allow other system activity. */
10366 			if (cnt % 0x1000 == 0) {
10367 				ql_delay(ha, 10000);
10368 			}
10369 			rval = ql_program_flash_address(ha,
10370 			    cnt + size_to_offset, *dp++);
10371 			if (rval != QL_SUCCESS) {
10372 				break;
10373 			}
10374 		}
10375 	}
10376 
10377 	ql_flash_disable(ha);
10378 
10379 	GLOBAL_HW_UNLOCK();
10380 
10381 	if (rval != QL_SUCCESS) {
10382 		EL(ha, "failed=%xh\n", rval);
10383 	} else {
10384 		/*EMPTY*/
10385 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10386 	}
10387 	return (rval);
10388 }
10389 
10390 /*
10391  * ql_program_flash_address
10392  *	Program flash address.
10393  *
10394  * Input:
10395  *	ha = adapter state pointer.
10396  *	addr = flash byte address.
10397  *	data = data to be written to flash.
10398  *
10399  * Returns:
10400  *	ql local function return status code.
10401  *
10402  * Context:
10403  *	Kernel context.
10404  */
10405 static int
10406 ql_program_flash_address(ql_adapter_state_t *ha, uint32_t addr, uint8_t data)
10407 {
10408 	int rval;
10409 
10410 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
10411 		ql_write_flash_byte(ha, 0x5555, 0xa0);
10412 		ql_write_flash_byte(ha, addr, data);
10413 	} else {
10414 		/* Write Program Command Sequence */
10415 		ql_write_flash_byte(ha, 0x5555, 0xaa);
10416 		ql_write_flash_byte(ha, 0x2aaa, 0x55);
10417 		ql_write_flash_byte(ha, 0x5555, 0xa0);
10418 		ql_write_flash_byte(ha, addr, data);
10419 	}
10420 
10421 	/* Wait for write to complete. */
10422 	rval = ql_poll_flash(ha, addr, data);
10423 
10424 	if (rval != QL_SUCCESS) {
10425 		EL(ha, "failed=%xh\n", rval);
10426 	}
10427 	return (rval);
10428 }
10429 
10430 /*
10431  * ql_erase_flash
10432  *	Erases entire flash.
10433  *
10434  * Input:
10435  *	ha = adapter state pointer.
10436  *
10437  * Returns:
10438  *	ql local function return status code.
10439  *
10440  * Context:
10441  *	Kernel context.
10442  */
10443 int
10444 ql_erase_flash(ql_adapter_state_t *ha, int erase_all)
10445 {
10446 	int		rval;
10447 	uint32_t	erase_delay = 2000000;
10448 	uint32_t	sStartAddr;
10449 	uint32_t	ssize;
10450 	uint32_t	cnt;
10451 	uint8_t		*bfp;
10452 	uint8_t		*tmp;
10453 
10454 	if ((CFG_IST(ha, CFG_SBUS_CARD)) && !erase_all) {
10455 
10456 		if (ql_flash_sbus_fpga == 1) {
10457 			ssize = QL_SBUS_FCODE_SIZE;
10458 			sStartAddr = QL_FCODE_OFFSET;
10459 		} else {
10460 			ssize = QL_FPGA_SIZE;
10461 			sStartAddr = QL_FPGA_OFFSET;
10462 		}
10463 
10464 		erase_delay = 20000000;
10465 
10466 		bfp = (uint8_t *)kmem_zalloc(ssize, KM_SLEEP);
10467 
10468 		/* Save the section of flash we're not updating to buffer */
10469 		tmp = bfp;
10470 		for (cnt = sStartAddr; cnt < ssize+sStartAddr; cnt++) {
10471 			/* Allow other system activity. */
10472 			if (cnt % 0x1000 == 0) {
10473 				ql_delay(ha, 10000);
10474 			}
10475 			*tmp++ = (uint8_t)ql_read_flash_byte(ha, cnt);
10476 		}
10477 	}
10478 
10479 	/* Chip Erase Command Sequence */
10480 	ql_write_flash_byte(ha, 0x5555, 0xaa);
10481 	ql_write_flash_byte(ha, 0x2aaa, 0x55);
10482 	ql_write_flash_byte(ha, 0x5555, 0x80);
10483 	ql_write_flash_byte(ha, 0x5555, 0xaa);
10484 	ql_write_flash_byte(ha, 0x2aaa, 0x55);
10485 	ql_write_flash_byte(ha, 0x5555, 0x10);
10486 
10487 	ql_delay(ha, erase_delay);
10488 
10489 	/* Wait for erase to complete. */
10490 	rval = ql_poll_flash(ha, 0, 0x80);
10491 
10492 	if (rval != QL_SUCCESS) {
10493 		EL(ha, "failed=%xh\n", rval);
10494 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
10495 			kmem_free(bfp, ssize);
10496 		}
10497 		return (rval);
10498 	}
10499 
10500 	/* restore the section we saved in the buffer */
10501 	if ((CFG_IST(ha, CFG_SBUS_CARD)) && !erase_all) {
10502 		/* Restore the section we saved off */
10503 		tmp = bfp;
10504 		for (cnt = sStartAddr; cnt < ssize+sStartAddr; cnt++) {
10505 			/* Allow other system activity. */
10506 			if (cnt % 0x1000 == 0) {
10507 				ql_delay(ha, 10000);
10508 			}
10509 			rval = ql_program_flash_address(ha, cnt, *tmp++);
10510 			if (rval != QL_SUCCESS) {
10511 				break;
10512 			}
10513 		}
10514 
10515 		kmem_free(bfp, ssize);
10516 	}
10517 
10518 	if (rval != QL_SUCCESS) {
10519 		EL(ha, "failed=%xh\n", rval);
10520 	}
10521 	return (rval);
10522 }
10523 
10524 /*
10525  * ql_poll_flash
10526  *	Polls flash for completion.
10527  *
10528  * Input:
10529  *	ha = adapter state pointer.
10530  *	addr = flash byte address.
10531  *	data = data to be polled.
10532  *
10533  * Returns:
10534  *	ql local function return status code.
10535  *
10536  * Context:
10537  *	Kernel context.
10538  */
10539 int
10540 ql_poll_flash(ql_adapter_state_t *ha, uint32_t addr, uint8_t poll_data)
10541 {
10542 	uint8_t		flash_data;
10543 	uint32_t	cnt;
10544 	int		rval = QL_FUNCTION_FAILED;
10545 
10546 	poll_data = (uint8_t)(poll_data & BIT_7);
10547 
10548 	/* Wait for 30 seconds for command to finish. */
10549 	for (cnt = 30000000; cnt; cnt--) {
10550 		flash_data = (uint8_t)ql_read_flash_byte(ha, addr);
10551 
10552 		if ((flash_data & BIT_7) == poll_data) {
10553 			rval = QL_SUCCESS;
10554 			break;
10555 		}
10556 		if (flash_data & BIT_5 && cnt > 2) {
10557 			cnt = 2;
10558 		}
10559 		drv_usecwait(1);
10560 	}
10561 
10562 	if (rval != QL_SUCCESS) {
10563 		EL(ha, "failed=%xh\n", rval);
10564 	}
10565 	return (rval);
10566 }
10567 
10568 /*
10569  * ql_flash_enable
10570  *	Setup flash for reading/writing.
10571  *
10572  * Input:
10573  *	ha = adapter state pointer.
10574  *
10575  * Context:
10576  *	Kernel context.
10577  */
10578 void
10579 ql_flash_enable(ql_adapter_state_t *ha)
10580 {
10581 	uint16_t	data;
10582 
10583 	/* Enable Flash Read/Write. */
10584 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
10585 		data = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
10586 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF));
10587 		data = (uint16_t)(data | SBUS_FLASH_WRITE_ENABLE);
10588 		ddi_put16(ha->sbus_fpga_dev_handle,
10589 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF), data);
10590 		/* Read reset command sequence */
10591 		ql_write_flash_byte(ha, 0xaaa, 0xaa);
10592 		ql_write_flash_byte(ha, 0x555, 0x55);
10593 		ql_write_flash_byte(ha, 0xaaa, 0x20);
10594 		ql_write_flash_byte(ha, 0x555, 0xf0);
10595 	} else {
10596 		data = (uint16_t)(RD16_IO_REG(ha, ctrl_status) |
10597 		    ISP_FLASH_ENABLE);
10598 		WRT16_IO_REG(ha, ctrl_status, data);
10599 
10600 		/* Read/Reset Command Sequence */
10601 		ql_write_flash_byte(ha, 0x5555, 0xaa);
10602 		ql_write_flash_byte(ha, 0x2aaa, 0x55);
10603 		ql_write_flash_byte(ha, 0x5555, 0xf0);
10604 	}
10605 	(void) ql_read_flash_byte(ha, 0);
10606 }
10607 
10608 /*
10609  * ql_flash_disable
10610  *	Disable flash and allow RISC to run.
10611  *
10612  * Input:
10613  *	ha = adapter state pointer.
10614  *
10615  * Context:
10616  *	Kernel context.
10617  */
10618 void
10619 ql_flash_disable(ql_adapter_state_t *ha)
10620 {
10621 	uint16_t	data;
10622 
10623 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
10624 		/*
10625 		 * Lock the flash back up.
10626 		 */
10627 		ql_write_flash_byte(ha, 0x555, 0x90);
10628 		ql_write_flash_byte(ha, 0x555, 0x0);
10629 
10630 		data = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
10631 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF));
10632 		data = (uint16_t)(data & ~SBUS_FLASH_WRITE_ENABLE);
10633 		ddi_put16(ha->sbus_fpga_dev_handle,
10634 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF), data);
10635 	} else {
10636 		data = (uint16_t)(RD16_IO_REG(ha, ctrl_status) &
10637 		    ~ISP_FLASH_ENABLE);
10638 		WRT16_IO_REG(ha, ctrl_status, data);
10639 	}
10640 }
10641 
10642 /*
10643  * ql_write_flash_byte
10644  *	Write byte to flash.
10645  *
10646  * Input:
10647  *	ha = adapter state pointer.
10648  *	addr = flash byte address.
10649  *	data = data to be written.
10650  *
10651  * Context:
10652  *	Kernel context.
10653  */
10654 void
10655 ql_write_flash_byte(ql_adapter_state_t *ha, uint32_t addr, uint8_t data)
10656 {
10657 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
10658 		ddi_put16(ha->sbus_fpga_dev_handle,
10659 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_LOADDR),
10660 		    LSW(addr));
10661 		ddi_put16(ha->sbus_fpga_dev_handle,
10662 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_HIADDR),
10663 		    MSW(addr));
10664 		ddi_put16(ha->sbus_fpga_dev_handle,
10665 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_DATA),
10666 		    (uint16_t)data);
10667 	} else {
10668 		uint16_t bank_select;
10669 
10670 		/* Setup bit 16 of flash address. */
10671 		bank_select = (uint16_t)RD16_IO_REG(ha, ctrl_status);
10672 
10673 		if (CFG_IST(ha, CFG_CTRL_6322)) {
10674 			bank_select = (uint16_t)(bank_select & ~0xf0);
10675 			bank_select = (uint16_t)(bank_select |
10676 			    ((addr >> 12 & 0xf0) | ISP_FLASH_64K_BANK));
10677 			WRT16_IO_REG(ha, ctrl_status, bank_select);
10678 		} else {
10679 			if (addr & BIT_16 && !(bank_select &
10680 			    ISP_FLASH_64K_BANK)) {
10681 				bank_select = (uint16_t)(bank_select |
10682 				    ISP_FLASH_64K_BANK);
10683 				WRT16_IO_REG(ha, ctrl_status, bank_select);
10684 			} else if (!(addr & BIT_16) && bank_select &
10685 			    ISP_FLASH_64K_BANK) {
10686 				bank_select = (uint16_t)(bank_select &
10687 				    ~ISP_FLASH_64K_BANK);
10688 				WRT16_IO_REG(ha, ctrl_status, bank_select);
10689 			}
10690 		}
10691 
10692 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
10693 			WRT16_IO_REG(ha, flash_address, (uint16_t)addr);
10694 			WRT16_IO_REG(ha, flash_data, (uint16_t)data);
10695 		} else {
10696 			WRT16_IOMAP_REG(ha, flash_address, addr);
10697 			WRT16_IOMAP_REG(ha, flash_data, data);
10698 		}
10699 	}
10700 }
10701 
10702 /*
10703  * ql_read_flash_byte
10704  *	Reads byte from flash, but must read a word from chip.
10705  *
10706  * Input:
10707  *	ha = adapter state pointer.
10708  *	addr = flash byte address.
10709  *
10710  * Returns:
10711  *	byte from flash.
10712  *
10713  * Context:
10714  *	Kernel context.
10715  */
10716 uint8_t
10717 ql_read_flash_byte(ql_adapter_state_t *ha, uint32_t addr)
10718 {
10719 	uint8_t	data;
10720 
10721 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
10722 		ddi_put16(ha->sbus_fpga_dev_handle,
10723 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_LOADDR),
10724 		    LSW(addr));
10725 		ddi_put16(ha->sbus_fpga_dev_handle,
10726 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_HIADDR),
10727 		    MSW(addr));
10728 		data = (uint8_t)ddi_get16(ha->sbus_fpga_dev_handle,
10729 		    (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_DATA));
10730 	} else {
10731 		uint16_t	bank_select;
10732 
10733 		/* Setup bit 16 of flash address. */
10734 		bank_select = RD16_IO_REG(ha, ctrl_status);
10735 		if (CFG_IST(ha, CFG_CTRL_6322)) {
10736 			bank_select = (uint16_t)(bank_select & ~0xf0);
10737 			bank_select = (uint16_t)(bank_select |
10738 			    ((addr >> 12 & 0xf0) | ISP_FLASH_64K_BANK));
10739 			WRT16_IO_REG(ha, ctrl_status, bank_select);
10740 		} else {
10741 			if (addr & BIT_16 &&
10742 			    !(bank_select & ISP_FLASH_64K_BANK)) {
10743 				bank_select = (uint16_t)(bank_select |
10744 				    ISP_FLASH_64K_BANK);
10745 				WRT16_IO_REG(ha, ctrl_status, bank_select);
10746 			} else if (!(addr & BIT_16) &&
10747 			    bank_select & ISP_FLASH_64K_BANK) {
10748 				bank_select = (uint16_t)(bank_select &
10749 				    ~ISP_FLASH_64K_BANK);
10750 				WRT16_IO_REG(ha, ctrl_status, bank_select);
10751 			}
10752 		}
10753 
10754 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
10755 			WRT16_IO_REG(ha, flash_address, addr);
10756 			data = (uint8_t)RD16_IO_REG(ha, flash_data);
10757 		} else {
10758 			WRT16_IOMAP_REG(ha, flash_address, addr);
10759 			data = (uint8_t)RD16_IOMAP_REG(ha, flash_data);
10760 		}
10761 	}
10762 
10763 	return (data);
10764 }
10765 
10766 /*
10767  * ql_24xx_flash_id
10768  *	Get flash IDs.
10769  *
10770  * Input:
10771  *	ha:		adapter state pointer.
10772  *
10773  * Returns:
10774  *	ql local function return status code.
10775  *
10776  * Context:
10777  *	Kernel context.
10778  */
10779 int
10780 ql_24xx_flash_id(ql_adapter_state_t *vha)
10781 {
10782 	int			rval;
10783 	uint32_t		fdata = 0;
10784 	ql_adapter_state_t	*ha = vha->pha;
10785 	ql_xioctl_t		*xp = ha->xioctl;
10786 
10787 
10788 	rval = ql_24xx_read_flash(ha, FLASH_CONF_ADDR | 0x3AB, &fdata);
10789 
10790 	if (rval != QL_SUCCESS || fdata == 0 || CFG_IST(ha, CFG_CTRL_25XX)) {
10791 		fdata = 0;
10792 		rval = ql_24xx_read_flash(ha, FLASH_CONF_ADDR |
10793 		    (CFG_IST(ha, CFG_CTRL_2422) ? 0x39F : 0x49F), &fdata);
10794 	}
10795 
10796 	if (rval != QL_SUCCESS) {
10797 		EL(ha, "24xx read_flash failed=%xh\n", rval);
10798 	} else if (fdata != 0) {
10799 		xp->fdesc.flash_manuf = LSB(LSW(fdata));
10800 		xp->fdesc.flash_id = MSB(LSW(fdata));
10801 		xp->fdesc.flash_len = LSB(MSW(fdata));
10802 	} else {
10803 		xp->fdesc.flash_manuf = ATMEL_FLASH;
10804 		xp->fdesc.flash_id = ATMEL_FLASHID_1024K;
10805 		xp->fdesc.flash_len = 0;
10806 	}
10807 
10808 	return (rval);
10809 }
10810 
10811 /*
10812  * ql_24xx_load_flash
10813  *	Loads flash.
10814  *
10815  * Input:
10816  *	ha = adapter state pointer.
10817  *	dp = data pointer.
10818  *	size = data length.
10819  *	faddr = 32bit word flash address.
10820  *
10821  * Returns:
10822  *	ql local function return status code.
10823  *
10824  * Context:
10825  *	Kernel context.
10826  */
10827 int
10828 ql_24xx_load_flash(ql_adapter_state_t *vha, uint8_t *dp, uint32_t size,
10829     uint32_t faddr)
10830 {
10831 	int			rval;
10832 	uint32_t		cnt, rest_addr, fdata, wc;
10833 	dma_mem_t		dmabuf = {0};
10834 	ql_adapter_state_t	*ha = vha->pha;
10835 	ql_xioctl_t		*xp = ha->xioctl;
10836 
10837 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10838 
10839 	/* start address must be 32 bit word aligned */
10840 	if ((faddr & 0x3) != 0) {
10841 		EL(ha, "incorrect buffer size alignment\n");
10842 		return (QL_FUNCTION_PARAMETER_ERROR);
10843 	}
10844 
10845 	GLOBAL_HW_LOCK();
10846 
10847 	if ((rval = ql_setup_flash(ha)) != QL_SUCCESS) {
10848 		EL(ha, "ql_setup_flash failed=%xh\n", rval);
10849 	} else {
10850 		/* Allocate DMA buffer */
10851 		if (CFG_IST(ha, CFG_CTRL_25XX)) {
10852 			if ((rval = ql_get_dma_mem(ha, &dmabuf, 0xffff,
10853 			    LITTLE_ENDIAN_DMA, MEM_DATA_ALIGN)) != QL_SUCCESS) {
10854 				EL(ha, "dma alloc failed, rval=%xh\n", rval);
10855 				return (rval);
10856 			}
10857 		}
10858 
10859 		/* setup mask of address range within a sector */
10860 		rest_addr = (xp->fdesc.block_size - 1) >> 2;
10861 
10862 		/* Enable flash write */
10863 		ql_24xx_unprotect_flash(ha);
10864 
10865 		faddr = faddr >> 2;	/* flash gets 32 bit words */
10866 
10867 		/*
10868 		 * Write data to flash.
10869 		 */
10870 		cnt = 0;
10871 		size = (size + 3) >> 2;	/* Round up & convert to dwords */
10872 
10873 		while (cnt < size) {
10874 			/* Beginning of a sector? */
10875 			if ((faddr & rest_addr) == 0) {
10876 				fdata = (faddr & ~rest_addr) << 2;
10877 				fdata = (fdata & 0xff00) |
10878 				    (fdata << 16 & 0xff0000) |
10879 				    (fdata >> 16 & 0xff);
10880 
10881 				if (rest_addr == 0x1fff) {
10882 					/* 32kb sector block erase */
10883 					rval = ql_24xx_write_flash(ha,
10884 					    FLASH_CONF_ADDR | 0x0352, fdata);
10885 				} else {
10886 					/* 64kb sector block erase */
10887 					rval = ql_24xx_write_flash(ha,
10888 					    FLASH_CONF_ADDR | 0x03d8, fdata);
10889 				}
10890 				if (rval != QL_SUCCESS) {
10891 					EL(ha, "Unable to flash sector: "
10892 					    "address=%xh\n", faddr);
10893 					break;
10894 				}
10895 			}
10896 
10897 			/* Write data */
10898 			if (CFG_IST(ha, CFG_CTRL_25XX) &&
10899 			    ((faddr & 0x3f) == 0)) {
10900 				/*
10901 				 * Limit write up to sector boundary.
10902 				 */
10903 				wc = ((~faddr & (rest_addr>>1)) + 1);
10904 
10905 				if (size - cnt < wc) {
10906 					wc = size - cnt;
10907 				}
10908 
10909 				ddi_rep_put8(dmabuf.acc_handle, (uint8_t *)dp,
10910 				    (uint8_t *)dmabuf.bp, wc<<2,
10911 				    DDI_DEV_AUTOINCR);
10912 
10913 				rval = ql_wrt_risc_ram(ha, FLASH_DATA_ADDR |
10914 				    faddr, dmabuf.cookie.dmac_laddress, wc);
10915 				if (rval != QL_SUCCESS) {
10916 					EL(ha, "unable to dma to flash "
10917 					    "address=%xh\n", faddr << 2);
10918 					break;
10919 				}
10920 
10921 				cnt += wc;
10922 				faddr += wc;
10923 				dp += wc << 2;
10924 			} else {
10925 				fdata = *dp++;
10926 				fdata |= *dp++ << 8;
10927 				fdata |= *dp++ << 16;
10928 				fdata |= *dp++ << 24;
10929 				rval = ql_24xx_write_flash(ha,
10930 				    FLASH_DATA_ADDR | faddr, fdata);
10931 				if (rval != QL_SUCCESS) {
10932 					EL(ha, "Unable to program flash "
10933 					    "address=%xh data=%xh\n", faddr,
10934 					    *dp);
10935 					break;
10936 				}
10937 				cnt++;
10938 				faddr++;
10939 
10940 				/* Allow other system activity. */
10941 				if (cnt % 0x1000 == 0) {
10942 					ql_delay(ha, 10000);
10943 				}
10944 			}
10945 		}
10946 
10947 		ql_24xx_protect_flash(ha);
10948 
10949 		ql_free_phys(ha, &dmabuf);
10950 	}
10951 
10952 	GLOBAL_HW_UNLOCK();
10953 
10954 	if (rval != QL_SUCCESS) {
10955 		EL(ha, "failed=%xh\n", rval);
10956 	} else {
10957 		/*EMPTY*/
10958 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10959 	}
10960 	return (rval);
10961 }
10962 
10963 /*
10964  * ql_24xx_read_flash
10965  *	Reads a 32bit word from ISP24xx NVRAM/FLASH.
10966  *
10967  * Input:
10968  *	ha:	adapter state pointer.
10969  *	faddr:	NVRAM/FLASH address.
10970  *	bp:	data pointer.
10971  *
10972  * Returns:
10973  *	ql local function return status code.
10974  *
10975  * Context:
10976  *	Kernel context.
10977  */
int
ql_24xx_read_flash(ql_adapter_state_t *vha, uint32_t faddr, uint32_t *bp)
{
	uint32_t		timer;
	int			rval = QL_SUCCESS;
	ql_adapter_state_t	*ha = vha->pha;

	/* Clear access error flag */
	WRT32_IO_REG(ha, ctrl_status,
	    RD32_IO_REG(ha, ctrl_status) | FLASH_NVRAM_ACCESS_ERROR);

	/* Writing the address with FLASH_DATA_FLAG clear starts the read. */
	WRT32_IO_REG(ha, flash_address, faddr & ~FLASH_DATA_FLAG);

	/* Wait for READ cycle to complete. */
	/* Poll up to 300000 * 10us = ~3 seconds. */
	for (timer = 300000; timer; timer--) {
		if (RD32_IO_REG(ha, flash_address) & FLASH_DATA_FLAG) {
			break;
		}
		drv_usecwait(10);
	}

	if (timer == 0) {
		EL(ha, "failed, timeout\n");
		rval = QL_FUNCTION_TIMEOUT;
	} else if (RD32_IO_REG(ha, ctrl_status) & FLASH_NVRAM_ACCESS_ERROR) {
		EL(ha, "failed, access error\n");
		rval = QL_FUNCTION_FAILED;
	}

	/* Data is stored unconditionally; callers must check rval. */
	*bp = RD32_IO_REG(ha, flash_data);

	return (rval);
}
11011 
11012 /*
11013  * ql_24xx_write_flash
11014  *	Writes a 32bit word to ISP24xx NVRAM/FLASH.
11015  *
11016  * Input:
11017  *	ha:	adapter state pointer.
11018  *	addr:	NVRAM/FLASH address.
11019  *	value:	data.
11020  *
11021  * Returns:
11022  *	ql local function return status code.
11023  *
11024  * Context:
11025  *	Kernel context.
11026  */
int
ql_24xx_write_flash(ql_adapter_state_t *vha, uint32_t addr, uint32_t data)
{
	uint32_t		timer, fdata;
	int			rval = QL_SUCCESS;
	ql_adapter_state_t	*ha = vha->pha;

	/* Clear access error flag */
	WRT32_IO_REG(ha, ctrl_status,
	    RD32_IO_REG(ha, ctrl_status) | FLASH_NVRAM_ACCESS_ERROR);

	/* Load data first; setting FLASH_DATA_FLAG starts the write cycle. */
	WRT32_IO_REG(ha, flash_data, data);
	RD32_IO_REG(ha, flash_data);		/* PCI Posting. */
	WRT32_IO_REG(ha, flash_address, addr | FLASH_DATA_FLAG);

	/* Wait for Write cycle to complete. */
	/* Poll up to 3000000 * 10us = ~30 seconds. */
	for (timer = 3000000; timer; timer--) {
		if ((RD32_IO_REG(ha, flash_address) & FLASH_DATA_FLAG) == 0) {
			/* Check flash write in progress. */
			if ((addr & FLASH_ADDR_MASK) == FLASH_CONF_ADDR) {
				/*
				 * Configuration-space writes: also poll the
				 * part's status register; BIT_0 set means a
				 * write is still in progress.
				 */
				(void) ql_24xx_read_flash(ha,
				    FLASH_CONF_ADDR | 0x005, &fdata);
				if (!(fdata & BIT_0)) {
					break;
				}
			} else {
				break;
			}
		}
		drv_usecwait(10);
	}
	if (timer == 0) {
		EL(ha, "failed, timeout\n");
		rval = QL_FUNCTION_TIMEOUT;
	} else if (RD32_IO_REG(ha, ctrl_status) & FLASH_NVRAM_ACCESS_ERROR) {
		EL(ha, "access error\n");
		rval = QL_FUNCTION_FAILED;
	}

	return (rval);
}
11068 /*
11069  * ql_24xx_unprotect_flash
11070  *	Enable writes
11071  *
11072  * Input:
11073  *	ha:	adapter state pointer.
11074  *
11075  * Context:
11076  *	Kernel context.
11077  */
void
ql_24xx_unprotect_flash(ql_adapter_state_t *vha)
{
	uint32_t		fdata;
	ql_adapter_state_t	*ha = vha->pha;
	ql_xioctl_t		*xp = ha->xioctl;

	QL_PRINT_3(CE_CONT, "(%d): entered\n", ha->instance);

	/* Enable flash write. */
	WRT32_IO_REG(ha, ctrl_status,
	    RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
	RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */

	/*
	 * Remove block write protection (SST and ST) and
	 * Sector/Block Protection Register Lock (SST, ST, ATMEL).
	 * Unprotect sectors.
	 */
	(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x100 |
	    xp->fdesc.write_statusreg_cmd, xp->fdesc.write_enable_bits);

	/* Issue per-sector unprotect commands when the part needs them. */
	if (xp->fdesc.unprotect_sector_cmd != 0) {
		for (fdata = 0; fdata < 0x10; fdata++) {
			(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR |
			    0x300 | xp->fdesc.unprotect_sector_cmd, fdata);
		}

		/*
		 * NOTE(review): additional fixed sector addresses --
		 * presumably region boundaries of the flash layout; confirm
		 * against the flash part's sector map.
		 */
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
		    xp->fdesc.unprotect_sector_cmd, 0x00400f);
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
		    xp->fdesc.unprotect_sector_cmd, 0x00600f);
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
		    xp->fdesc.unprotect_sector_cmd, 0x00800f);
	}

	QL_PRINT_3(CE_CONT, "(%d): exiting\n", ha->instance);
}
11116 
11117 /*
11118  * ql_24xx_protect_flash
11119  *	Disable writes
11120  *
11121  * Input:
11122  *	ha:	adapter state pointer.
11123  *
11124  * Context:
11125  *	Kernel context.
11126  */
void
ql_24xx_protect_flash(ql_adapter_state_t *vha)
{
	uint32_t		fdata;
	ql_adapter_state_t	*ha = vha->pha;
	ql_xioctl_t		*xp = ha->xioctl;

	QL_PRINT_3(CE_CONT, "(%d): entered\n", ha->instance);

	/* Enable flash write. */
	WRT32_IO_REG(ha, ctrl_status,
	    RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
	RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */

	/*
	 * Protect sectors.
	 * Set block write protection (SST and ST) and
	 * Sector/Block Protection Register Lock (SST, ST, ATMEL).
	 */
	if (xp->fdesc.protect_sector_cmd != 0) {
		/* Mirrors the unprotect sequence in ql_24xx_unprotect_flash. */
		for (fdata = 0; fdata < 0x10; fdata++) {
			(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR |
			    0x330 | xp->fdesc.protect_sector_cmd, fdata);
		}
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
		    xp->fdesc.protect_sector_cmd, 0x00400f);
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
		    xp->fdesc.protect_sector_cmd, 0x00600f);
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
		    xp->fdesc.protect_sector_cmd, 0x00800f);

		/* TODO: ??? */
		/*
		 * NOTE(review): 0x80 vs 0x9c below are raw status-register
		 * values written via command 0x101; meaning depends on the
		 * flash part -- confirm against its datasheet.
		 */
		(void) ql_24xx_write_flash(ha,
		    FLASH_CONF_ADDR | 0x101, 0x80);
	} else {
		(void) ql_24xx_write_flash(ha,
		    FLASH_CONF_ADDR | 0x101, 0x9c);
	}

	/* Disable flash write. */
	WRT32_IO_REG(ha, ctrl_status,
	    RD32_IO_REG(ha, ctrl_status) & ~ISP_FLASH_ENABLE);
	RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */

	QL_PRINT_3(CE_CONT, "(%d): exiting\n", ha->instance);
}
11173 
11174 /*
11175  * ql_dump_firmware
11176  *	Save RISC code state information.
11177  *
11178  * Input:
11179  *	ha = adapter state pointer.
11180  *
11181  * Returns:
11182  *	QL local function return status code.
11183  *
11184  * Context:
11185  *	Kernel context.
11186  */
static int
ql_dump_firmware(ql_adapter_state_t *vha)
{
	int			rval;
	clock_t			timer;
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Nothing to do while a dump is in progress or a valid dump is
	 * being held that has not been uploaded yet.
	 */
	if (ql_dump_state & QL_DUMPING || (ql_dump_state & QL_DUMP_VALID &&
	    !(ql_dump_state & QL_DUMP_UPLOADED))) {
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
		return (QL_SUCCESS);
	}

	/* Stall the driver so no new commands are started. */
	ql_awaken_task_daemon(ha, NULL, DRIVER_STALL, 0);

	/*
	 * Wait for all outstanding commands to complete
	 */
	(void) ql_wait_outstanding(ha);

	/* Dump firmware. */
	rval = ql_binary_fw_dump(ha, TRUE);

	/* Do abort to force restart. */
	ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, DRIVER_STALL);
	EL(ha, "restarting, isp_abort_needed\n");

	/* Acquire task daemon lock. */
	TASK_DAEMON_LOCK(ha);

	/* Wait for suspension to end. */
	while (ha->task_daemon_flags & QL_SUSPENDED) {
		ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;

		/* 30 seconds from now */
		timer = ddi_get_lbolt();
		timer += drv_usectohz(30000000);

		if (cv_timedwait(&ha->cv_dr_suspended,
		    &ha->task_daemon_mutex, timer) == -1) {
			/*
			 * The timeout time 'timer' was
			 * reached without the condition
			 * being signaled.
			 */
			break;
		}
	}

	/* Release task daemon lock. */
	TASK_DAEMON_UNLOCK(ha);

	/* QL_DATA_EXISTS means a dump was already held; treat as success. */
	if (rval == QL_SUCCESS || rval == QL_DATA_EXISTS) {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	} else {
		EL(ha, "failed, rval = %xh\n", rval);
	}
	return (rval);
}
11249 
11250 /*
11251  * ql_binary_fw_dump
11252  *	Dumps binary data from firmware.
11253  *
11254  * Input:
11255  *	ha = adapter state pointer.
11256  *	lock_needed = mailbox lock needed.
11257  *
11258  * Returns:
11259  *	ql local function return status code.
11260  *
11261  * Context:
11262  *	Interrupt or Kernel context, no mailbox commands allowed.
11263  */
int
ql_binary_fw_dump(ql_adapter_state_t *vha, int lock_needed)
{
	clock_t			timer;
	mbx_cmd_t		mc;
	mbx_cmd_t		*mcp = &mc;
	int			rval = QL_SUCCESS;
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Refuse to clobber an in-progress or not-yet-uploaded dump. */
	if (ql_dump_state & QL_DUMPING || (ql_dump_state & QL_DUMP_VALID &&
	    !(ql_dump_state & QL_DUMP_UPLOADED))) {
		EL(ha, "dump already done, qds=%x\n", ql_dump_state);
		return (QL_DATA_EXISTS);
	}

	ql_dump_state &= ~(QL_DUMP_VALID | QL_DUMP_UPLOADED);
	ql_dump_state |= QL_DUMPING;

	if (lock_needed == TRUE) {
		/* Acquire mailbox register lock. */
		MBX_REGISTER_LOCK(ha);

		/* Check for mailbox available, if not wait for signal. */
		while (ha->mailbox_flags & MBX_BUSY_FLG) {
			ha->mailbox_flags = (uint8_t)
			    (ha->mailbox_flags | MBX_WANT_FLG);

			/* 30 seconds from now */
			/*
			 * NOTE(review): wait period is actually derived from
			 * the current mailbox owner's timeout (mcp->timeout
			 * + 2 seconds); assumes ha->mcp is valid whenever
			 * MBX_BUSY_FLG is set -- confirm.
			 */
			timer = ddi_get_lbolt();
			timer += (ha->mcp->timeout + 2) *
			    drv_usectohz(1000000);
			if (cv_timedwait(&ha->cv_mbx_wait, &ha->mbx_mutex,
			    timer) == -1) {
				/*
				 * The timeout time 'timer' was
				 * reached without the condition
				 * being signaled.
				 */

				/* Release mailbox register lock. */
				MBX_REGISTER_UNLOCK(ha);

				EL(ha, "failed, rval = %xh\n",
				    QL_FUNCTION_TIMEOUT);
				return (QL_FUNCTION_TIMEOUT);
			}
		}

		/* Set busy flag. */
		ha->mailbox_flags = (uint8_t)
		    (ha->mailbox_flags | MBX_BUSY_FLG);
		mcp->timeout = 120;
		ha->mcp = mcp;

		/* Release mailbox register lock. */
		MBX_REGISTER_UNLOCK(ha);
	}

	/* Free previous dump buffer. */
	if (ql_dump_ptr != NULL) {
		kmem_free(ql_dump_ptr, ql_dump_size);
		ql_dump_ptr = NULL;
	}

	/* Size the dump buffer for the adapter family. */
	if (CFG_IST(ha, CFG_CTRL_2422)) {
		ql_dump_size = (uint32_t)(sizeof (ql_24xx_fw_dump_t) +
		    ha->fw_ext_memory_size);
	} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
		ql_dump_size = (uint32_t)(sizeof (ql_25xx_fw_dump_t) +
		    ha->fw_ext_memory_size);
	} else {
		ql_dump_size = sizeof (ql_fw_dump_t);
	}

	if ((ql_dump_ptr = kmem_zalloc(ql_dump_size, KM_NOSLEEP)) == NULL) {
		rval = QL_MEMORY_ALLOC_FAILED;
	} else {
		/* Dispatch to the family-specific binary dump routine. */
		if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
			rval = ql_2300_binary_fw_dump(ha, ql_dump_ptr);
		} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
			rval = ql_25xx_binary_fw_dump(ha, ql_dump_ptr);
		} else if (CFG_IST(ha, CFG_CTRL_2422)) {
			rval = ql_24xx_binary_fw_dump(ha, ql_dump_ptr);
		} else {
			rval = ql_2200_binary_fw_dump(ha, ql_dump_ptr);
		}
	}

	/* Reset ISP chip. */
	ql_reset_chip(ha);

	/* On failure release the buffer and clear all dump state bits. */
	if (rval != QL_SUCCESS) {
		if (ql_dump_ptr != NULL) {
			kmem_free(ql_dump_ptr, ql_dump_size);
			ql_dump_ptr = NULL;
		}
		ql_dump_state &= ~(QL_DUMPING | QL_DUMP_VALID |
		    QL_DUMP_UPLOADED);
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		ql_dump_state &= ~(QL_DUMPING | QL_DUMP_UPLOADED);
		ql_dump_state |= QL_DUMP_VALID;
		EL(ha, "done\n");
	}
	return (rval);
}
11372 
11373 /*
11374  * ql_ascii_fw_dump
11375  *	Converts firmware binary dump to ascii.
11376  *
11377  * Input:
11378  *	ha = adapter state pointer.
 *	bufp = buffer pointer.
11380  *
11381  * Returns:
11382  *	Amount of data buffer used.
11383  *
11384  * Context:
11385  *	Kernel context.
11386  */
size_t
ql_ascii_fw_dump(ql_adapter_state_t *vha, caddr_t bufp)
{
	uint32_t		cnt;
	caddr_t			bp;
	ql_fw_dump_t		*fw = ql_dump_ptr;
	int			mbox_cnt;
	ql_adapter_state_t	*ha = vha->pha;

	/* 24xx/25xx dumps have their own layouts and formatters. */
	if (CFG_IST(ha, CFG_CTRL_2422)) {
		return (ql_24xx_ascii_fw_dump(ha, bufp));
	} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
		return (ql_25xx_ascii_fw_dump(ha, bufp));
	}

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_CTRL_2300)) {
		(void) sprintf(bufp, "\nISP 2300IP ");
	} else if (CFG_IST(ha, CFG_CTRL_6322)) {
		(void) sprintf(bufp, "\nISP 6322FLX ");
	} else {
		(void) sprintf(bufp, "\nISP 2200IP ");
	}

	bp = bufp + strlen(bufp);
	(void) sprintf(bp, "Firmware Version %d.%d.%d\n",
	    ha->fw_major_version, ha->fw_minor_version,
	    ha->fw_subminor_version);

	/*
	 * Each register is emitted as "%04x  " (6 chars), 8 per line;
	 * bp always tracks the end of the string so far.
	 */
	(void) strcat(bufp, "\nPBIU Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->pbiu_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->pbiu_reg[cnt]);
		bp = bp + 6;
	}

	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
		(void) strcat(bufp, "\n\nReqQ-RspQ-Risc2Host Status "
		    "registers:");
		bp = bufp + strlen(bufp);
		for (cnt = 0; cnt < sizeof (fw->risc_host_reg) / 2; cnt++) {
			if (cnt % 8 == 0) {
				*bp++ = '\n';
			}
			(void) sprintf(bp, "%04x  ", fw->risc_host_reg[cnt]);
			bp = bp + 6;
		}
	}

	/* 2300/6322 capture 16 mailboxes; 2200 captures 8. */
	(void) strcat(bp, "\n\nMailbox Registers:");
	bp = bufp + strlen(bufp);
	mbox_cnt = (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) ? 16 : 8;
	for (cnt = 0; cnt < mbox_cnt; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->mailbox_reg[cnt]);
		bp = bp + 6;
	}

	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
		(void) strcat(bp, "\n\nAuto Request Response DMA Registers:");
		bp = bufp + strlen(bufp);
		for (cnt = 0; cnt < sizeof (fw->resp_dma_reg) / 2; cnt++) {
			if (cnt % 8 == 0) {
				*bp++ = '\n';
			}
			(void) sprintf(bp, "%04x  ", fw->resp_dma_reg[cnt]);
			bp = bp + 6;
		}
	}

	(void) strcat(bp, "\n\nDMA Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->dma_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->dma_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nRISC Hardware Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_hdw_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->risc_hdw_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nRISC GP0 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp0_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->risc_gp0_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nRISC GP1 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp1_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->risc_gp1_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nRISC GP2 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp2_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->risc_gp2_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nRISC GP3 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp3_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->risc_gp3_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nRISC GP4 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp4_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->risc_gp4_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nRISC GP5 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp5_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->risc_gp5_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nRISC GP6 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp6_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->risc_gp6_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nRISC GP7 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->risc_gp7_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->risc_gp7_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nFrame Buffer Hardware Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->frame_buf_hdw_reg) / 2; cnt++) {
		/* 2200 only captures the first 16 frame buffer registers. */
		if ((cnt == 16) && ((CFG_IST(ha, (CFG_CTRL_2300 |
		    CFG_CTRL_6322)) == 0))) {
			break;
		}
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->frame_buf_hdw_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nFPM B0 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->fpm_b0_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->fpm_b0_reg[cnt]);
		bp = bp + 6;
	}

	(void) strcat(bp, "\n\nFPM B1 Registers:");
	bp = bufp + strlen(bufp);
	for (cnt = 0; cnt < sizeof (fw->fpm_b1_reg) / 2; cnt++) {
		if (cnt % 8 == 0) {
			*bp++ = '\n';
		}
		(void) sprintf(bp, "%04x  ", fw->fpm_b1_reg[cnt]);
		bp = bp + 6;
	}

	/* RAM dumps prefix each line with its word offset. */
	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
		(void) strcat(bp, "\n\nCode RAM Dump:");
		bp = bufp + strlen(bufp);
		for (cnt = 0; cnt < sizeof (fw->risc_ram) / 2; cnt++) {
			if (cnt % 8 == 0) {
				(void) sprintf(bp, "\n%05x: ", cnt + 0x0800);
				bp = bp + 8;
			}
			(void) sprintf(bp, "%04x  ", fw->risc_ram[cnt]);
			bp = bp + 6;
		}

		(void) strcat(bp, "\n\nStack RAM Dump:");
		bp = bufp + strlen(bufp);
		for (cnt = 0; cnt < sizeof (fw->stack_ram) / 2; cnt++) {
			if (cnt % 8 == 0) {
				(void) sprintf(bp, "\n%05x: ", cnt + 0x010000);
				bp = bp + 8;
			}
			(void) sprintf(bp, "%04x  ", fw->stack_ram[cnt]);
			bp = bp + 6;
		}

		(void) strcat(bp, "\n\nData RAM Dump:");
		bp = bufp + strlen(bufp);
		for (cnt = 0; cnt < sizeof (fw->data_ram) / 2; cnt++) {
			if (cnt % 8 == 0) {
				(void) sprintf(bp, "\n%05x: ", cnt + 0x010800);
				bp = bp + 8;
			}
			(void) sprintf(bp, "%04x  ", fw->data_ram[cnt]);
			bp = bp + 6;
		}

		(void) strcat(bp, "\n\n[<==END] ISP Debug Dump.");
	} else {
		(void) strcat(bp, "\n\nRISC SRAM:");
		bp = bufp + strlen(bufp);
		for (cnt = 0; cnt < 0xf000; cnt++) {
			if (cnt % 8 == 0) {
				(void) sprintf(bp, "\n%04x: ", cnt + 0x1000);
				bp = bp + 7;
			}
			(void) sprintf(bp, "%04x  ", fw->risc_ram[cnt]);
			bp = bp + 6;
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (strlen(bufp));
}
11649 
11650 /*
11651  * ql_24xx_ascii_fw_dump
11652  *	Converts ISP24xx firmware binary dump to ascii.
11653  *
11654  * Input:
11655  *	ha = adapter state pointer.
11656  *	bptr = buffer pointer.
11657  *
11658  * Returns:
11659  *	Amount of data buffer used.
11660  *
11661  * Context:
11662  *	Kernel context.
11663  */
11664 static size_t
11665 ql_24xx_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
11666 {
11667 	uint32_t		cnt;
11668 	caddr_t			bp = bufp;
11669 	ql_24xx_fw_dump_t	*fw = ql_dump_ptr;
11670 
11671 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11672 
11673 	(void) sprintf(bp, "ISP FW Version %d.%02d.%02d Attributes %X\n",
11674 	    ha->fw_major_version, ha->fw_minor_version,
11675 	    ha->fw_subminor_version, ha->fw_attributes);
11676 	bp += strlen(bp);
11677 
11678 	(void) sprintf(bp, "\nHCCR Register\n%08x\n", fw->hccr);
11679 
11680 	(void) strcat(bp, "\nHost Interface Registers");
11681 	bp += strlen(bp);
11682 	for (cnt = 0; cnt < sizeof (fw->host_reg) / 4; cnt++) {
11683 		if (cnt % 8 == 0) {
11684 			(void) sprintf(bp++, "\n");
11685 		}
11686 
11687 		(void) sprintf(bp, "%08x ", fw->host_reg[cnt]);
11688 		bp += 9;
11689 	}
11690 
11691 	(void) sprintf(bp, "\n\nMailbox Registers");
11692 	bp += strlen(bp);
11693 	for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
11694 		if (cnt % 16 == 0) {
11695 			(void) sprintf(bp++, "\n");
11696 		}
11697 
11698 		(void) sprintf(bp, "%04x ", fw->mailbox_reg[cnt]);
11699 		bp += 5;
11700 	}
11701 
11702 	(void) sprintf(bp, "\n\nXSEQ GP Registers");
11703 	bp += strlen(bp);
11704 	for (cnt = 0; cnt < sizeof (fw->xseq_gp_reg) / 4; cnt++) {
11705 		if (cnt % 8 == 0) {
11706 			(void) sprintf(bp++, "\n");
11707 		}
11708 
11709 		(void) sprintf(bp, "%08x ", fw->xseq_gp_reg[cnt]);
11710 		bp += 9;
11711 	}
11712 
11713 	(void) sprintf(bp, "\n\nXSEQ-0 Registers");
11714 	bp += strlen(bp);
11715 	for (cnt = 0; cnt < sizeof (fw->xseq_0_reg) / 4; cnt++) {
11716 		if (cnt % 8 == 0) {
11717 			(void) sprintf(bp++, "\n");
11718 		}
11719 
11720 		(void) sprintf(bp, "%08x ", fw->xseq_0_reg[cnt]);
11721 		bp += 9;
11722 	}
11723 
11724 	(void) sprintf(bp, "\n\nXSEQ-1 Registers");
11725 	bp += strlen(bp);
11726 	for (cnt = 0; cnt < sizeof (fw->xseq_1_reg) / 4; cnt++) {
11727 		if (cnt % 8 == 0) {
11728 			(void) sprintf(bp++, "\n");
11729 		}
11730 
11731 		(void) sprintf(bp, "%08x ", fw->xseq_1_reg[cnt]);
11732 		bp += 9;
11733 	}
11734 
11735 	(void) sprintf(bp, "\n\nRSEQ GP Registers");
11736 	bp += strlen(bp);
11737 	for (cnt = 0; cnt < sizeof (fw->rseq_gp_reg) / 4; cnt++) {
11738 		if (cnt % 8 == 0) {
11739 			(void) sprintf(bp++, "\n");
11740 		}
11741 
11742 		(void) sprintf(bp, "%08x ", fw->rseq_gp_reg[cnt]);
11743 		bp += 9;
11744 	}
11745 
11746 	(void) sprintf(bp, "\n\nRSEQ-0 Registers");
11747 	bp += strlen(bp);
11748 	for (cnt = 0; cnt < sizeof (fw->rseq_0_reg) / 4; cnt++) {
11749 		if (cnt % 8 == 0) {
11750 			(void) sprintf(bp++, "\n");
11751 		}
11752 
11753 		(void) sprintf(bp, "%08x ", fw->rseq_0_reg[cnt]);
11754 		bp += 9;
11755 	}
11756 
11757 	(void) sprintf(bp, "\n\nRSEQ-1 Registers");
11758 	bp += strlen(bp);
11759 	for (cnt = 0; cnt < sizeof (fw->rseq_1_reg) / 4; cnt++) {
11760 		if (cnt % 8 == 0) {
11761 			(void) sprintf(bp++, "\n");
11762 		}
11763 
11764 		(void) sprintf(bp, "%08x ", fw->rseq_1_reg[cnt]);
11765 		bp += 9;
11766 	}
11767 
11768 	(void) sprintf(bp, "\n\nRSEQ-2 Registers");
11769 	bp += strlen(bp);
11770 	for (cnt = 0; cnt < sizeof (fw->rseq_2_reg) / 4; cnt++) {
11771 		if (cnt % 8 == 0) {
11772 			(void) sprintf(bp++, "\n");
11773 		}
11774 
11775 		(void) sprintf(bp, "%08x ", fw->rseq_2_reg[cnt]);
11776 		bp += 9;
11777 	}
11778 
11779 	(void) sprintf(bp, "\n\nCommand DMA Registers");
11780 	bp += strlen(bp);
11781 	for (cnt = 0; cnt < sizeof (fw->cmd_dma_reg) / 4; cnt++) {
11782 		if (cnt % 8 == 0) {
11783 			(void) sprintf(bp++, "\n");
11784 		}
11785 
11786 		(void) sprintf(bp, "%08x ", fw->cmd_dma_reg[cnt]);
11787 		bp += 9;
11788 	}
11789 
11790 	(void) sprintf(bp, "\n\nRequest0 Queue DMA Channel Registers");
11791 	bp += strlen(bp);
11792 	for (cnt = 0; cnt < sizeof (fw->req0_dma_reg) / 4; cnt++) {
11793 		if (cnt % 8 == 0) {
11794 			(void) sprintf(bp++, "\n");
11795 		}
11796 
11797 		(void) sprintf(bp, "%08x ", fw->req0_dma_reg[cnt]);
11798 		bp += 9;
11799 	}
11800 
11801 	(void) sprintf(bp, "\n\nResponse0 Queue DMA Channel Registers");
11802 	bp += strlen(bp);
11803 	for (cnt = 0; cnt < sizeof (fw->resp0_dma_reg) / 4; cnt++) {
11804 		if (cnt % 8 == 0) {
11805 			(void) sprintf(bp++, "\n");
11806 		}
11807 
11808 		(void) sprintf(bp, "%08x ", fw->resp0_dma_reg[cnt]);
11809 		bp += 9;
11810 	}
11811 
11812 	(void) sprintf(bp, "\n\nRequest1 Queue DMA Channel Registers");
11813 	bp += strlen(bp);
11814 	for (cnt = 0; cnt < sizeof (fw->req1_dma_reg) / 4; cnt++) {
11815 		if (cnt % 8 == 0) {
11816 			(void) sprintf(bp++, "\n");
11817 		}
11818 
11819 		(void) sprintf(bp, "%08x ", fw->req1_dma_reg[cnt]);
11820 		bp += 9;
11821 	}
11822 
11823 	(void) sprintf(bp, "\n\nXMT0 Data DMA Registers");
11824 	bp += strlen(bp);
11825 	for (cnt = 0; cnt < sizeof (fw->xmt0_dma_reg) / 4; cnt++) {
11826 		if (cnt % 8 == 0) {
11827 			(void) sprintf(bp++, "\n");
11828 		}
11829 
11830 		(void) sprintf(bp, "%08x ", fw->xmt0_dma_reg[cnt]);
11831 		bp += 9;
11832 	}
11833 
11834 	(void) sprintf(bp, "\n\nXMT1 Data DMA Registers");
11835 	bp += strlen(bp);
11836 	for (cnt = 0; cnt < sizeof (fw->xmt1_dma_reg) / 4; cnt++) {
11837 		if (cnt % 8 == 0) {
11838 			(void) sprintf(bp++, "\n");
11839 		}
11840 
11841 		(void) sprintf(bp, "%08x ", fw->xmt1_dma_reg[cnt]);
11842 		bp += 9;
11843 	}
11844 
11845 	(void) sprintf(bp, "\n\nXMT2 Data DMA Registers");
11846 	bp += strlen(bp);
11847 	for (cnt = 0; cnt < sizeof (fw->xmt2_dma_reg) / 4; cnt++) {
11848 		if (cnt % 8 == 0) {
11849 			(void) sprintf(bp++, "\n");
11850 		}
11851 
11852 		(void) sprintf(bp, "%08x ", fw->xmt2_dma_reg[cnt]);
11853 		bp += 9;
11854 	}
11855 
11856 	(void) sprintf(bp, "\n\nXMT3 Data DMA Registers");
11857 	bp += strlen(bp);
11858 	for (cnt = 0; cnt < sizeof (fw->xmt3_dma_reg) / 4; cnt++) {
11859 		if (cnt % 8 == 0) {
11860 			(void) sprintf(bp++, "\n");
11861 		}
11862 
11863 		(void) sprintf(bp, "%08x ", fw->xmt3_dma_reg[cnt]);
11864 		bp += 9;
11865 	}
11866 
11867 	(void) sprintf(bp, "\n\nXMT4 Data DMA Registers");
11868 	bp += strlen(bp);
11869 	for (cnt = 0; cnt < sizeof (fw->xmt4_dma_reg) / 4; cnt++) {
11870 		if (cnt % 8 == 0) {
11871 			(void) sprintf(bp++, "\n");
11872 		}
11873 
11874 		(void) sprintf(bp, "%08x ", fw->xmt4_dma_reg[cnt]);
11875 		bp += 9;
11876 	}
11877 
11878 	(void) sprintf(bp, "\n\nXMT Data DMA Common Registers");
11879 	bp += strlen(bp);
11880 	for (cnt = 0; cnt < sizeof (fw->xmt_data_dma_reg) / 4; cnt++) {
11881 		if (cnt % 8 == 0) {
11882 			(void) sprintf(bp++, "\n");
11883 		}
11884 
11885 		(void) sprintf(bp, "%08x ", fw->xmt_data_dma_reg[cnt]);
11886 		bp += 9;
11887 	}
11888 
11889 	(void) sprintf(bp, "\n\nRCV Thread 0 Data DMA Registers");
11890 	bp += strlen(bp);
11891 	for (cnt = 0; cnt < sizeof (fw->rcvt0_data_dma_reg) / 4; cnt++) {
11892 		if (cnt % 8 == 0) {
11893 			(void) sprintf(bp++, "\n");
11894 		}
11895 
11896 		(void) sprintf(bp, "%08x ", fw->rcvt0_data_dma_reg[cnt]);
11897 		bp += 9;
11898 	}
11899 
11900 	(void) sprintf(bp, "\n\nRCV Thread 1 Data DMA Registers");
11901 	bp += strlen(bp);
11902 	for (cnt = 0; cnt < sizeof (fw->rcvt1_data_dma_reg) / 4; cnt++) {
11903 		if (cnt % 8 == 0) {
11904 			(void) sprintf(bp++, "\n");
11905 		}
11906 
11907 		(void) sprintf(bp, "%08x ", fw->rcvt1_data_dma_reg[cnt]);
11908 		bp += 9;
11909 	}
11910 
11911 	(void) sprintf(bp, "\n\nRISC GP Registers");
11912 	bp += strlen(bp);
11913 	for (cnt = 0; cnt < sizeof (fw->risc_gp_reg) / 4; cnt++) {
11914 		if (cnt % 8 == 0) {
11915 			(void) sprintf(bp++, "\n");
11916 		}
11917 
11918 		(void) sprintf(bp, "%08x ", fw->risc_gp_reg[cnt]);
11919 		bp += 9;
11920 	}
11921 
11922 	(void) sprintf(bufp + strlen(bufp), "\n\nShadow Registers");
11923 	bp += strlen(bp);
11924 	for (cnt = 0; cnt < sizeof (fw->shadow_reg) / 4; cnt++) {
11925 		if (cnt % 8 == 0) {
11926 			(void) sprintf(bp++, "\n");
11927 		}
11928 
11929 		(void) sprintf(bp, "%08x ", fw->shadow_reg[cnt]);
11930 		bp += 9;
11931 	}
11932 
11933 	(void) sprintf(bp, "\n\nLMC Registers");
11934 	bp += strlen(bp);
11935 	for (cnt = 0; cnt < sizeof (fw->lmc_reg) / 4; cnt++) {
11936 		if (cnt % 8 == 0) {
11937 			(void) sprintf(bp++, "\n");
11938 		}
11939 
11940 		(void) sprintf(bp, "%08x ", fw->lmc_reg[cnt]);
11941 		bp += 9;
11942 	}
11943 
11944 	(void) sprintf(bp, "\n\nFPM Hardware Registers");
11945 	bp += strlen(bp);
11946 	for (cnt = 0; cnt < sizeof (fw->fpm_hdw_reg) / 4; cnt++) {
11947 		if (cnt % 8 == 0) {
11948 			(void) sprintf(bp++, "\n");
11949 		}
11950 
11951 		(void) sprintf(bp, "%08x ", fw->fpm_hdw_reg[cnt]);
11952 		bp += 9;
11953 	}
11954 
11955 	(void) sprintf(bp, "\n\nFB Hardware Registers");
11956 	bp += strlen(bp);
11957 	for (cnt = 0; cnt < sizeof (fw->fb_hdw_reg) / 4; cnt++) {
11958 		if (cnt % 8 == 0) {
11959 			(void) sprintf(bp++, "\n");
11960 		}
11961 
11962 		(void) sprintf(bp, "%08x ", fw->fb_hdw_reg[cnt]);
11963 		bp += 9;
11964 	}
11965 
11966 	(void) sprintf(bp, "\n\nCode RAM");
11967 	bp += strlen(bp);
11968 	for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
11969 		if (cnt % 8 == 0) {
11970 			(void) sprintf(bp, "\n%08x: ", cnt + 0x20000);
11971 			bp += 11;
11972 		}
11973 
11974 		(void) sprintf(bp, "%08x ", fw->code_ram[cnt]);
11975 		bp += 9;
11976 	}
11977 
11978 	(void) sprintf(bp, "\n\nExternal Memory");
11979 	bp += strlen(bp);
11980 	for (cnt = 0; cnt < ha->fw_ext_memory_size / 4; cnt++) {
11981 		if (cnt % 8 == 0) {
11982 			(void) sprintf(bp, "\n%08x: ", cnt + 0x100000);
11983 			bp += 11;
11984 		}
11985 		(void) sprintf(bp, "%08x ", fw->ext_mem[cnt]);
11986 		bp += 9;
11987 	}
11988 
11989 	(void) sprintf(bp, "\n[<==END] ISP Debug Dump");
11990 	bp += strlen(bp);
11991 
11992 	cnt = (uintptr_t)bp - (uintptr_t)bufp;
11993 
11994 	QL_PRINT_3(CE_CONT, "(%d): done=%xh\n", ha->instance, cnt);
11995 
11996 	return (cnt);
11997 }
11998 
11999 /*
12000  * ql_25xx_ascii_fw_dump
12001  *	Converts ISP25xx firmware binary dump to ascii.
12002  *
12003  * Input:
12004  *	ha = adapter state pointer.
12005  *	bptr = buffer pointer.
12006  *
12007  * Returns:
12008  *	Amount of data buffer used.
12009  *
12010  * Context:
12011  *	Kernel context.
12012  */
12013 static size_t
12014 ql_25xx_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
12015 {
12016 	uint32_t		cnt;
12017 	caddr_t			bp = bufp;
12018 	ql_25xx_fw_dump_t	*fw = ql_dump_ptr;
12019 
12020 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
12021 
12022 	(void) sprintf(bp, "ISP FW Version %d.%02d.%02d Attributes %X\n",
12023 	    ha->fw_major_version, ha->fw_minor_version,
12024 	    ha->fw_subminor_version, ha->fw_attributes);
12025 	bp += strlen(bp);
12026 
12027 	(void) sprintf(bp, "\nR2H Status Register\n%08x\n", fw->r2h_status);
12028 
12029 	(void) sprintf(bp, "\n\nHostRisc Registers");
12030 	bp += strlen(bp);
12031 	for (cnt = 0; cnt < sizeof (fw->hostrisc_reg) / 4; cnt++) {
12032 		if (cnt % 8 == 0) {
12033 			(void) sprintf(bp++, "\n");
12034 		}
12035 		(void) sprintf(bp, "%08x ", fw->hostrisc_reg[cnt]);
12036 		bp += 9;
12037 	}
12038 
12039 	(void) sprintf(bp, "\n\nPCIe Registers");
12040 	bp += strlen(bp);
12041 	for (cnt = 0; cnt < sizeof (fw->pcie_reg) / 4; cnt++) {
12042 		if (cnt % 8 == 0) {
12043 			(void) sprintf(bp++, "\n");
12044 		}
12045 		(void) sprintf(bp, "%08x ", fw->pcie_reg[cnt]);
12046 		bp += 9;
12047 	}
12048 
12049 	(void) strcat(bp, "\nHost Interface Registers");
12050 	bp += strlen(bp);
12051 	for (cnt = 0; cnt < sizeof (fw->host_reg) / 4; cnt++) {
12052 		if (cnt % 8 == 0) {
12053 			(void) sprintf(bp++, "\n");
12054 		}
12055 
12056 		(void) sprintf(bp, "%08x ", fw->host_reg[cnt]);
12057 		bp += 9;
12058 	}
12059 
12060 	(void) sprintf(bp, "\n\nMailbox Registers");
12061 	bp += strlen(bp);
12062 	for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
12063 		if (cnt % 16 == 0) {
12064 			(void) sprintf(bp++, "\n");
12065 		}
12066 
12067 		(void) sprintf(bp, "%04x ", fw->mailbox_reg[cnt]);
12068 		bp += 5;
12069 	}
12070 
12071 	(void) sprintf(bp, "\n\nXSEQ GP Registers");
12072 	bp += strlen(bp);
12073 	for (cnt = 0; cnt < sizeof (fw->xseq_gp_reg) / 4; cnt++) {
12074 		if (cnt % 8 == 0) {
12075 			(void) sprintf(bp++, "\n");
12076 		}
12077 
12078 		(void) sprintf(bp, "%08x ", fw->xseq_gp_reg[cnt]);
12079 		bp += 9;
12080 	}
12081 
12082 	(void) sprintf(bp, "\n\nXSEQ-0 Registers");
12083 	bp += strlen(bp);
12084 	for (cnt = 0; cnt < sizeof (fw->xseq_0_reg) / 4; cnt++) {
12085 		if (cnt % 8 == 0) {
12086 			(void) sprintf(bp++, "\n");
12087 		}
12088 
12089 		(void) sprintf(bp, "%08x ", fw->xseq_0_reg[cnt]);
12090 		bp += 9;
12091 	}
12092 
12093 	(void) sprintf(bp, "\n\nXSEQ-1 Registers");
12094 	bp += strlen(bp);
12095 	for (cnt = 0; cnt < sizeof (fw->xseq_1_reg) / 4; cnt++) {
12096 		if (cnt % 8 == 0) {
12097 			(void) sprintf(bp++, "\n");
12098 		}
12099 
12100 		(void) sprintf(bp, "%08x ", fw->xseq_1_reg[cnt]);
12101 		bp += 9;
12102 	}
12103 
12104 	(void) sprintf(bp, "\n\nRSEQ GP Registers");
12105 	bp += strlen(bp);
12106 	for (cnt = 0; cnt < sizeof (fw->rseq_gp_reg) / 4; cnt++) {
12107 		if (cnt % 8 == 0) {
12108 			(void) sprintf(bp++, "\n");
12109 		}
12110 
12111 		(void) sprintf(bp, "%08x ", fw->rseq_gp_reg[cnt]);
12112 		bp += 9;
12113 	}
12114 
12115 	(void) sprintf(bp, "\n\nRSEQ-0 Registers");
12116 	bp += strlen(bp);
12117 	for (cnt = 0; cnt < sizeof (fw->rseq_0_reg) / 4; cnt++) {
12118 		if (cnt % 8 == 0) {
12119 			(void) sprintf(bp++, "\n");
12120 		}
12121 
12122 		(void) sprintf(bp, "%08x ", fw->rseq_0_reg[cnt]);
12123 		bp += 9;
12124 	}
12125 
12126 	(void) sprintf(bp, "\n\nRSEQ-1 Registers");
12127 	bp += strlen(bp);
12128 	for (cnt = 0; cnt < sizeof (fw->rseq_1_reg) / 4; cnt++) {
12129 		if (cnt % 8 == 0) {
12130 			(void) sprintf(bp++, "\n");
12131 		}
12132 
12133 		(void) sprintf(bp, "%08x ", fw->rseq_1_reg[cnt]);
12134 		bp += 9;
12135 	}
12136 
12137 	(void) sprintf(bp, "\n\nRSEQ-2 Registers");
12138 	bp += strlen(bp);
12139 	for (cnt = 0; cnt < sizeof (fw->rseq_2_reg) / 4; cnt++) {
12140 		if (cnt % 8 == 0) {
12141 			(void) sprintf(bp++, "\n");
12142 		}
12143 
12144 		(void) sprintf(bp, "%08x ", fw->rseq_2_reg[cnt]);
12145 		bp += 9;
12146 	}
12147 
12148 	(void) sprintf(bp, "\n\nASEQ GP Registers");
12149 	bp += strlen(bp);
12150 	for (cnt = 0; cnt < sizeof (fw->aseq_gp_reg) / 4; cnt++) {
12151 		if (cnt % 8 == 0) {
12152 			(void) sprintf(bp++, "\n");
12153 		}
12154 
12155 		(void) sprintf(bp, "%08x ", fw->aseq_gp_reg[cnt]);
12156 		bp += 9;
12157 	}
12158 
12159 	(void) sprintf(bp, "\n\nASEQ-0 GP Registers");
12160 	bp += strlen(bp);
12161 	for (cnt = 0; cnt < sizeof (fw->aseq_0_reg) / 4; cnt++) {
12162 		if (cnt % 8 == 0) {
12163 			(void) sprintf(bp++, "\n");
12164 		}
12165 
12166 		(void) sprintf(bp, "%08x ", fw->aseq_0_reg[cnt]);
12167 		bp += 9;
12168 	}
12169 
12170 	(void) sprintf(bp, "\n\nASEQ-1 GP Registers");
12171 	bp += strlen(bp);
12172 	for (cnt = 0; cnt < sizeof (fw->aseq_1_reg) / 4; cnt++) {
12173 		if (cnt % 8 == 0) {
12174 			(void) sprintf(bp++, "\n");
12175 		}
12176 
12177 		(void) sprintf(bp, "%08x ", fw->aseq_1_reg[cnt]);
12178 		bp += 9;
12179 	}
12180 
12181 	(void) sprintf(bp, "\n\nASEQ-2 GP Registers");
12182 	bp += strlen(bp);
12183 	for (cnt = 0; cnt < sizeof (fw->aseq_2_reg) / 4; cnt++) {
12184 		if (cnt % 8 == 0) {
12185 			(void) sprintf(bp++, "\n");
12186 		}
12187 
12188 		(void) sprintf(bp, "%08x ", fw->aseq_2_reg[cnt]);
12189 		bp += 9;
12190 	}
12191 
12192 	(void) sprintf(bp, "\n\nCommand DMA Registers");
12193 	bp += strlen(bp);
12194 	for (cnt = 0; cnt < sizeof (fw->cmd_dma_reg) / 4; cnt++) {
12195 		if (cnt % 8 == 0) {
12196 			(void) sprintf(bp++, "\n");
12197 		}
12198 
12199 		(void)  sprintf(bp, "%08x ", fw->cmd_dma_reg[cnt]);
12200 		bp += 9;
12201 	}
12202 
12203 	(void) sprintf(bp, "\n\nRequest0 Queue DMA Channel Registers");
12204 	bp += strlen(bp);
12205 	for (cnt = 0; cnt < sizeof (fw->req0_dma_reg) / 4; cnt++) {
12206 		if (cnt % 8 == 0) {
12207 			(void) sprintf(bp++, "\n");
12208 		}
12209 
12210 		(void) sprintf(bp, "%08x ", fw->req0_dma_reg[cnt]);
12211 		bp += 9;
12212 	}
12213 
12214 	(void) sprintf(bp, "\n\nResponse0 Queue DMA Channel Registers");
12215 	bp += strlen(bp);
12216 	for (cnt = 0; cnt < sizeof (fw->resp0_dma_reg) / 4; cnt++) {
12217 		if (cnt % 8 == 0) {
12218 			(void) sprintf(bp++, "\n");
12219 		}
12220 
12221 		(void) sprintf(bp, "%08x ", fw->resp0_dma_reg[cnt]);
12222 		bp += 9;
12223 	}
12224 
12225 	(void) sprintf(bp, "\n\nRequest1 Queue DMA Channel Registers");
12226 	bp += strlen(bp);
12227 	for (cnt = 0; cnt < sizeof (fw->req1_dma_reg) / 4; cnt++) {
12228 		if (cnt % 8 == 0) {
12229 			(void) sprintf(bp++, "\n");
12230 		}
12231 
12232 		(void) sprintf(bp, "%08x ", fw->req1_dma_reg[cnt]);
12233 		bp += 9;
12234 	}
12235 
12236 	(void) sprintf(bp, "\n\nXMT0 Data DMA Registers");
12237 	bp += strlen(bp);
12238 	for (cnt = 0; cnt < sizeof (fw->xmt0_dma_reg) / 4; cnt++) {
12239 		if (cnt % 8 == 0) {
12240 			(void) sprintf(bp++, "\n");
12241 		}
12242 
12243 		(void) sprintf(bp, "%08x ", fw->xmt0_dma_reg[cnt]);
12244 		bp += 9;
12245 	}
12246 
12247 	(void) sprintf(bp, "\n\nXMT1 Data DMA Registers");
12248 	bp += strlen(bp);
12249 	for (cnt = 0; cnt < sizeof (fw->xmt1_dma_reg) / 4; cnt++) {
12250 		if (cnt % 8 == 0) {
12251 			(void) sprintf(bp++, "\n");
12252 		}
12253 
12254 		(void) sprintf(bp, "%08x ", fw->xmt1_dma_reg[cnt]);
12255 		bp += 9;
12256 	}
12257 
12258 	(void) sprintf(bp, "\n\nXMT2 Data DMA Registers");
12259 	bp += strlen(bp);
12260 	for (cnt = 0; cnt < sizeof (fw->xmt2_dma_reg) / 4; cnt++) {
12261 		if (cnt % 8 == 0) {
12262 			(void) sprintf(bp++, "\n");
12263 		}
12264 
12265 		(void) sprintf(bp, "%08x ", fw->xmt2_dma_reg[cnt]);
12266 		bp += 9;
12267 	}
12268 
12269 	(void) sprintf(bp, "\n\nXMT3 Data DMA Registers");
12270 	bp += strlen(bp);
12271 	for (cnt = 0; cnt < sizeof (fw->xmt3_dma_reg) / 4; cnt++) {
12272 		if (cnt % 8 == 0) {
12273 			(void) sprintf(bp++, "\n");
12274 		}
12275 
12276 		(void) sprintf(bp, "%08x ", fw->xmt3_dma_reg[cnt]);
12277 		bp += 9;
12278 	}
12279 
12280 	(void) sprintf(bp, "\n\nXMT4 Data DMA Registers");
12281 	bp += strlen(bp);
12282 	for (cnt = 0; cnt < sizeof (fw->xmt4_dma_reg) / 4; cnt++) {
12283 		if (cnt % 8 == 0) {
12284 			(void) sprintf(bp++, "\n");
12285 		}
12286 
12287 		(void) sprintf(bp, "%08x ", fw->xmt4_dma_reg[cnt]);
12288 		bp += 9;
12289 	}
12290 
12291 	(void) sprintf(bp, "\n\nXMT Data DMA Common Registers");
12292 	bp += strlen(bp);
12293 	for (cnt = 0; cnt < sizeof (fw->xmt_data_dma_reg) / 4; cnt++) {
12294 		if (cnt % 8 == 0) {
12295 			(void) sprintf(bp++, "\n");
12296 		}
12297 
12298 		(void) sprintf(bp, "%08x ", fw->xmt_data_dma_reg[cnt]);
12299 		bp += 9;
12300 	}
12301 
12302 	(void) sprintf(bp, "\n\nRCV Thread 0 Data DMA Registers");
12303 	bp += strlen(bp);
12304 	for (cnt = 0; cnt < sizeof (fw->rcvt0_data_dma_reg) / 4; cnt++) {
12305 		if (cnt % 8 == 0) {
12306 			(void) sprintf(bp++, "\n");
12307 		}
12308 
12309 		(void) sprintf(bp, "%08x ", fw->rcvt0_data_dma_reg[cnt]);
12310 		bp += 9;
12311 	}
12312 
12313 	(void) sprintf(bp, "\n\nRCV Thread 1 Data DMA Registers");
12314 	bp += strlen(bp);
12315 	for (cnt = 0; cnt < sizeof (fw->rcvt1_data_dma_reg) / 4; cnt++) {
12316 		if (cnt % 8 == 0) {
12317 			(void) sprintf(bp++, "\n");
12318 		}
12319 
12320 		(void) sprintf(bp, "%08x ", fw->rcvt1_data_dma_reg[cnt]);
12321 		bp += 9;
12322 	}
12323 
12324 	(void) sprintf(bp, "\n\nRISC GP Registers");
12325 	bp += strlen(bp);
12326 	for (cnt = 0; cnt < sizeof (fw->risc_gp_reg) / 4; cnt++) {
12327 		if (cnt % 8 == 0) {
12328 			(void) sprintf(bp++, "\n");
12329 		}
12330 
12331 		(void) sprintf(bp, "%08x ", fw->risc_gp_reg[cnt]);
12332 		bp += 9;
12333 	}
12334 
12335 	(void) sprintf(bufp + strlen(bufp), "\n\nShadow Registers");
12336 	bp += strlen(bp);
12337 	for (cnt = 0; cnt < sizeof (fw->shadow_reg) / 4; cnt++) {
12338 		if (cnt % 8 == 0) {
12339 			(void) sprintf(bp++, "\n");
12340 		}
12341 
12342 		(void) sprintf(bp, "%08x ", fw->shadow_reg[cnt]);
12343 		bp += 9;
12344 	}
12345 
12346 	(void) sprintf(bufp + strlen(bufp), "\n\nRISC IO Register\n%08x",
12347 	    fw->risc_io);
12348 	bp += strlen(bp);
12349 
12350 	(void) sprintf(bp, "\n\nLMC Registers");
12351 	bp += strlen(bp);
12352 	for (cnt = 0; cnt < sizeof (fw->lmc_reg) / 4; cnt++) {
12353 		if (cnt % 8 == 0) {
12354 			(void) sprintf(bp++, "\n");
12355 		}
12356 
12357 		(void) sprintf(bp, "%08x ", fw->lmc_reg[cnt]);
12358 		bp += 9;
12359 	}
12360 
12361 	(void) sprintf(bp, "\n\nFPM Hardware Registers");
12362 	bp += strlen(bp);
12363 	for (cnt = 0; cnt < sizeof (fw->fpm_hdw_reg) / 4; cnt++) {
12364 		if (cnt % 8 == 0) {
12365 			(void) sprintf(bp++, "\n");
12366 		}
12367 
12368 		(void) sprintf(bp, "%08x ", fw->fpm_hdw_reg[cnt]);
12369 		bp += 9;
12370 	}
12371 
12372 	(void) sprintf(bp, "\n\nFB Hardware Registers");
12373 	bp += strlen(bp);
12374 	for (cnt = 0; cnt < sizeof (fw->fb_hdw_reg) / 4; cnt++) {
12375 		if (cnt % 8 == 0) {
12376 			(void) sprintf(bp++, "\n");
12377 		}
12378 
12379 		(void) sprintf(bp, "%08x ", fw->fb_hdw_reg[cnt]);
12380 		bp += 9;
12381 	}
12382 
12383 	(void) sprintf(bp, "\n\nCode RAM");
12384 	bp += strlen(bp);
12385 	for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
12386 		if (cnt % 8 == 0) {
12387 			(void) sprintf(bp, "\n%08x: ", cnt + 0x20000);
12388 			bp += 11;
12389 		}
12390 
12391 		(void) sprintf(bp, "%08x ", fw->code_ram[cnt]);
12392 		bp += 9;
12393 	}
12394 
12395 	(void) sprintf(bp, "\n\nExternal Memory");
12396 	bp += strlen(bp);
12397 	for (cnt = 0; cnt < ha->fw_ext_memory_size / 4; cnt++) {
12398 		if (cnt % 8 == 0) {
12399 			(void) sprintf(bp, "\n%08x: ", cnt + 0x100000);
12400 			bp += 11;
12401 		}
12402 		(void) sprintf(bp, "%08x ", fw->ext_mem[cnt]);
12403 		bp += 9;
12404 	}
12405 
12406 	(void) sprintf(bp, "\n[<==END] ISP Debug Dump");
12407 	bp += strlen(bp);
12408 
12409 	(void) sprintf(bp, "\n\nRequest Queue");
12410 	bp += strlen(bp);
12411 	for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 2; cnt++) {
12412 		if (cnt % 16 == 0) {
12413 			(void) sprintf(bp++, "\n");
12414 		}
12415 		(void) sprintf(bp, "%04x ", fw->req_rsp_q[cnt]);
12416 		bp += 5;
12417 	}
12418 
12419 	(void) sprintf(bp, "\n\nResponse Queue");
12420 	bp += strlen(bp);
12421 	for (cnt = (REQUEST_QUEUE_SIZE / 2);
12422 	    cnt < (REQUEST_QUEUE_SIZE + RESPONSE_QUEUE_SIZE) / 2; cnt++) {
12423 		if (cnt % 16 == 0) {
12424 			(void) sprintf(bp++, "\n");
12425 		}
12426 		(void) sprintf(bp, "%04x ", fw->req_rsp_q[cnt]);
12427 		bp += 5;
12428 	}
12429 
12430 	cnt = (uintptr_t)bp - (uintptr_t)bufp;
12431 
12432 	QL_PRINT_3(CE_CONT, "(%d): done=%xh\n", ha->instance, cnt);
12433 
12434 	return (cnt);
12435 }
12436 
12437 /*
12438  * ql_2200_binary_fw_dump
12439  *
12440  * Input:
12441  *	ha:	adapter state pointer.
12442  *	fw:	firmware dump context pointer.
12443  *
12444  * Returns:
12445  *	ql local function return status code.
12446  *
12447  * Context:
12448  *	Interrupt or Kernel context, no mailbox commands allowed.
12449  */
12450 static int
12451 ql_2200_binary_fw_dump(ql_adapter_state_t *ha, ql_fw_dump_t *fw)
12452 {
12453 	uint32_t	cnt;
12454 	uint16_t	risc_address;
12455 	clock_t		timer;
12456 	mbx_cmd_t	mc;
12457 	mbx_cmd_t	*mcp = &mc;
12458 	int		rval = QL_SUCCESS;
12459 
12460 	/* Disable ISP interrupts. */
12461 	WRT16_IO_REG(ha, ictrl, 0);
12462 	ADAPTER_STATE_LOCK(ha);
12463 	ha->flags &= ~INTERRUPTS_ENABLED;
12464 	ADAPTER_STATE_UNLOCK(ha);
12465 
12466 	/* Release mailbox registers. */
12467 	WRT16_IO_REG(ha, semaphore, 0);
12468 
12469 	/* Pause RISC. */
12470 	WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
12471 	timer = 30000;
12472 	while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
12473 		if (timer-- != 0) {
12474 			drv_usecwait(MILLISEC);
12475 		} else {
12476 			rval = QL_FUNCTION_TIMEOUT;
12477 			break;
12478 		}
12479 	}
12480 
12481 	if (rval == QL_SUCCESS) {
12482 		(void) ql_read_regs(ha, fw->pbiu_reg, ha->iobase,
12483 		    sizeof (fw->pbiu_reg) / 2, 16);
12484 
12485 		/* In 2200 we only read 8 mailboxes */
12486 		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x10,
12487 		    8, 16);
12488 
12489 		(void) ql_read_regs(ha, fw->dma_reg, ha->iobase + 0x20,
12490 		    sizeof (fw->dma_reg) / 2, 16);
12491 
12492 		WRT16_IO_REG(ha, ctrl_status, 0);
12493 		(void) ql_read_regs(ha, fw->risc_hdw_reg, ha->iobase + 0xA0,
12494 		    sizeof (fw->risc_hdw_reg) / 2, 16);
12495 
12496 		WRT16_IO_REG(ha, pcr, 0x2000);
12497 		(void) ql_read_regs(ha, fw->risc_gp0_reg, ha->iobase + 0x80,
12498 		    sizeof (fw->risc_gp0_reg) / 2, 16);
12499 
12500 		WRT16_IO_REG(ha, pcr, 0x2100);
12501 		(void) ql_read_regs(ha, fw->risc_gp1_reg, ha->iobase + 0x80,
12502 		    sizeof (fw->risc_gp1_reg) / 2, 16);
12503 
12504 		WRT16_IO_REG(ha, pcr, 0x2200);
12505 		(void) ql_read_regs(ha, fw->risc_gp2_reg, ha->iobase + 0x80,
12506 		    sizeof (fw->risc_gp2_reg) / 2, 16);
12507 
12508 		WRT16_IO_REG(ha, pcr, 0x2300);
12509 		(void) ql_read_regs(ha, fw->risc_gp3_reg, ha->iobase + 0x80,
12510 		    sizeof (fw->risc_gp3_reg) / 2, 16);
12511 
12512 		WRT16_IO_REG(ha, pcr, 0x2400);
12513 		(void) ql_read_regs(ha, fw->risc_gp4_reg, ha->iobase + 0x80,
12514 		    sizeof (fw->risc_gp4_reg) / 2, 16);
12515 
12516 		WRT16_IO_REG(ha, pcr, 0x2500);
12517 		(void) ql_read_regs(ha, fw->risc_gp5_reg, ha->iobase + 0x80,
12518 		    sizeof (fw->risc_gp5_reg) / 2, 16);
12519 
12520 		WRT16_IO_REG(ha, pcr, 0x2600);
12521 		(void) ql_read_regs(ha, fw->risc_gp6_reg, ha->iobase + 0x80,
12522 		    sizeof (fw->risc_gp6_reg) / 2, 16);
12523 
12524 		WRT16_IO_REG(ha, pcr, 0x2700);
12525 		(void) ql_read_regs(ha, fw->risc_gp7_reg, ha->iobase + 0x80,
12526 		    sizeof (fw->risc_gp7_reg) / 2, 16);
12527 
12528 		WRT16_IO_REG(ha, ctrl_status, 0x10);
12529 		/* 2200 has only 16 registers */
12530 		(void) ql_read_regs(ha, fw->frame_buf_hdw_reg,
12531 		    ha->iobase + 0x80, 16, 16);
12532 
12533 		WRT16_IO_REG(ha, ctrl_status, 0x20);
12534 		(void) ql_read_regs(ha, fw->fpm_b0_reg, ha->iobase + 0x80,
12535 		    sizeof (fw->fpm_b0_reg) / 2, 16);
12536 
12537 		WRT16_IO_REG(ha, ctrl_status, 0x30);
12538 		(void) ql_read_regs(ha, fw->fpm_b1_reg, ha->iobase + 0x80,
12539 		    sizeof (fw->fpm_b1_reg) / 2, 16);
12540 
12541 		/* Select FPM registers. */
12542 		WRT16_IO_REG(ha, ctrl_status, 0x20);
12543 
12544 		/* FPM Soft Reset. */
12545 		WRT16_IO_REG(ha, fpm_diag_config, 0x100);
12546 
12547 		/* Select frame buffer registers. */
12548 		WRT16_IO_REG(ha, ctrl_status, 0x10);
12549 
12550 		/* Reset frame buffer FIFOs. */
12551 		WRT16_IO_REG(ha, fb_cmd, 0xa000);
12552 
12553 		/* Select RISC module registers. */
12554 		WRT16_IO_REG(ha, ctrl_status, 0);
12555 
12556 		/* Reset RISC module. */
12557 		WRT16_IO_REG(ha, hccr, HC_RESET_RISC);
12558 
12559 		/* Reset ISP semaphore. */
12560 		WRT16_IO_REG(ha, semaphore, 0);
12561 
12562 		/* Release RISC module. */
12563 		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
12564 
12565 		/* Wait for RISC to recover from reset. */
12566 		timer = 30000;
12567 		while (RD16_IO_REG(ha, mailbox[0]) == MBS_BUSY) {
12568 			if (timer-- != 0) {
12569 				drv_usecwait(MILLISEC);
12570 			} else {
12571 				rval = QL_FUNCTION_TIMEOUT;
12572 				break;
12573 			}
12574 		}
12575 
12576 		/* Disable RISC pause on FPM parity error. */
12577 		WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);
12578 	}
12579 
12580 	if (rval == QL_SUCCESS) {
12581 		/* Pause RISC. */
12582 		WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
12583 		timer = 30000;
12584 		while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
12585 			if (timer-- != 0) {
12586 				drv_usecwait(MILLISEC);
12587 			} else {
12588 				rval = QL_FUNCTION_TIMEOUT;
12589 				break;
12590 			}
12591 		}
12592 	}
12593 
12594 	if (rval == QL_SUCCESS) {
12595 		/* Set memory configuration and timing. */
12596 		WRT16_IO_REG(ha, mctr, 0xf2);
12597 
12598 		/* Release RISC. */
12599 		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
12600 
12601 		/* Get RISC SRAM. */
12602 		risc_address = 0x1000;
12603 		WRT16_IO_REG(ha, mailbox[0], MBC_READ_RAM_WORD);
12604 		for (cnt = 0; cnt < 0xf000; cnt++) {
12605 			WRT16_IO_REG(ha, mailbox[1], risc_address++);
12606 			WRT16_IO_REG(ha, hccr, HC_SET_HOST_INT);
12607 			for (timer = 6000000; timer != 0; timer--) {
12608 				/* Check for pending interrupts. */
12609 				if (RD16_IO_REG(ha, istatus) & RISC_INT) {
12610 					if (RD16_IO_REG(ha, semaphore) &
12611 					    BIT_0) {
12612 						WRT16_IO_REG(ha, hccr,
12613 						    HC_CLR_RISC_INT);
12614 						mcp->mb[0] = RD16_IO_REG(ha,
12615 						    mailbox[0]);
12616 						fw->risc_ram[cnt] =
12617 						    RD16_IO_REG(ha,
12618 						    mailbox[2]);
12619 						WRT16_IO_REG(ha,
12620 						    semaphore, 0);
12621 						break;
12622 					}
12623 					WRT16_IO_REG(ha, hccr,
12624 					    HC_CLR_RISC_INT);
12625 				}
12626 				drv_usecwait(5);
12627 			}
12628 
12629 			if (timer == 0) {
12630 				rval = QL_FUNCTION_TIMEOUT;
12631 			} else {
12632 				rval = mcp->mb[0];
12633 			}
12634 
12635 			if (rval != QL_SUCCESS) {
12636 				break;
12637 			}
12638 		}
12639 	}
12640 
12641 	return (rval);
12642 }
12643 
12644 /*
12645  * ql_2300_binary_fw_dump
12646  *
12647  * Input:
12648  *	ha:	adapter state pointer.
12649  *	fw:	firmware dump context pointer.
12650  *
12651  * Returns:
12652  *	ql local function return status code.
12653  *
12654  * Context:
12655  *	Interrupt or Kernel context, no mailbox commands allowed.
12656  */
12657 static int
12658 ql_2300_binary_fw_dump(ql_adapter_state_t *ha, ql_fw_dump_t *fw)
12659 {
12660 	clock_t	timer;
12661 	int	rval = QL_SUCCESS;
12662 
12663 	/* Disable ISP interrupts. */
12664 	WRT16_IO_REG(ha, ictrl, 0);
12665 	ADAPTER_STATE_LOCK(ha);
12666 	ha->flags &= ~INTERRUPTS_ENABLED;
12667 	ADAPTER_STATE_UNLOCK(ha);
12668 
12669 	/* Release mailbox registers. */
12670 	WRT16_IO_REG(ha, semaphore, 0);
12671 
12672 	/* Pause RISC. */
12673 	WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
12674 	timer = 30000;
12675 	while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
12676 		if (timer-- != 0) {
12677 			drv_usecwait(MILLISEC);
12678 		} else {
12679 			rval = QL_FUNCTION_TIMEOUT;
12680 			break;
12681 		}
12682 	}
12683 
12684 	if (rval == QL_SUCCESS) {
12685 		(void) ql_read_regs(ha, fw->pbiu_reg, ha->iobase,
12686 		    sizeof (fw->pbiu_reg) / 2, 16);
12687 
12688 		(void) ql_read_regs(ha, fw->risc_host_reg, ha->iobase + 0x10,
12689 		    sizeof (fw->risc_host_reg) / 2, 16);
12690 
12691 		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x40,
12692 		    sizeof (fw->mailbox_reg) / 2, 16);
12693 
12694 		WRT16_IO_REG(ha, ctrl_status, 0x40);
12695 		(void) ql_read_regs(ha, fw->resp_dma_reg, ha->iobase + 0x80,
12696 		    sizeof (fw->resp_dma_reg) / 2, 16);
12697 
12698 		WRT16_IO_REG(ha, ctrl_status, 0x50);
12699 		(void) ql_read_regs(ha, fw->dma_reg, ha->iobase + 0x80,
12700 		    sizeof (fw->dma_reg) / 2, 16);
12701 
12702 		WRT16_IO_REG(ha, ctrl_status, 0);
12703 		(void) ql_read_regs(ha, fw->risc_hdw_reg, ha->iobase + 0xA0,
12704 		    sizeof (fw->risc_hdw_reg) / 2, 16);
12705 
12706 		WRT16_IO_REG(ha, pcr, 0x2000);
12707 		(void) ql_read_regs(ha, fw->risc_gp0_reg, ha->iobase + 0x80,
12708 		    sizeof (fw->risc_gp0_reg) / 2, 16);
12709 
12710 		WRT16_IO_REG(ha, pcr, 0x2200);
12711 		(void) ql_read_regs(ha, fw->risc_gp1_reg, ha->iobase + 0x80,
12712 		    sizeof (fw->risc_gp1_reg) / 2, 16);
12713 
12714 		WRT16_IO_REG(ha, pcr, 0x2400);
12715 		(void) ql_read_regs(ha, fw->risc_gp2_reg, ha->iobase + 0x80,
12716 		    sizeof (fw->risc_gp2_reg) / 2, 16);
12717 
12718 		WRT16_IO_REG(ha, pcr, 0x2600);
12719 		(void) ql_read_regs(ha, fw->risc_gp3_reg, ha->iobase + 0x80,
12720 		    sizeof (fw->risc_gp3_reg) / 2, 16);
12721 
12722 		WRT16_IO_REG(ha, pcr, 0x2800);
12723 		(void) ql_read_regs(ha, fw->risc_gp4_reg, ha->iobase + 0x80,
12724 		    sizeof (fw->risc_gp4_reg) / 2, 16);
12725 
12726 		WRT16_IO_REG(ha, pcr, 0x2A00);
12727 		(void) ql_read_regs(ha, fw->risc_gp5_reg, ha->iobase + 0x80,
12728 		    sizeof (fw->risc_gp5_reg) / 2, 16);
12729 
12730 		WRT16_IO_REG(ha, pcr, 0x2C00);
12731 		(void) ql_read_regs(ha, fw->risc_gp6_reg, ha->iobase + 0x80,
12732 		    sizeof (fw->risc_gp6_reg) / 2, 16);
12733 
12734 		WRT16_IO_REG(ha, pcr, 0x2E00);
12735 		(void) ql_read_regs(ha, fw->risc_gp7_reg, ha->iobase + 0x80,
12736 		    sizeof (fw->risc_gp7_reg) / 2, 16);
12737 
12738 		WRT16_IO_REG(ha, ctrl_status, 0x10);
12739 		(void) ql_read_regs(ha, fw->frame_buf_hdw_reg,
12740 		    ha->iobase + 0x80, sizeof (fw->frame_buf_hdw_reg) / 2, 16);
12741 
12742 		WRT16_IO_REG(ha, ctrl_status, 0x20);
12743 		(void) ql_read_regs(ha, fw->fpm_b0_reg, ha->iobase + 0x80,
12744 		    sizeof (fw->fpm_b0_reg) / 2, 16);
12745 
12746 		WRT16_IO_REG(ha, ctrl_status, 0x30);
12747 		(void) ql_read_regs(ha, fw->fpm_b1_reg, ha->iobase + 0x80,
12748 		    sizeof (fw->fpm_b1_reg) / 2, 16);
12749 
12750 		/* Select FPM registers. */
12751 		WRT16_IO_REG(ha, ctrl_status, 0x20);
12752 
12753 		/* FPM Soft Reset. */
12754 		WRT16_IO_REG(ha, fpm_diag_config, 0x100);
12755 
12756 		/* Select frame buffer registers. */
12757 		WRT16_IO_REG(ha, ctrl_status, 0x10);
12758 
12759 		/* Reset frame buffer FIFOs. */
12760 		WRT16_IO_REG(ha, fb_cmd, 0xa000);
12761 
12762 		/* Select RISC module registers. */
12763 		WRT16_IO_REG(ha, ctrl_status, 0);
12764 
12765 		/* Reset RISC module. */
12766 		WRT16_IO_REG(ha, hccr, HC_RESET_RISC);
12767 
12768 		/* Reset ISP semaphore. */
12769 		WRT16_IO_REG(ha, semaphore, 0);
12770 
12771 		/* Release RISC module. */
12772 		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
12773 
12774 		/* Wait for RISC to recover from reset. */
12775 		timer = 30000;
12776 		while (RD16_IO_REG(ha, mailbox[0]) == MBS_BUSY) {
12777 			if (timer-- != 0) {
12778 				drv_usecwait(MILLISEC);
12779 			} else {
12780 				rval = QL_FUNCTION_TIMEOUT;
12781 				break;
12782 			}
12783 		}
12784 
12785 		/* Disable RISC pause on FPM parity error. */
12786 		WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);
12787 	}
12788 
12789 	/* Get RISC SRAM. */
12790 	if (rval == QL_SUCCESS) {
12791 		rval = ql_read_risc_ram(ha, 0x800, 0xf800, fw->risc_ram);
12792 	}
12793 	/* Get STACK SRAM. */
12794 	if (rval == QL_SUCCESS) {
12795 		rval = ql_read_risc_ram(ha, 0x10000, 0x800, fw->stack_ram);
12796 	}
12797 	/* Get DATA SRAM. */
12798 	if (rval == QL_SUCCESS) {
12799 		rval = ql_read_risc_ram(ha, 0x10800, 0xf800, fw->data_ram);
12800 	}
12801 
12802 	return (rval);
12803 }
12804 
12805 /*
12806  * ql_24xx_binary_fw_dump
12807  *
12808  * Input:
12809  *	ha:	adapter state pointer.
12810  *	fw:	firmware dump context pointer.
12811  *
12812  * Returns:
12813  *	ql local function return status code.
12814  *
12815  * Context:
12816  *	Interrupt or Kernel context, no mailbox commands allowed.
12817  */
static int
ql_24xx_binary_fw_dump(ql_adapter_state_t *ha, ql_24xx_fw_dump_t *fw)
{
	uint32_t	*reg32;
	void		*bp;
	clock_t		timer;
	int		rval = QL_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Capture the host command/control register before touching it. */
	fw->hccr = RD32_IO_REG(ha, hccr);

	/* Pause RISC. */
	if ((RD32_IO_REG(ha, intr_info_lo) & RH_RISC_PAUSED) == 0) {

		WRT32_IO_REG(ha, hccr, HC24_RESET_RISC | HC24_CLR_HOST_INT);
		RD32_IO_REG(ha, hccr);		/* PCI Posting. */
		WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
		/* Poll up to 30000 * 100us (~3s) for the pause to take. */
		for (timer = 30000;
		    (RD32_IO_REG(ha, intr_info_lo) & RH_RISC_PAUSED) == 0 &&
		    rval == QL_SUCCESS; timer--) {
			if (timer) {
				drv_usecwait(100);
			} else {
				rval = QL_FUNCTION_TIMEOUT;
			}
		}
	}

	if (rval == QL_SUCCESS) {
		/* Host interface registers. */
		(void) ql_read_regs(ha, fw->host_reg, ha->iobase,
		    sizeof (fw->host_reg) / 4, 32);

		/* Disable ISP interrupts. */
		WRT32_IO_REG(ha, ictrl, 0);
		RD32_IO_REG(ha, ictrl);		/* PCI Posting. */
		ADAPTER_STATE_LOCK(ha);
		ha->flags &= ~INTERRUPTS_ENABLED;
		ADAPTER_STATE_UNLOCK(ha);

		/* Mailbox registers. */
		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
		    sizeof (fw->mailbox_reg) / 2, 16);

		/*
		 * Each block below selects a register window by writing its
		 * base to io_base_addr and then reads the 16 dwords that
		 * appear at iobase + 0xC0.  ql_read_regs() returns a pointer
		 * just past the last word stored, so successive windows are
		 * appended into the same dump array via 'bp'.
		 */

		/* Transfer sequence registers. */
		WRT32_IO_REG(ha, io_base_addr, 0xBF00);
		bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
		    16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0xBF10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0xBF20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0xBF30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0xBF40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0xBF50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0xBF60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0xBF70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
		(void) ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
		    sizeof (fw->xseq_0_reg) / 4, 32);

		WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
		(void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
		    sizeof (fw->xseq_1_reg) / 4, 32);

		/* Receive sequence registers. */
		WRT32_IO_REG(ha, io_base_addr, 0xFF00);
		bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
		    16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0xFF10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0xFF20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0xFF30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0xFF40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0xFF50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0xFF60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0xFF70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
		(void) ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
		    sizeof (fw->rseq_0_reg) / 4, 32);

		WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
		(void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
		    sizeof (fw->rseq_1_reg) / 4, 32);

		WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
		(void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
		    sizeof (fw->rseq_2_reg) / 4, 32);

		/* Command DMA registers. */
		WRT32_IO_REG(ha, io_base_addr, 0x7100);
		(void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
		    sizeof (fw->cmd_dma_reg) / 4, 32);

		/* Queues: 8 dwords at 0xC0, then 7 more at 0xE4. */
		WRT32_IO_REG(ha, io_base_addr, 0x7200);
		bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
		    8, 32);

		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		WRT32_IO_REG(ha, io_base_addr, 0x7300);
		bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
		    8, 32);

		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		WRT32_IO_REG(ha, io_base_addr, 0x7400);
		bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
		    8, 32);

		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* Transmit DMA registers. */
		WRT32_IO_REG(ha, io_base_addr, 0x7600);
		bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
		    16, 32);

		/*
		 * NOTE(review): window 0x7600 is read a second time before
		 * 0x7610 is selected, so xmt0_dma_reg holds three 16-dword
		 * reads.  Looks deliberate (the 25xx routine does the same),
		 * but confirm xmt0_dma_reg is sized for 48 words.
		 */
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0x7610);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0x7620);
		bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
		    16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0x7630);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0x7640);
		bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
		    16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0x7650);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0x7660);
		bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
		    16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0x7670);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0x7680);
		bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
		    16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0x7690);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0x76A0);
		(void) ql_read_regs(ha, fw->xmt_data_dma_reg,
		    ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);

		/* Receive DMA registers. */
		WRT32_IO_REG(ha, io_base_addr, 0x7700);
		bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
		    ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0x7710);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0x7720);
		bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
		    ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0x7730);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RISC registers. */
		WRT32_IO_REG(ha, io_base_addr, 0x0F00);
		bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
		    16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0x0F10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0x0F20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0x0F30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0x0F40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0x0F50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0x0F60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/*
		 * NOTE(review): 0x0F70 is re-selected here (same window as
		 * above); the read-back of io_base_addr flushes the posted
		 * write before the shadow-register sequence below — confirm
		 * the repeat select is required and not a leftover.
		 */
		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
		RD32_IO_REG(ha, io_base_addr);
		/*
		 * Shadow registers: write a select code to iobase + 0xF0,
		 * then read the shadowed value back at iobase + 0xFC.
		 */
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0000000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0100000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0200000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0300000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0400000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0500000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0600000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);

		/* Local memory controller registers. */
		WRT32_IO_REG(ha, io_base_addr, 0x3000);
		bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
		    16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0x3010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0x3020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0x3030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0x3040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0x3050);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0x3060);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Fibre Protocol Module registers. */
		WRT32_IO_REG(ha, io_base_addr, 0x4000);
		bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
		    16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0x4010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0x4020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0x4030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0x4040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0x4050);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0x4060);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0x4070);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0x4080);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0x4090);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0x40A0);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0x40B0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Frame Buffer registers. */
		WRT32_IO_REG(ha, io_base_addr, 0x6000);
		bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
		    16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0x6010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0x6020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0x6030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0x6040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0x6100);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0x6130);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0x6150);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0x6170);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0x6190);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		WRT32_IO_REG(ha, io_base_addr, 0x61B0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

	}

	/* Reset RISC (unconditional, even if the register capture failed). */
	ql_reset_chip(ha);

	/* Memory. */
	if (rval == QL_SUCCESS) {
		/* Code RAM. */
		rval = ql_read_risc_ram(ha, 0x20000,
		    sizeof (fw->code_ram) / 4, fw->code_ram);
	}
	if (rval == QL_SUCCESS) {
		/* External Memory. */
		rval = ql_read_risc_ram(ha, 0x100000,
		    ha->fw_ext_memory_size / 4, fw->ext_mem);
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}

	return (rval);
}
13201 
13202 /*
13203  * ql_25xx_binary_fw_dump
13204  *
13205  * Input:
13206  *	ha:	adapter state pointer.
13207  *	fw:	firmware dump context pointer.
13208  *
13209  * Returns:
13210  *	ql local function return status code.
13211  *
13212  * Context:
13213  *	Interrupt or Kernel context, no mailbox commands allowed.
13214  */
13215 static int
13216 ql_25xx_binary_fw_dump(ql_adapter_state_t *ha, ql_25xx_fw_dump_t *fw)
13217 {
13218 	uint32_t	*reg32;
13219 	void		*bp;
13220 	clock_t		timer;
13221 	int		rval = QL_SUCCESS;
13222 
13223 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
13224 
13225 	fw->r2h_status = RD32_IO_REG(ha, intr_info_lo);
13226 
13227 	/* Pause RISC. */
13228 	if ((RD32_IO_REG(ha, intr_info_lo) & RH_RISC_PAUSED) == 0) {
13229 
13230 		WRT32_IO_REG(ha, hccr, HC24_RESET_RISC | HC24_CLR_HOST_INT);
13231 		RD32_IO_REG(ha, hccr);		/* PCI Posting. */
13232 		WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
13233 		for (timer = 30000;
13234 		    (RD32_IO_REG(ha, intr_info_lo) & RH_RISC_PAUSED) == 0 &&
13235 		    rval == QL_SUCCESS; timer--) {
13236 			if (timer) {
13237 				drv_usecwait(100);
13238 			} else {
13239 				EL(ha, "risc pause timeout\n");
13240 				rval = QL_FUNCTION_TIMEOUT;
13241 			}
13242 		}
13243 	}
13244 
13245 	if (rval == QL_SUCCESS) {
13246 
13247 		/* HostRisc registers. */
13248 		WRT32_IO_REG(ha, io_base_addr, 0x7000);
13249 		bp = ql_read_regs(ha, fw->hostrisc_reg, ha->iobase + 0xC0,
13250 		    16, 32);
13251 		WRT32_IO_REG(ha, io_base_addr, 0x7010);
13252 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13253 
13254 		/* PCIe registers. */
13255 		WRT32_IO_REG(ha, io_base_addr, 0x7c00);
13256 		WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x1);
13257 		bp = ql_read_regs(ha, fw->pcie_reg, ha->iobase + 0xC4,
13258 		    3, 32);
13259 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 1, 32);
13260 		WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x0);
13261 
13262 		/* Host interface registers. */
13263 		(void) ql_read_regs(ha, fw->host_reg, ha->iobase,
13264 		    sizeof (fw->host_reg) / 4, 32);
13265 
13266 		/* Disable ISP interrupts. */
13267 		WRT32_IO_REG(ha, ictrl, 0);
13268 		RD32_IO_REG(ha, ictrl);
13269 		ADAPTER_STATE_LOCK(ha);
13270 		ha->flags &= ~INTERRUPTS_ENABLED;
13271 		ADAPTER_STATE_UNLOCK(ha);
13272 
13273 		/* Mailbox registers. */
13274 		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
13275 		    sizeof (fw->mailbox_reg) / 2, 16);
13276 
13277 		/* Transfer sequence registers. */
13278 		WRT32_IO_REG(ha, io_base_addr, 0xBF00);
13279 		bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
13280 		    16, 32);
13281 
13282 		WRT32_IO_REG(ha, io_base_addr, 0xBF10);
13283 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13284 
13285 		WRT32_IO_REG(ha, io_base_addr, 0xBF20);
13286 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13287 
13288 		WRT32_IO_REG(ha, io_base_addr, 0xBF30);
13289 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13290 
13291 		WRT32_IO_REG(ha, io_base_addr, 0xBF40);
13292 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13293 
13294 		WRT32_IO_REG(ha, io_base_addr, 0xBF50);
13295 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13296 
13297 		WRT32_IO_REG(ha, io_base_addr, 0xBF60);
13298 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13299 
13300 		WRT32_IO_REG(ha, io_base_addr, 0xBF70);
13301 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13302 
13303 		WRT32_IO_REG(ha, io_base_addr, 0xBFC0);
13304 		bp = ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
13305 		    16, 32);
13306 
13307 		WRT32_IO_REG(ha, io_base_addr, 0xBFD0);
13308 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13309 
13310 		WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
13311 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13312 
13313 		WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
13314 		(void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
13315 		    16, 32);
13316 
13317 		/* Receive sequence registers. */
13318 		WRT32_IO_REG(ha, io_base_addr, 0xFF00);
13319 		bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
13320 		    16, 32);
13321 
13322 		WRT32_IO_REG(ha, io_base_addr, 0xFF10);
13323 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13324 
13325 		WRT32_IO_REG(ha, io_base_addr, 0xFF20);
13326 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13327 
13328 		WRT32_IO_REG(ha, io_base_addr, 0xFF30);
13329 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13330 
13331 		WRT32_IO_REG(ha, io_base_addr, 0xFF40);
13332 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13333 
13334 		WRT32_IO_REG(ha, io_base_addr, 0xFF50);
13335 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13336 
13337 		WRT32_IO_REG(ha, io_base_addr, 0xFF60);
13338 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13339 
13340 		WRT32_IO_REG(ha, io_base_addr, 0xFF70);
13341 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13342 
13343 		WRT32_IO_REG(ha, io_base_addr, 0xFFC0);
13344 		bp = ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
13345 		    16, 32);
13346 
13347 		WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
13348 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13349 
13350 		WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
13351 		(void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
13352 		    sizeof (fw->rseq_1_reg) / 4, 32);
13353 
13354 		WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
13355 		(void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
13356 		    sizeof (fw->rseq_2_reg) / 4, 32);
13357 
13358 		/* Auxiliary sequencer registers. */
13359 		WRT32_IO_REG(ha, io_base_addr, 0xB000);
13360 		bp = ql_read_regs(ha, fw->aseq_gp_reg, ha->iobase + 0xC0,
13361 		    16, 32);
13362 
13363 		WRT32_IO_REG(ha, io_base_addr, 0xB010);
13364 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13365 
13366 		WRT32_IO_REG(ha, io_base_addr, 0xB020);
13367 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13368 
13369 		WRT32_IO_REG(ha, io_base_addr, 0xB030);
13370 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13371 
13372 		WRT32_IO_REG(ha, io_base_addr, 0xB040);
13373 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13374 
13375 		WRT32_IO_REG(ha, io_base_addr, 0xB050);
13376 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13377 
13378 		WRT32_IO_REG(ha, io_base_addr, 0xB060);
13379 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13380 
13381 		WRT32_IO_REG(ha, io_base_addr, 0xB070);
13382 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13383 
13384 		WRT32_IO_REG(ha, io_base_addr, 0xB0C0);
13385 		bp = ql_read_regs(ha, fw->aseq_0_reg, ha->iobase + 0xC0,
13386 		    16, 32);
13387 
13388 		WRT32_IO_REG(ha, io_base_addr, 0xB0D0);
13389 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13390 
13391 		WRT32_IO_REG(ha, io_base_addr, 0xB0E0);
13392 		(void) ql_read_regs(ha, fw->aseq_1_reg, ha->iobase + 0xC0,
13393 		    16, 32);
13394 
13395 		WRT32_IO_REG(ha, io_base_addr, 0xB0F0);
13396 		(void) ql_read_regs(ha, fw->aseq_2_reg, ha->iobase + 0xC0,
13397 		    16, 32);
13398 
13399 		/* Command DMA registers. */
13400 		WRT32_IO_REG(ha, io_base_addr, 0x7100);
13401 		(void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
13402 		    sizeof (fw->cmd_dma_reg) / 4, 32);
13403 
13404 		/* Queues. */
13405 		WRT32_IO_REG(ha, io_base_addr, 0x7200);
13406 		bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
13407 		    8, 32);
13408 
13409 		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
13410 
13411 		WRT32_IO_REG(ha, io_base_addr, 0x7300);
13412 		bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
13413 		    8, 32);
13414 
13415 		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
13416 
13417 		WRT32_IO_REG(ha, io_base_addr, 0x7400);
13418 		bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
13419 		    8, 32);
13420 
13421 		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
13422 
13423 		/* Transmit DMA registers. */
13424 		WRT32_IO_REG(ha, io_base_addr, 0x7600);
13425 		bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
13426 		    16, 32);
13427 
13428 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13429 
13430 		WRT32_IO_REG(ha, io_base_addr, 0x7610);
13431 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13432 
13433 		WRT32_IO_REG(ha, io_base_addr, 0x7620);
13434 		bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
13435 		    16, 32);
13436 
13437 		WRT32_IO_REG(ha, io_base_addr, 0x7630);
13438 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13439 
13440 		WRT32_IO_REG(ha, io_base_addr, 0x7640);
13441 		bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
13442 		    16, 32);
13443 
13444 		WRT32_IO_REG(ha, io_base_addr, 0x7650);
13445 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13446 
13447 		WRT32_IO_REG(ha, io_base_addr, 0x7660);
13448 		bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
13449 		    16, 32);
13450 
13451 		WRT32_IO_REG(ha, io_base_addr, 0x7670);
13452 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13453 
13454 		WRT32_IO_REG(ha, io_base_addr, 0x7680);
13455 		bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
13456 		    16, 32);
13457 
13458 		WRT32_IO_REG(ha, io_base_addr, 0x7690);
13459 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13460 
13461 		WRT32_IO_REG(ha, io_base_addr, 0x76A0);
13462 		(void) ql_read_regs(ha, fw->xmt_data_dma_reg,
13463 		    ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);
13464 
13465 		/* Receive DMA registers. */
13466 		WRT32_IO_REG(ha, io_base_addr, 0x7700);
13467 		bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
13468 		    ha->iobase + 0xC0, 16, 32);
13469 
13470 		WRT32_IO_REG(ha, io_base_addr, 0x7710);
13471 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13472 
13473 		WRT32_IO_REG(ha, io_base_addr, 0x7720);
13474 		bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
13475 		    ha->iobase + 0xC0, 16, 32);
13476 
13477 		WRT32_IO_REG(ha, io_base_addr, 0x7730);
13478 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13479 
13480 		/* RISC registers. */
13481 		WRT32_IO_REG(ha, io_base_addr, 0x0F00);
13482 		bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
13483 		    16, 32);
13484 
13485 		WRT32_IO_REG(ha, io_base_addr, 0x0F10);
13486 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13487 
13488 		WRT32_IO_REG(ha, io_base_addr, 0x0F20);
13489 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13490 
13491 		WRT32_IO_REG(ha, io_base_addr, 0x0F30);
13492 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13493 
13494 		WRT32_IO_REG(ha, io_base_addr, 0x0F40);
13495 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13496 
13497 		WRT32_IO_REG(ha, io_base_addr, 0x0F50);
13498 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13499 
13500 		WRT32_IO_REG(ha, io_base_addr, 0x0F60);
13501 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13502 
13503 		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
13504 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13505 
13506 		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
13507 		RD32_IO_REG(ha, io_base_addr);
13508 
13509 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13510 		WRT_REG_DWORD(ha, reg32, 0xB0000000);
13511 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13512 		fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);
13513 
13514 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13515 		WRT_REG_DWORD(ha, reg32, 0xB0100000);
13516 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13517 		fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);
13518 
13519 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13520 		WRT_REG_DWORD(ha, reg32, 0xB0200000);
13521 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13522 		fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);
13523 
13524 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13525 		WRT_REG_DWORD(ha, reg32, 0xB0300000);
13526 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13527 		fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);
13528 
13529 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13530 		WRT_REG_DWORD(ha, reg32, 0xB0400000);
13531 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13532 		fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);
13533 
13534 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13535 		WRT_REG_DWORD(ha, reg32, 0xB0500000);
13536 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13537 		fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);
13538 
13539 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13540 		WRT_REG_DWORD(ha, reg32, 0xB0600000);
13541 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13542 		fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);
13543 
13544 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13545 		WRT_REG_DWORD(ha, reg32, 0xB0700000);
13546 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13547 		fw->shadow_reg[7] = RD_REG_DWORD(ha, reg32);
13548 
13549 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13550 		WRT_REG_DWORD(ha, reg32, 0xB0800000);
13551 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13552 		fw->shadow_reg[8] = RD_REG_DWORD(ha, reg32);
13553 
13554 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13555 		WRT_REG_DWORD(ha, reg32, 0xB0900000);
13556 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13557 		fw->shadow_reg[9] = RD_REG_DWORD(ha, reg32);
13558 
13559 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13560 		WRT_REG_DWORD(ha, reg32, 0xB0A00000);
13561 		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13562 		fw->shadow_reg[0xa] = RD_REG_DWORD(ha, reg32);
13563 
13564 		/* RISC IO register. */
13565 		WRT32_IO_REG(ha, io_base_addr, 0x0010);
13566 		(void) ql_read_regs(ha, &fw->risc_io, ha->iobase + 0xC0,
13567 		    1, 32);
13568 
13569 		/* Local memory controller (LMC) registers. */
13570 		WRT32_IO_REG(ha, io_base_addr, 0x3000);
13571 		bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
13572 		    16, 32);
13573 
13574 		WRT32_IO_REG(ha, io_base_addr, 0x3010);
13575 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13576 
13577 		WRT32_IO_REG(ha, io_base_addr, 0x3020);
13578 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13579 
13580 		WRT32_IO_REG(ha, io_base_addr, 0x3030);
13581 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13582 
13583 		WRT32_IO_REG(ha, io_base_addr, 0x3040);
13584 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13585 
13586 		WRT32_IO_REG(ha, io_base_addr, 0x3050);
13587 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13588 
13589 		WRT32_IO_REG(ha, io_base_addr, 0x3060);
13590 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13591 
13592 		WRT32_IO_REG(ha, io_base_addr, 0x3070);
13593 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13594 
13595 		/* Fibre Protocol Module registers. */
13596 		WRT32_IO_REG(ha, io_base_addr, 0x4000);
13597 		bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
13598 		    16, 32);
13599 
13600 		WRT32_IO_REG(ha, io_base_addr, 0x4010);
13601 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13602 
13603 		WRT32_IO_REG(ha, io_base_addr, 0x4020);
13604 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13605 
13606 		WRT32_IO_REG(ha, io_base_addr, 0x4030);
13607 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13608 
13609 		WRT32_IO_REG(ha, io_base_addr, 0x4040);
13610 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13611 
13612 		WRT32_IO_REG(ha, io_base_addr, 0x4050);
13613 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13614 
13615 		WRT32_IO_REG(ha, io_base_addr, 0x4060);
13616 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13617 
13618 		WRT32_IO_REG(ha, io_base_addr, 0x4070);
13619 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13620 
13621 		WRT32_IO_REG(ha, io_base_addr, 0x4080);
13622 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13623 
13624 		WRT32_IO_REG(ha, io_base_addr, 0x4090);
13625 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13626 
13627 		WRT32_IO_REG(ha, io_base_addr, 0x40A0);
13628 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13629 
13630 		WRT32_IO_REG(ha, io_base_addr, 0x40B0);
13631 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13632 
13633 		/* Frame Buffer registers. */
13634 		WRT32_IO_REG(ha, io_base_addr, 0x6000);
13635 		bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
13636 		    16, 32);
13637 
13638 		WRT32_IO_REG(ha, io_base_addr, 0x6010);
13639 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13640 
13641 		WRT32_IO_REG(ha, io_base_addr, 0x6020);
13642 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13643 
13644 		WRT32_IO_REG(ha, io_base_addr, 0x6030);
13645 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13646 
13647 		WRT32_IO_REG(ha, io_base_addr, 0x6040);
13648 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13649 
13650 		WRT32_IO_REG(ha, io_base_addr, 0x6100);
13651 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13652 
13653 		WRT32_IO_REG(ha, io_base_addr, 0x6130);
13654 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13655 
13656 		WRT32_IO_REG(ha, io_base_addr, 0x6150);
13657 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13658 
13659 		WRT32_IO_REG(ha, io_base_addr, 0x6170);
13660 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13661 
13662 		WRT32_IO_REG(ha, io_base_addr, 0x6190);
13663 		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13664 
13665 		WRT32_IO_REG(ha, io_base_addr, 0x61B0);
13666 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13667 
13668 		WRT32_IO_REG(ha, io_base_addr, 0x6F00);
13669 		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13670 	}
13671 
13672 	/* Reset RISC. */
13673 	ql_reset_chip(ha);
13674 
13675 	/* Memory. */
13676 	if (rval == QL_SUCCESS) {
13677 		/* Code RAM. */
13678 		rval = ql_read_risc_ram(ha, 0x20000,
13679 		    sizeof (fw->code_ram) / 4, fw->code_ram);
13680 	}
13681 	if (rval == QL_SUCCESS) {
13682 		/* External Memory. */
13683 		rval = ql_read_risc_ram(ha, 0x100000,
13684 		    ha->fw_ext_memory_size / 4, fw->ext_mem);
13685 	}
13686 
13687 	/* Get the request and response queues */
13688 	if (rval == QL_SUCCESS) {
13689 		uint32_t	cnt;
13690 		uint16_t	*w16 = ha->hba_buf.bp;
13691 
13692 		for (cnt = 0; cnt < sizeof (fw->req_rsp_q) / 2; cnt++) {
13693 			fw->req_rsp_q[cnt] = *w16++;
13694 			LITTLE_ENDIAN_16(&fw->req_rsp_q[cnt]);
13695 		}
13696 	}
13697 
13698 	if (rval != QL_SUCCESS) {
13699 		EL(ha, "failed=%xh\n", rval);
13700 	} else {
13701 		/*EMPTY*/
13702 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
13703 	}
13704 
13705 	return (rval);
13706 }
13707 
13708 /*
13709  * ql_read_risc_ram
13710  *	Reads RISC RAM one word at a time.
13711  *	Risc interrupts must be disabled when this routine is called.
13712  *
13713  * Input:
13714  *	ha:	adapter state pointer.
13715  *	risc_address:	RISC code start address.
13716  *	len:		Number of words.
13717  *	buf:		buffer pointer.
13718  *
13719  * Returns:
13720  *	ql local function return status code.
13721  *
13722  * Context:
13723  *	Interrupt or Kernel context, no mailbox commands allowed.
13724  */
static int
ql_read_risc_ram(ql_adapter_state_t *ha, uint32_t risc_address, uint32_t len,
    void *buf)
{
	uint32_t	cnt;
	uint16_t	stat;
	clock_t		timer;
	uint16_t	*buf16 = (uint16_t *)buf;
	uint32_t	*buf32 = (uint32_t *)buf;
	int		rval = QL_SUCCESS;

	/*
	 * Hand-craft one MBC_READ_RAM_EXTENDED mailbox command per word and
	 * poll for completion directly; normal interrupt-driven mailbox
	 * processing must be disabled while this runs (see header comment).
	 */
	for (cnt = 0; cnt < len; cnt++, risc_address++) {
		WRT16_IO_REG(ha, mailbox[0], MBC_READ_RAM_EXTENDED);
		WRT16_IO_REG(ha, mailbox[1], LSW(risc_address));
		WRT16_IO_REG(ha, mailbox[8], MSW(risc_address));
		/* Ring the doorbell: 24xx/25xx use the 32-bit hccr form. */
		CFG_IST(ha, CFG_CTRL_2425) ?
		    WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT) :
		    WRT16_IO_REG(ha, hccr, HC_SET_HOST_INT);
		/* Poll up to ~30s (6000000 iterations x 5us) for the RISC. */
		for (timer = 6000000; timer && rval == QL_SUCCESS; timer--) {
			if (RD16_IO_REG(ha, istatus) & RISC_INT) {
				stat = (uint16_t)
				    (RD16_IO_REG(ha, intr_info_lo) & 0xff);
				/* 1/0x10 presumably mbox-complete status. */
				if ((stat == 1) || (stat == 0x10)) {
					if (CFG_IST(ha, CFG_CTRL_2425)) {
						/* 24xx returns 32-bit words */
						buf32[cnt] = SHORT_TO_LONG(
						    RD16_IO_REG(ha,
						    mailbox[2]),
						    RD16_IO_REG(ha,
						    mailbox[3]));
					} else {
						buf16[cnt] =
						    RD16_IO_REG(ha, mailbox[2]);
					}

					break;
				} else if ((stat == 2) || (stat == 0x11)) {
					/* Command failed; mbox0 = status. */
					rval = RD16_IO_REG(ha, mailbox[0]);
					break;
				}
				/* Some other interrupt; dismiss and re-poll */
				if (CFG_IST(ha, CFG_CTRL_2425)) {
					WRT32_IO_REG(ha, hccr,
					    HC24_CLR_RISC_INT);
					RD32_IO_REG(ha, hccr);
				} else {
					WRT16_IO_REG(ha, hccr,
					    HC_CLR_RISC_INT);
				}
			}
			drv_usecwait(5);
		}
		/* Always dismiss the RISC interrupt before the next word. */
		if (CFG_IST(ha, CFG_CTRL_2425)) {
			WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
			RD32_IO_REG(ha, hccr);
		} else {
			WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
			WRT16_IO_REG(ha, semaphore, 0);
		}

		/* Poll loop exhausted without a completion status. */
		if (timer == 0) {
			rval = QL_FUNCTION_TIMEOUT;
		}
	}

	return (rval);
}
13790 
13791 /*
13792  * ql_read_regs
13793  *	Reads adapter registers to buffer.
13794  *
13795  * Input:
13796  *	ha:	adapter state pointer.
13797  *	buf:	buffer pointer.
13798  *	reg:	start address.
13799  *	count:	number of registers.
13800  *	wds:	register size.
13801  *
13802  * Context:
13803  *	Interrupt or Kernel context, no mailbox commands allowed.
13804  */
13805 static void *
13806 ql_read_regs(ql_adapter_state_t *ha, void *buf, void *reg, uint32_t count,
13807     uint8_t wds)
13808 {
13809 	uint32_t	*bp32, *reg32;
13810 	uint16_t	*bp16, *reg16;
13811 	uint8_t		*bp8, *reg8;
13812 
13813 	switch (wds) {
13814 	case 32:
13815 		bp32 = buf;
13816 		reg32 = reg;
13817 		while (count--) {
13818 			*bp32++ = RD_REG_DWORD(ha, reg32++);
13819 		}
13820 		return (bp32);
13821 	case 16:
13822 		bp16 = buf;
13823 		reg16 = reg;
13824 		while (count--) {
13825 			*bp16++ = RD_REG_WORD(ha, reg16++);
13826 		}
13827 		return (bp16);
13828 	case 8:
13829 		bp8 = buf;
13830 		reg8 = reg;
13831 		while (count--) {
13832 			*bp8++ = RD_REG_BYTE(ha, reg8++);
13833 		}
13834 		return (bp8);
13835 	default:
13836 		EL(ha, "Unknown word size=%d\n", wds);
13837 		return (buf);
13838 	}
13839 }
13840 
13841 static int
13842 ql_save_config_regs(dev_info_t *dip)
13843 {
13844 	ql_adapter_state_t	*ha;
13845 	int			ret;
13846 	ql_config_space_t	chs;
13847 	caddr_t			prop = "ql-config-space";
13848 
13849 	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
13850 	ASSERT(ha != NULL);
13851 	if (ha == NULL) {
13852 		QL_PRINT_2(CE_CONT, "no adapter ptr\n");
13853 		return (DDI_FAILURE);
13854 	}
13855 
13856 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
13857 
13858 	/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
13859 	if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, prop) ==
13860 	    1) {
13861 		QL_PRINT_2(CE_CONT, "(%d): no prop exit\n", ha->instance);
13862 		return (DDI_SUCCESS);
13863 	}
13864 
13865 	chs.chs_command = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_COMM);
13866 	chs.chs_header_type = (uint8_t)ql_pci_config_get8(ha,
13867 	    PCI_CONF_HEADER);
13868 	if ((chs.chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
13869 		chs.chs_bridge_control = (uint8_t)ql_pci_config_get8(ha,
13870 		    PCI_BCNF_BCNTRL);
13871 	}
13872 
13873 	chs.chs_cache_line_size = (uint8_t)ql_pci_config_get8(ha,
13874 	    PCI_CONF_CACHE_LINESZ);
13875 
13876 	chs.chs_latency_timer = (uint8_t)ql_pci_config_get8(ha,
13877 	    PCI_CONF_LATENCY_TIMER);
13878 
13879 	if ((chs.chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
13880 		chs.chs_sec_latency_timer = (uint8_t)ql_pci_config_get8(ha,
13881 		    PCI_BCNF_LATENCY_TIMER);
13882 	}
13883 
13884 	chs.chs_base0 = ql_pci_config_get32(ha, PCI_CONF_BASE0);
13885 	chs.chs_base1 = ql_pci_config_get32(ha, PCI_CONF_BASE1);
13886 	chs.chs_base2 = ql_pci_config_get32(ha, PCI_CONF_BASE2);
13887 	chs.chs_base3 = ql_pci_config_get32(ha, PCI_CONF_BASE3);
13888 	chs.chs_base4 = ql_pci_config_get32(ha, PCI_CONF_BASE4);
13889 	chs.chs_base5 = ql_pci_config_get32(ha, PCI_CONF_BASE5);
13890 
13891 	/*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
13892 	ret = ndi_prop_update_byte_array(DDI_DEV_T_NONE, dip, prop,
13893 	    (uchar_t *)&chs, sizeof (ql_config_space_t));
13894 
13895 	if (ret != DDI_PROP_SUCCESS) {
13896 		cmn_err(CE_WARN, "!Qlogic %s(%d) can't update prop %s",
13897 		    QL_NAME, ddi_get_instance(dip), prop);
13898 		return (DDI_FAILURE);
13899 	}
13900 
13901 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
13902 
13903 	return (DDI_SUCCESS);
13904 }
13905 
13906 static int
13907 ql_restore_config_regs(dev_info_t *dip)
13908 {
13909 	ql_adapter_state_t	*ha;
13910 	uint_t			elements;
13911 	ql_config_space_t	*chs_p;
13912 	caddr_t			prop = "ql-config-space";
13913 
13914 	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
13915 	ASSERT(ha != NULL);
13916 	if (ha == NULL) {
13917 		QL_PRINT_2(CE_CONT, "no adapter ptr\n");
13918 		return (DDI_FAILURE);
13919 	}
13920 
13921 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
13922 
13923 	/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
13924 	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dip,
13925 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, prop,
13926 	    (uchar_t **)&chs_p, &elements) != DDI_PROP_SUCCESS) {
13927 		QL_PRINT_2(CE_CONT, "(%d): no prop exit\n", ha->instance);
13928 		return (DDI_FAILURE);
13929 	}
13930 
13931 	ql_pci_config_put16(ha, PCI_CONF_COMM, chs_p->chs_command);
13932 
13933 	if ((chs_p->chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
13934 		ql_pci_config_put16(ha, PCI_BCNF_BCNTRL,
13935 		    chs_p->chs_bridge_control);
13936 	}
13937 
13938 	ql_pci_config_put8(ha, PCI_CONF_CACHE_LINESZ,
13939 	    chs_p->chs_cache_line_size);
13940 
13941 	ql_pci_config_put8(ha, PCI_CONF_LATENCY_TIMER,
13942 	    chs_p->chs_latency_timer);
13943 
13944 	if ((chs_p->chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
13945 		ql_pci_config_put8(ha, PCI_BCNF_LATENCY_TIMER,
13946 		    chs_p->chs_sec_latency_timer);
13947 	}
13948 
13949 	ql_pci_config_put32(ha, PCI_CONF_BASE0, chs_p->chs_base0);
13950 	ql_pci_config_put32(ha, PCI_CONF_BASE1, chs_p->chs_base1);
13951 	ql_pci_config_put32(ha, PCI_CONF_BASE2, chs_p->chs_base2);
13952 	ql_pci_config_put32(ha, PCI_CONF_BASE3, chs_p->chs_base3);
13953 	ql_pci_config_put32(ha, PCI_CONF_BASE4, chs_p->chs_base4);
13954 	ql_pci_config_put32(ha, PCI_CONF_BASE5, chs_p->chs_base5);
13955 
13956 	ddi_prop_free(chs_p);
13957 
13958 	/*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
13959 	if (ndi_prop_remove(DDI_DEV_T_NONE, dip, prop) != DDI_PROP_SUCCESS) {
13960 		cmn_err(CE_WARN, "!Qlogic %s(%d): can't remove prop %s",
13961 		    QL_NAME, ddi_get_instance(dip), prop);
13962 	}
13963 
13964 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
13965 
13966 	return (DDI_SUCCESS);
13967 }
13968 
13969 uint8_t
13970 ql_pci_config_get8(ql_adapter_state_t *ha, off_t off)
13971 {
13972 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
13973 		return (ddi_get8(ha->sbus_config_handle,
13974 		    (uint8_t *)(ha->sbus_config_base + off)));
13975 	}
13976 
13977 #ifdef KERNEL_32
13978 	return (pci_config_getb(ha->pci_handle, off));
13979 #else
13980 	return (pci_config_get8(ha->pci_handle, off));
13981 #endif
13982 }
13983 
13984 uint16_t
13985 ql_pci_config_get16(ql_adapter_state_t *ha, off_t off)
13986 {
13987 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
13988 		return (ddi_get16(ha->sbus_config_handle,
13989 		    (uint16_t *)(ha->sbus_config_base + off)));
13990 	}
13991 
13992 #ifdef KERNEL_32
13993 	return (pci_config_getw(ha->pci_handle, off));
13994 #else
13995 	return (pci_config_get16(ha->pci_handle, off));
13996 #endif
13997 }
13998 
13999 uint32_t
14000 ql_pci_config_get32(ql_adapter_state_t *ha, off_t off)
14001 {
14002 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
14003 		return (ddi_get32(ha->sbus_config_handle,
14004 		    (uint32_t *)(ha->sbus_config_base + off)));
14005 	}
14006 
14007 #ifdef KERNEL_32
14008 	return (pci_config_getl(ha->pci_handle, off));
14009 #else
14010 	return (pci_config_get32(ha->pci_handle, off));
14011 #endif
14012 }
14013 
14014 void
14015 ql_pci_config_put8(ql_adapter_state_t *ha, off_t off, uint8_t val)
14016 {
14017 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
14018 		ddi_put8(ha->sbus_config_handle,
14019 		    (uint8_t *)(ha->sbus_config_base + off), val);
14020 	} else {
14021 #ifdef KERNEL_32
14022 		pci_config_putb(ha->pci_handle, off, val);
14023 #else
14024 		pci_config_put8(ha->pci_handle, off, val);
14025 #endif
14026 	}
14027 }
14028 
14029 void
14030 ql_pci_config_put16(ql_adapter_state_t *ha, off_t off, uint16_t val)
14031 {
14032 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
14033 		ddi_put16(ha->sbus_config_handle,
14034 		    (uint16_t *)(ha->sbus_config_base + off), val);
14035 	} else {
14036 #ifdef KERNEL_32
14037 		pci_config_putw(ha->pci_handle, off, val);
14038 #else
14039 		pci_config_put16(ha->pci_handle, off, val);
14040 #endif
14041 	}
14042 }
14043 
14044 void
14045 ql_pci_config_put32(ql_adapter_state_t *ha, off_t off, uint32_t val)
14046 {
14047 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
14048 		ddi_put32(ha->sbus_config_handle,
14049 		    (uint32_t *)(ha->sbus_config_base + off), val);
14050 	} else {
14051 #ifdef KERNEL_32
14052 		pci_config_putl(ha->pci_handle, off, val);
14053 #else
14054 		pci_config_put32(ha->pci_handle, off, val);
14055 #endif
14056 	}
14057 }
14058 
14059 /*
14060  * ql_halt
14061  *	Waits for commands that are running to finish and
14062  *	if they do not, commands are aborted.
14063  *	Finally the adapter is reset.
14064  *
14065  * Input:
14066  *	ha:	adapter state pointer.
14067  *	pwr:	power state.
14068  *
14069  * Context:
14070  *	Kernel context.
14071  */
static void
ql_halt(ql_adapter_state_t *ha, int pwr)
{
	uint32_t	cnt;
	ql_tgt_t	*tq;
	ql_srb_t	*sp;
	uint16_t	index;
	ql_link_t	*link;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Wait for all commands running to finish. */
	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;
			(void) ql_abort_device(ha, tq, 0);

			/*
			 * Wait for 30 seconds for commands to finish:
			 * 3000 polls x ql_delay(ha, 10000).
			 */
			for (cnt = 3000; cnt != 0; cnt--) {
				/* Acquire device queue lock. */
				DEVICE_QUEUE_LOCK(tq);
				if (tq->outcnt == 0) {
					/* Release device queue lock. */
					DEVICE_QUEUE_UNLOCK(tq);
					break;
				} else {
					/* Release device queue lock. */
					DEVICE_QUEUE_UNLOCK(tq);
					ql_delay(ha, 10000);
				}
			}

			/* Finish any commands waiting for more status. */
			if (ha->status_srb != NULL) {
				sp = ha->status_srb;
				ha->status_srb = NULL;
				sp->cmd.next = NULL;
				ql_done(&sp->cmd);
			}

			/*
			 * Abort commands that did not finish. cnt == 0 means
			 * the 30-second wait above expired with commands
			 * still outstanding.
			 */
			if (cnt == 0) {
				for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS;
				    cnt++) {
					/*
					 * Pending commands may need a slot;
					 * kick the IOCB queue and restart
					 * the scan from slot 1.
					 */
					if (ha->pending_cmds.first != NULL) {
						ql_start_iocb(ha, NULL);
						cnt = 1;
					}
					sp = ha->outstanding_cmds[cnt];
					/* Only abort cmds for this target. */
					if (sp != NULL &&
					    sp->lun_queue->target_queue ==
					    tq) {
						(void) ql_abort((opaque_t)ha,
						    sp->pkt, 0);
					}
				}
			}
		}
	}

	/* Shutdown IP. */
	if (ha->flags & IP_INITIALIZED) {
		(void) ql_shutdown_ip(ha);
	}

	/* Stop all timers. */
	ADAPTER_STATE_LOCK(ha);
	ha->port_retry_timer = 0;
	ha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
	ha->watchdog_timer = 0;
	ADAPTER_STATE_UNLOCK(ha);

	/* Powering all the way down: mark offline and reset the chip. */
	if (pwr == PM_LEVEL_D3) {
		ADAPTER_STATE_LOCK(ha);
		ha->flags &= ~ONLINE;
		ADAPTER_STATE_UNLOCK(ha);

		/* Reset ISP chip. */
		ql_reset_chip(ha);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
14156 
14157 /*
14158  * ql_get_dma_mem
14159  *	Function used to allocate dma memory.
14160  *
14161  * Input:
14162  *	ha:			adapter state pointer.
14163  *	mem:			pointer to dma memory object.
14164  *	size:			size of the request in bytes
14165  *
14166  * Returns:
 *	ql local function return status code.
14168  *
14169  * Context:
14170  *	Kernel context.
14171  */
int
ql_get_dma_mem(ql_adapter_state_t *ha, dma_mem_t *mem, uint32_t size,
    mem_alloc_type_t allocation_type, mem_alignment_t alignment)
{
	int	rval;

	QL_PRINT_9(CE_CONT, "(%d): entered\n", ha->instance);

	/* Fill in the request before handing it to ql_alloc_phys(). */
	mem->size = size;
	mem->type = allocation_type;
	mem->cookie_count = 1;	/* single-segment allocation */

	switch (alignment) {
	case MEM_DATA_ALIGN:
		mem->alignment = QL_DMA_ALIGN_8_BYTE_BOUNDARY;
		break;
	case MEM_RING_ALIGN:
		mem->alignment = QL_DMA_ALIGN_64_BYTE_BOUNDARY;
		break;
	default:
		/*
		 * NOTE(review): an unknown alignment is only logged;
		 * mem->alignment keeps whatever value the caller's struct
		 * held, which may be stale/uninitialized — confirm callers
		 * always pass a known alignment.
		 */
		EL(ha, "failed, unknown alignment type %x\n", alignment);
		break;
	}

	/* On failure, release any partially acquired DMA resources. */
	if ((rval = ql_alloc_phys(ha, mem, KM_SLEEP)) != QL_SUCCESS) {
		ql_free_phys(ha, mem);
		EL(ha, "failed, alloc_phys=%xh\n", rval);
	}

	QL_PRINT_9(CE_CONT, "(%d): exiting\n", ha->instance);

	return (rval);
}
14205 
14206 /*
14207  * ql_alloc_phys
14208  *	Function used to allocate memory and zero it.
14209  *	Memory is below 4 GB.
14210  *
14211  * Input:
14212  *	ha:			adapter state pointer.
14213  *	mem:			pointer to dma memory object.
14214  *	sleep:			KM_SLEEP/KM_NOSLEEP flag.
14215  *	mem->cookie_count	number of segments allowed.
14216  *	mem->type		memory allocation type.
14217  *	mem->size		memory size.
14218  *	mem->alignment		memory alignment.
14219  *
14220  * Returns:
 *	ql local function return status code.
14222  *
14223  * Context:
14224  *	Kernel context.
14225  */
int
ql_alloc_phys(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
{
	size_t			rlen;
	ddi_dma_attr_t		dma_attr;
	ddi_device_acc_attr_t	acc_attr = ql_dev_acc_attr;

	QL_PRINT_3(CE_CONT, "(%d): entered\n", ha->instance);

	/* Pick the DMA attribute template matching the addressing mode. */
	dma_attr = CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) ?
	    ql_64bit_io_dma_attr : ql_32bit_io_dma_attr;

	dma_attr.dma_attr_align = mem->alignment; /* DMA address alignment */
	dma_attr.dma_attr_sgllen = (int)mem->cookie_count;

	/*
	 * Workaround for SUN XMITS buffer must end and start on 8 byte
	 * boundary. Else, hardware will overrun the buffer. Simple fix is
	 * to make sure buffer has enough room for overrun.
	 */
	if (mem->size & 7) {
		mem->size += 8 - (mem->size & 7);
	}

	mem->flags = DDI_DMA_CONSISTENT;

	/*
	 * Allocate DMA memory for command.
	 */
	if (ddi_dma_alloc_handle(ha->dip, &dma_attr, (sleep == KM_SLEEP) ?
	    DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->dma_handle) !=
	    DDI_SUCCESS) {
		EL(ha, "failed, ddi_dma_alloc_handle\n");
		mem->dma_handle = NULL;
		return (QL_MEMORY_ALLOC_FAILED);
	}

	switch (mem->type) {
	case KERNEL_MEM:
		/* Plain kernel heap memory; no access handle involved. */
		mem->bp = kmem_zalloc(mem->size, sleep);
		break;
	case BIG_ENDIAN_DMA:
	case LITTLE_ENDIAN_DMA:
	case NO_SWAP_DMA:
		/* Default acc_attr is little-endian; adjust as requested. */
		if (mem->type == BIG_ENDIAN_DMA) {
			acc_attr.devacc_attr_endian_flags =
			    DDI_STRUCTURE_BE_ACC;
		} else if (mem->type == NO_SWAP_DMA) {
			acc_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
		}
		if (ddi_dma_mem_alloc(mem->dma_handle, mem->size, &acc_attr,
		    mem->flags, (sleep == KM_SLEEP) ? DDI_DMA_SLEEP :
		    DDI_DMA_DONTWAIT, NULL, (caddr_t *)&mem->bp, &rlen,
		    &mem->acc_handle) == DDI_SUCCESS) {
			bzero(mem->bp, mem->size);
			/* ensure we got what we asked for (32bit) */
			if (dma_attr.dma_attr_addr_hi == NULL) {
				/*
				 * NOTE(review): mem->cookie is not populated
				 * until ql_bind_dma_buffer() below — this
				 * dmac_notused test may examine stale data;
				 * confirm intent.
				 */
				if (mem->cookie.dmac_notused != NULL) {
					EL(ha, "failed, ddi_dma_mem_alloc "
					    "returned 64 bit DMA address\n");
					ql_free_phys(ha, mem);
					return (QL_MEMORY_ALLOC_FAILED);
				}
			}
		} else {
			mem->acc_handle = NULL;
			mem->bp = NULL;
		}
		break;
	default:
		EL(ha, "failed, unknown type=%xh\n", mem->type);
		mem->acc_handle = NULL;
		mem->bp = NULL;
		break;
	}

	/* Either allocation path failing leaves bp NULL; unwind the handle. */
	if (mem->bp == NULL) {
		EL(ha, "failed, ddi_dma_mem_alloc\n");
		ddi_dma_free_handle(&mem->dma_handle);
		mem->dma_handle = NULL;
		return (QL_MEMORY_ALLOC_FAILED);
	}

	mem->flags |= DDI_DMA_RDWR;

	/* Map the memory for DMA; ql_free_phys() unwinds on failure. */
	if (ql_bind_dma_buffer(ha, mem, sleep) != DDI_DMA_MAPPED) {
		EL(ha, "failed, ddi_dma_addr_bind_handle\n");
		ql_free_phys(ha, mem);
		return (QL_MEMORY_ALLOC_FAILED);
	}

	QL_PRINT_3(CE_CONT, "(%d): exiting\n", ha->instance);

	return (QL_SUCCESS);
}
14321 
14322 /*
14323  * ql_free_phys
14324  *	Function used to free physical memory.
14325  *
14326  * Input:
14327  *	ha:	adapter state pointer.
14328  *	mem:	pointer to dma memory object.
14329  *
14330  * Context:
14331  *	Kernel context.
14332  */
14333 void
14334 ql_free_phys(ql_adapter_state_t *ha, dma_mem_t *mem)
14335 {
14336 	QL_PRINT_3(CE_CONT, "(%d): entered\n", ha->instance);
14337 
14338 	if (mem != NULL && mem->dma_handle != NULL) {
14339 		ql_unbind_dma_buffer(ha, mem);
14340 		switch (mem->type) {
14341 		case KERNEL_MEM:
14342 			if (mem->bp != NULL) {
14343 				kmem_free(mem->bp, mem->size);
14344 			}
14345 			break;
14346 		case LITTLE_ENDIAN_DMA:
14347 		case BIG_ENDIAN_DMA:
14348 		case NO_SWAP_DMA:
14349 			if (mem->acc_handle != NULL) {
14350 				ddi_dma_mem_free(&mem->acc_handle);
14351 				mem->acc_handle = NULL;
14352 			}
14353 			break;
14354 		default:
14355 			break;
14356 		}
14357 		mem->bp = NULL;
14358 		ddi_dma_free_handle(&mem->dma_handle);
14359 		mem->dma_handle = NULL;
14360 	}
14361 
14362 	QL_PRINT_3(CE_CONT, "(%d): exiting\n", ha->instance);
14363 }
14364 
14365 /*
14366  * ql_alloc_dma_resouce.
14367  *	Allocates DMA resource for buffer.
14368  *
14369  * Input:
14370  *	ha:			adapter state pointer.
14371  *	mem:			pointer to dma memory object.
14372  *	sleep:			KM_SLEEP/KM_NOSLEEP flag.
14373  *	mem->cookie_count	number of segments allowed.
14374  *	mem->type		memory allocation type.
14375  *	mem->size		memory size.
14376  *	mem->bp			pointer to memory or struct buf
14377  *
14378  * Returns:
 *	ql local function return status code.
14380  *
14381  * Context:
14382  *	Kernel context.
14383  */
14384 int
14385 ql_alloc_dma_resouce(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
14386 {
14387 	ddi_dma_attr_t	dma_attr;
14388 
14389 	QL_PRINT_3(CE_CONT, "(%d): entered\n", ha->instance);
14390 
14391 	dma_attr = CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) ?
14392 	    ql_64bit_io_dma_attr : ql_32bit_io_dma_attr;
14393 	dma_attr.dma_attr_sgllen = (int)mem->cookie_count;
14394 
14395 	/*
14396 	 * Allocate DMA handle for command.
14397 	 */
14398 	if (ddi_dma_alloc_handle(ha->dip, &dma_attr, (sleep == KM_SLEEP) ?
14399 	    DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->dma_handle) !=
14400 	    DDI_SUCCESS) {
14401 		EL(ha, "failed, ddi_dma_alloc_handle\n");
14402 		mem->dma_handle = NULL;
14403 		return (QL_MEMORY_ALLOC_FAILED);
14404 	}
14405 
14406 	mem->flags = DDI_DMA_RDWR | DDI_DMA_CONSISTENT;
14407 
14408 	if (ql_bind_dma_buffer(ha, mem, sleep) != DDI_DMA_MAPPED) {
14409 		EL(ha, "failed, bind_dma_buffer\n");
14410 		ddi_dma_free_handle(&mem->dma_handle);
14411 		mem->dma_handle = NULL;
14412 		return (QL_MEMORY_ALLOC_FAILED);
14413 	}
14414 
14415 	QL_PRINT_3(CE_CONT, "(%d): exiting\n", ha->instance);
14416 
14417 	return (QL_SUCCESS);
14418 }
14419 
14420 /*
14421  * ql_free_dma_resource
14422  *	Frees DMA resources.
14423  *
14424  * Input:
14425  *	ha:		adapter state pointer.
14426  *	mem:		pointer to dma memory object.
14427  *	mem->dma_handle	DMA memory handle.
14428  *
14429  * Context:
14430  *	Kernel context.
14431  */
14432 void
14433 ql_free_dma_resource(ql_adapter_state_t *ha, dma_mem_t *mem)
14434 {
14435 	QL_PRINT_3(CE_CONT, "(%d): entered\n", ha->instance);
14436 
14437 	ql_free_phys(ha, mem);
14438 
14439 	QL_PRINT_3(CE_CONT, "(%d): exiting\n", ha->instance);
14440 }
14441 
14442 /*
14443  * ql_bind_dma_buffer
14444  *	Binds DMA buffer.
14445  *
14446  * Input:
14447  *	ha:			adapter state pointer.
14448  *	mem:			pointer to dma memory object.
14449  *	sleep:			KM_SLEEP or KM_NOSLEEP.
14450  *	mem->dma_handle		DMA memory handle.
14451  *	mem->cookie_count	number of segments allowed.
14452  *	mem->type		memory allocation type.
14453  *	mem->size		memory size.
14454  *	mem->bp			pointer to memory or struct buf
14455  *
14456  * Returns:
14457  *	mem->cookies		pointer to list of cookies.
14458  *	mem->cookie_count	number of cookies.
14459  *	status			success = DDI_DMA_MAPPED
14460  *				DDI_DMA_PARTIAL_MAP, DDI_DMA_INUSE,
14461  *				DDI_DMA_NORESOURCES, DDI_DMA_NOMAPPING or
14462  *				DDI_DMA_TOOBIG
14463  *
14464  * Context:
14465  *	Kernel context.
14466  */
14467 static int
14468 ql_bind_dma_buffer(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
14469 {
14470 	int			rval;
14471 	ddi_dma_cookie_t	*cookiep;
14472 	uint32_t		cnt = mem->cookie_count;
14473 
14474 	QL_PRINT_3(CE_CONT, "(%d): entered\n", ha->instance);
14475 
14476 	if (mem->type == STRUCT_BUF_MEMORY) {
14477 		rval = ddi_dma_buf_bind_handle(mem->dma_handle, mem->bp,
14478 		    mem->flags, (sleep == KM_SLEEP) ? DDI_DMA_SLEEP :
14479 		    DDI_DMA_DONTWAIT, NULL, &mem->cookie, &mem->cookie_count);
14480 	} else {
14481 		rval = ddi_dma_addr_bind_handle(mem->dma_handle, NULL, mem->bp,
14482 		    mem->size, mem->flags, (sleep == KM_SLEEP) ?
14483 		    DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->cookie,
14484 		    &mem->cookie_count);
14485 	}
14486 
14487 	if (rval == DDI_DMA_MAPPED) {
14488 		if (mem->cookie_count > cnt) {
14489 			(void) ddi_dma_unbind_handle(mem->dma_handle);
14490 			EL(ha, "failed, cookie_count %d > %d\n",
14491 			    mem->cookie_count, cnt);
14492 			rval = DDI_DMA_TOOBIG;
14493 		} else {
14494 			if (mem->cookie_count > 1) {
14495 				if (mem->cookies = kmem_zalloc(
14496 				    sizeof (ddi_dma_cookie_t) *
14497 				    mem->cookie_count, sleep)) {
14498 					*mem->cookies = mem->cookie;
14499 					cookiep = mem->cookies;
14500 					for (cnt = 1; cnt < mem->cookie_count;
14501 					    cnt++) {
14502 						ddi_dma_nextcookie(
14503 						    mem->dma_handle,
14504 						    ++cookiep);
14505 					}
14506 				} else {
14507 					(void) ddi_dma_unbind_handle(
14508 					    mem->dma_handle);
14509 					EL(ha, "failed, kmem_zalloc\n");
14510 					rval = DDI_DMA_NORESOURCES;
14511 				}
14512 			} else {
14513 				/*
14514 				 * It has been reported that dmac_size at times
14515 				 * may be incorrect on sparc machines so for
14516 				 * sparc machines that only have one segment
14517 				 * use the buffer size instead.
14518 				 */
14519 				mem->cookies = &mem->cookie;
14520 				mem->cookies->dmac_size = mem->size;
14521 			}
14522 		}
14523 	}
14524 
14525 	if (rval != DDI_DMA_MAPPED) {
14526 		EL(ha, "failed=%xh\n", rval);
14527 	} else {
14528 		/*EMPTY*/
14529 		QL_PRINT_3(CE_CONT, "(%d): exiting\n", ha->instance);
14530 	}
14531 
14532 	return (rval);
14533 }
14534 
14535 /*
14536  * ql_unbind_dma_buffer
14537  *	Unbinds DMA buffer.
14538  *
14539  * Input:
14540  *	ha:			adapter state pointer.
14541  *	mem:			pointer to dma memory object.
14542  *	mem->dma_handle		DMA memory handle.
14543  *	mem->cookies		pointer to cookie list.
14544  *	mem->cookie_count	number of cookies.
14545  *
14546  * Context:
14547  *	Kernel context.
14548  */
14549 /* ARGSUSED */
14550 static void
14551 ql_unbind_dma_buffer(ql_adapter_state_t *ha, dma_mem_t *mem)
14552 {
14553 	QL_PRINT_3(CE_CONT, "(%d): entered\n", ha->instance);
14554 
14555 	(void) ddi_dma_unbind_handle(mem->dma_handle);
14556 	if (mem->cookie_count > 1) {
14557 		kmem_free(mem->cookies, sizeof (ddi_dma_cookie_t) *
14558 		    mem->cookie_count);
14559 		mem->cookies = NULL;
14560 	}
14561 	mem->cookie_count = 0;
14562 
14563 	QL_PRINT_3(CE_CONT, "(%d): exiting\n", ha->instance);
14564 }
14565 
/*
 * ql_suspend_adapter
 *	Quiesces the adapter: claims mailbox ownership, drains outstanding
 *	commands via ql_halt(), and disables ISP interrupts.
 *
 * Input:
 *	ha:	adapter state pointer.
 *
 * Returns:
 *	QL_SUCCESS or QL_FUNCTION_TIMEOUT if mailbox ownership could not
 *	be obtained.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_suspend_adapter(ql_adapter_state_t *ha)
{
	clock_t timer;

	/*
	 * First we will claim mbox ownership so that no
	 * thread using mbox hangs when we disable the
	 * interrupt in the middle of it.
	 */
	MBX_REGISTER_LOCK(ha);

	/* Check for mailbox available, if not wait for signal. */
	while (ha->mailbox_flags & MBX_BUSY_FLG) {
		ha->mailbox_flags = (uint8_t)
		    (ha->mailbox_flags | MBX_WANT_FLG);

		/* Timeout: 32 seconds from now. */
		timer = ddi_get_lbolt();
		timer += 32 * drv_usectohz(1000000);
		if (cv_timedwait(&ha->cv_mbx_wait, &ha->mbx_mutex,
		    timer) == -1) {

			/* Release mailbox register lock. */
			MBX_REGISTER_UNLOCK(ha);
			EL(ha, "failed, Suspend mbox");
			return (QL_FUNCTION_TIMEOUT);
		}
	}

	/* Set busy flag. */
	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags | MBX_BUSY_FLG);
	MBX_REGISTER_UNLOCK(ha);

	/* Let commands already issued to the ISP drain. */
	(void) ql_wait_outstanding(ha);

	/*
	 * here we are sure that there will not be any mbox interrupt.
	 * So, let's make sure that we return back all the outstanding
	 * cmds as well as internally queued commands.
	 */
	ql_halt(ha, PM_LEVEL_D0);

	if (ha->power_level != PM_LEVEL_D3) {
		/* Disable ISP interrupts. */
		WRT16_IO_REG(ha, ictrl, 0);
	}

	ADAPTER_STATE_LOCK(ha);
	ha->flags &= ~INTERRUPTS_ENABLED;
	ADAPTER_STATE_UNLOCK(ha);

	MBX_REGISTER_LOCK(ha);
	/* Reset busy status. */
	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags & ~MBX_BUSY_FLG);

	/* If thread is waiting for mailbox go signal it to start. */
	if (ha->mailbox_flags & MBX_WANT_FLG) {
		ha->mailbox_flags = (uint8_t)
		    (ha->mailbox_flags & ~MBX_WANT_FLG);
		cv_broadcast(&ha->cv_mbx_wait);
	}
	/* Release mailbox register lock. */
	MBX_REGISTER_UNLOCK(ha);

	return (QL_SUCCESS);
}
14633 
14634 /*
14635  * ql_add_link_b
14636  *	Add link to the end of the chain.
14637  *
14638  * Input:
14639  *	head = Head of link list.
14640  *	link = link to be added.
14641  *	LOCK must be already obtained.
14642  *
14643  * Context:
14644  *	Interrupt or Kernel context, no mailbox commands allowed.
14645  */
14646 void
14647 ql_add_link_b(ql_head_t *head, ql_link_t *link)
14648 {
14649 	ASSERT(link->base_address != NULL);
14650 
14651 	/* at the end there isn't a next */
14652 	link->next = NULL;
14653 
14654 	if ((link->prev = head->last) == NULL) {
14655 		head->first = link;
14656 	} else {
14657 		head->last->next = link;
14658 	}
14659 
14660 	head->last = link;
14661 	link->head = head;	/* the queue we're on */
14662 }
14663 
14664 /*
14665  * ql_add_link_t
14666  *	Add link to the beginning of the chain.
14667  *
14668  * Input:
14669  *	head = Head of link list.
14670  *	link = link to be added.
14671  *	LOCK must be already obtained.
14672  *
14673  * Context:
14674  *	Interrupt or Kernel context, no mailbox commands allowed.
14675  */
14676 void
14677 ql_add_link_t(ql_head_t *head, ql_link_t *link)
14678 {
14679 	ASSERT(link->base_address != NULL);
14680 
14681 	link->prev = NULL;
14682 
14683 	if ((link->next = head->first) == NULL)	{
14684 		head->last = link;
14685 	} else {
14686 		head->first->prev = link;
14687 	}
14688 
14689 	head->first = link;
14690 	link->head = head;	/* the queue we're on */
14691 }
14692 
14693 /*
14694  * ql_remove_link
14695  *	Remove a link from the chain.
14696  *
14697  * Input:
14698  *	head = Head of link list.
14699  *	link = link to be removed.
14700  *	LOCK must be already obtained.
14701  *
14702  * Context:
14703  *	Interrupt or Kernel context, no mailbox commands allowed.
14704  */
14705 void
14706 ql_remove_link(ql_head_t *head, ql_link_t *link)
14707 {
14708 	ASSERT(link->base_address != NULL);
14709 
14710 	if (link->prev != NULL) {
14711 		if ((link->prev->next = link->next) == NULL) {
14712 			head->last = link->prev;
14713 		} else {
14714 			link->next->prev = link->prev;
14715 		}
14716 	} else if ((head->first = link->next) == NULL) {
14717 		head->last = NULL;
14718 	} else {
14719 		head->first->prev = NULL;
14720 	}
14721 
14722 	/* not on a queue any more */
14723 	link->prev = link->next = NULL;
14724 	link->head = NULL;
14725 }
14726 
14727 /*
14728  * ql_chg_endian
14729  *	Change endianess of byte array.
14730  *
14731  * Input:
14732  *	buf = array pointer.
14733  *	size = size of array in bytes.
14734  *
14735  * Context:
14736  *	Interrupt or Kernel context, no mailbox commands allowed.
14737  */
void
ql_chg_endian(uint8_t buf[], size_t size)
{
	uint8_t	tmp;
	size_t	lo, hi;

	/* Zero- or one-byte arrays need no work. */
	if (size < 2) {
		return;
	}

	/* Swap symmetric pairs, walking inward from both ends. */
	for (lo = 0, hi = size - 1; lo < hi; lo++, hi--) {
		tmp = buf[lo];
		buf[lo] = buf[hi];
		buf[hi] = tmp;
	}
}
14753 
14754 /*
14755  * ql_bstr_to_dec
14756  *	Convert decimal byte string to number.
14757  *
14758  * Input:
14759  *	s:	byte string pointer.
 *	ans:	integer pointer for number.
14761  *	size:	number of ascii bytes.
14762  *
14763  * Returns:
14764  *	success = number of ascii bytes processed.
14765  *
14766  * Context:
14767  *	Kernel/Interrupt context.
14768  */
static int
ql_bstr_to_dec(char *s, uint32_t *ans, uint32_t size)
{
	uint32_t	weight, remain;
	int		digits = 0;
	char		*p;

	/* When no width is given, use the span of leading digits. */
	if (size == 0) {
		for (p = s; *p >= '0' && *p <= '9'; p++) {
			size++;
		}
	}

	*ans = 0;
	while (size != 0 && *s >= '0' && *s <= '9') {
		/* Positional weight: 10^(remaining digits - 1). */
		weight = 1;
		for (remain = 1; remain < size; remain++) {
			weight *= 10;
		}
		*ans += (uint32_t)(*s++ - '0') * weight;
		size--;
		digits++;
	}

	return (digits);
}
14798 
14799 /*
14800  * ql_delay
14801  *	Calls delay routine if threads are not suspended, otherwise, busy waits
14802  *	Minimum = 1 tick = 10ms
14803  *
14804  * Input:
 *	usecs = delay time in microseconds.
14806  *
14807  * Context:
14808  *	Kernel or Interrupt context, no mailbox commands allowed.
14809  */
14810 void
14811 ql_delay(ql_adapter_state_t *ha, clock_t usecs)
14812 {
14813 	if (QL_DAEMON_SUSPENDED(ha) || ddi_in_panic()) {
14814 		drv_usecwait(usecs);
14815 	} else {
14816 		delay(drv_usectohz(usecs));
14817 	}
14818 }
14819 
14820 /*
 * ql_stall_driver
14822  *	Stalls one or all driver instances, waits for 30 seconds.
14823  *
14824  * Input:
14825  *	ha:		adapter state pointer or NULL for all.
14826  *	options:	BIT_0 --> leave driver stalled on exit if
14827  *				  failed.
14828  *
14829  * Returns:
14830  *	ql local function return status code.
14831  *
14832  * Context:
14833  *	Kernel context.
14834  */
14835 int
14836 ql_stall_driver(ql_adapter_state_t *ha, uint32_t options)
14837 {
14838 	ql_link_t		*link;
14839 	ql_adapter_state_t	*ha2;
14840 	uint32_t		timer;
14841 
14842 	/* Wait for 30 seconds for daemons unstall. */
14843 	timer = 3000;
14844 	link = ha == NULL ? ql_hba.first : &ha->hba;
14845 	while (link != NULL && timer) {
14846 		ha2 = link->base_address;
14847 
14848 		ql_awaken_task_daemon(ha2, NULL, DRIVER_STALL, 0);
14849 
14850 		if ((ha2->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) == 0 ||
14851 		    (ha2->task_daemon_flags & TASK_DAEMON_STOP_FLG) != 0 ||
14852 		    (ha2->task_daemon_flags & TASK_DAEMON_STALLED_FLG &&
14853 		    ql_wait_outstanding(ha2) == MAX_OUTSTANDING_COMMANDS)) {
14854 			link = ha == NULL ? link->next : NULL;
14855 			continue;
14856 		}
14857 
14858 		ql_delay(ha, 10000);
14859 		timer--;
14860 		link = ha == NULL ? ql_hba.first : &ha->hba;
14861 	}
14862 
14863 	if (ha2 != NULL && timer == 0) {
14864 		EL(ha2, "failed, tdf=%xh, exiting state is: %s\n",
14865 		    ha2->task_daemon_flags, (options & BIT_0 ? "stalled" :
14866 		    "unstalled"));
14867 		if (options & BIT_0) {
14868 			ql_awaken_task_daemon(ha2, NULL, 0, DRIVER_STALL);
14869 		}
14870 		return (QL_FUNCTION_TIMEOUT);
14871 	}
14872 
14873 	QL_PRINT_3(CE_CONT, "done\n");
14874 
14875 	return (QL_SUCCESS);
14876 }
14877 
14878 /*
14879  * ql_restart_driver
14880  *	Restarts one or all driver instances.
14881  *
14882  * Input:
14883  *	ha:	adapter state pointer or NULL for all.
14884  *
14885  * Context:
14886  *	Kernel context.
14887  */
void
ql_restart_driver(ql_adapter_state_t *ha)
{
	ql_link_t		*link;
	ql_adapter_state_t	*ha2;
	uint32_t		timer;

	QL_PRINT_3(CE_CONT, "entered\n");

	/* Tell all daemons to unstall. */
	link = ha == NULL ? ql_hba.first : &ha->hba;
	while (link != NULL) {
		ha2 = link->base_address;

		/* Clear the DRIVER_STALL flag on this instance. */
		ql_awaken_task_daemon(ha2, NULL, 0, DRIVER_STALL);

		/* Single-instance mode (ha != NULL) stops after one pass. */
		link = ha == NULL ? link->next : NULL;
	}

	/* Wait for 30 seconds for all daemons unstall. */
	/* 3000 iterations * 10ms delay below = 30 seconds total. */
	timer = 3000;
	link = ha == NULL ? ql_hba.first : &ha->hba;
	while (link != NULL && timer) {
		ha2 = link->base_address;

		/*
		 * Daemon gone/stopping or no longer flagged stalled means
		 * this instance has restarted; resume its queues and move on.
		 */
		if ((ha2->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) == 0 ||
		    (ha2->task_daemon_flags & TASK_DAEMON_STOP_FLG) != 0 ||
		    (ha2->task_daemon_flags & TASK_DAEMON_STALLED_FLG) == 0) {
			QL_PRINT_2(CE_CONT, "(%d,%d): restarted\n",
			    ha2->instance, ha2->vp_index);
			ql_restart_queues(ha2);
			link = ha == NULL ? link->next : NULL;
			continue;
		}

		QL_PRINT_2(CE_CONT, "(%d,%d): failed, tdf=%xh\n",
		    ha2->instance, ha2->vp_index, ha2->task_daemon_flags);

		/* Still stalled: wait one tick and rescan from the top. */
		ql_delay(ha, 10000);
		timer--;
		link = ha == NULL ? ql_hba.first : &ha->hba;
	}

	QL_PRINT_3(CE_CONT, "exiting\n");
}
14933 
14934 /*
14935  * ql_setup_interrupts
14936  *	Sets up interrupts based on the HBA's and platform's
14937  *	capabilities (e.g., legacy / MSI / FIXED).
14938  *
14939  * Input:
14940  *	ha = adapter state pointer.
14941  *
14942  * Returns:
14943  *	DDI_SUCCESS or DDI_FAILURE.
14944  *
14945  * Context:
14946  *	Kernel context.
14947  */
14948 static int
14949 ql_setup_interrupts(ql_adapter_state_t *ha)
14950 {
14951 	int32_t		rval = DDI_FAILURE;
14952 	int32_t		i;
14953 	int32_t		itypes = 0;
14954 
14955 	QL_PRINT_3(CE_CONT, "(%d): entered\n", ha->instance);
14956 
14957 	/*
14958 	 * The Solaris Advanced Interrupt Functions (aif) are only
14959 	 * supported on s10U1 or greater.
14960 	 */
14961 	if (ql_os_release_level < 10 || ql_disable_aif != 0) {
14962 		EL(ha, "interrupt framework is not supported or is "
14963 		    "disabled, using legacy\n");
14964 		return (ql_legacy_intr(ha));
14965 	} else if (ql_os_release_level == 10) {
14966 		/*
14967 		 * See if the advanced interrupt functions (aif) are
14968 		 * in the kernel
14969 		 */
14970 		void	*fptr = (void *)&ddi_intr_get_supported_types;
14971 
14972 		if (fptr == NULL) {
14973 			EL(ha, "aif is not supported, using legacy "
14974 			    "interrupts (rev)\n");
14975 			return (ql_legacy_intr(ha));
14976 		}
14977 	}
14978 
14979 	/* See what types of interrupts this HBA and platform support */
14980 	if ((i = ddi_intr_get_supported_types(ha->dip, &itypes)) !=
14981 	    DDI_SUCCESS) {
14982 		EL(ha, "get supported types failed, rval=%xh, "
14983 		    "assuming FIXED\n", i);
14984 		itypes = DDI_INTR_TYPE_FIXED;
14985 	}
14986 
14987 	EL(ha, "supported types are: %xh\n", itypes);
14988 
14989 	if ((itypes & DDI_INTR_TYPE_MSIX) &&
14990 	    (rval = ql_setup_msix(ha)) == DDI_SUCCESS) {
14991 		EL(ha, "successful MSI-X setup\n");
14992 	} else if ((itypes & DDI_INTR_TYPE_MSI) &&
14993 	    (rval = ql_setup_msi(ha)) == DDI_SUCCESS) {
14994 		EL(ha, "successful MSI setup\n");
14995 	} else {
14996 		rval = ql_setup_fixed(ha);
14997 	}
14998 
14999 	if (rval != DDI_SUCCESS) {
15000 		EL(ha, "failed, aif, rval=%xh\n", rval);
15001 	} else {
15002 		/*EMPTY*/
15003 		QL_PRINT_3(CE_CONT, "(%d): exiting\n");
15004 	}
15005 
15006 	return (rval);
15007 }
15008 
15009 /*
15010  * ql_setup_msi
15011  *	Set up aif MSI interrupts
15012  *
15013  * Input:
15014  *	ha = adapter state pointer.
15015  *
15016  * Returns:
15017  *	DDI_SUCCESS or DDI_FAILURE.
15018  *
15019  * Context:
15020  *	Kernel context.
15021  */
static int
ql_setup_msi(ql_adapter_state_t *ha)
{
	int32_t		count = 0;
	int32_t		avail = 0;
	int32_t		actual = 0;
	int32_t		msitype = DDI_INTR_TYPE_MSI;
	int32_t		ret;
	/* Only entry 0 is used; MSI needs a single vector/handler. */
	ql_ifunc_t	itrfun[10] = {0};

	QL_PRINT_3(CE_CONT, "(%d): entered\n", ha->instance);

	if (ql_disable_msi != 0) {
		EL(ha, "MSI is disabled by user\n");
		return (DDI_FAILURE);
	}

	/* MSI support is only supported on 24xx HBA's. */
	if (!(CFG_IST(ha, CFG_CTRL_2425))) {
		EL(ha, "HBA does not support MSI\n");
		return (DDI_FAILURE);
	}

	/* Get number of MSI interrupts the system supports */
	if (((ret = ddi_intr_get_nintrs(ha->dip, msitype, &count)) !=
	    DDI_SUCCESS) || count == 0) {
		EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
		return (DDI_FAILURE);
	}

	/* Get number of available MSI interrupts */
	if (((ret = ddi_intr_get_navail(ha->dip, msitype, &avail)) !=
	    DDI_SUCCESS) || avail == 0) {
		EL(ha, "failed, navail ret=%xh, avail=%xh\n", ret, avail);
		return (DDI_FAILURE);
	}

	/* MSI requires only 1.  */
	count = 1;
	itrfun[0].ifunc = &ql_isr_aif;

	/* Allocate space for interrupt handles */
	ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * count);
	ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP);

	/* Set before alloc so ql_release_intr can clean up on failure. */
	ha->iflags |= IFLG_INTR_MSI;

	/* Allocate the interrupts */
	if ((ret = ddi_intr_alloc(ha->dip, ha->htable, msitype, 0, count,
	    &actual, 0)) != DDI_SUCCESS || actual < count) {
		EL(ha, "failed, intr_alloc ret=%xh, count = %xh, "
		    "actual=%xh\n", ret, count, actual);
		ql_release_intr(ha);
		return (DDI_FAILURE);
	}

	ha->intr_cnt = actual;

	/* Get interrupt priority */
	if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
	    DDI_SUCCESS) {
		EL(ha, "failed, get_pri ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Add the interrupt handler */
	if ((ret = ddi_intr_add_handler(ha->htable[0], itrfun[0].ifunc,
	    (caddr_t)ha, (caddr_t)0)) != DDI_SUCCESS) {
		EL(ha, "failed, intr_add ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Setup mutexes */
	/* Needs ha->intr_pri from above for mutex_init's interrupt arg. */
	if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
		EL(ha, "failed, mutex init ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Get the capabilities */
	(void) ddi_intr_get_cap(ha->htable[0], &ha->intr_cap);

	/* Enable interrupts */
	if (ha->intr_cap & DDI_INTR_FLAG_BLOCK) {
		if ((ret = ddi_intr_block_enable(ha->htable, ha->intr_cnt)) !=
		    DDI_SUCCESS) {
			EL(ha, "failed, block enable, ret=%xh\n", ret);
			ql_destroy_mutex(ha);
			ql_release_intr(ha);
			return (ret);
		}
	} else {
		if ((ret = ddi_intr_enable(ha->htable[0])) != DDI_SUCCESS) {
			EL(ha, "failed, intr enable, ret=%xh\n", ret);
			ql_destroy_mutex(ha);
			ql_release_intr(ha);
			return (ret);
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): exiting\n", ha->instance);

	return (DDI_SUCCESS);
}
15128 
15129 /*
15130  * ql_setup_msix
15131  *	Set up aif MSI-X interrupts
15132  *
15133  * Input:
15134  *	ha = adapter state pointer.
15135  *
15136  * Returns:
15137  *	DDI_SUCCESS or DDI_FAILURE.
15138  *
15139  * Context:
15140  *	Kernel context.
15141  */
static int
ql_setup_msix(ql_adapter_state_t *ha)
{
	uint16_t	hwvect;
	int32_t		count = 0;
	int32_t		avail = 0;
	int32_t		actual = 0;
	int32_t		msitype = DDI_INTR_TYPE_MSIX;
	int32_t		ret, i;
	ql_ifunc_t	itrfun[QL_MSIX_MAXAIF] = {0};

	QL_PRINT_3(CE_CONT, "(%d): entered\n", ha->instance);

	if (ql_disable_msix != 0) {
		EL(ha, "MSI-X is disabled by user\n");
		return (DDI_FAILURE);
	}

	/*
	 * MSI-X support is only available on 24xx HBA's that have
	 * rev A2 parts (revid = 3) or greater.
	 */
	if (!((ha->device_id == 0x2532) || (ha->device_id == 0x2432))) {
		EL(ha, "HBA does not support MSI-X\n");
		return (DDI_FAILURE);
	}

	if (CFG_IST(ha, CFG_CTRL_2422) && (ha->rev_id < 3)) {
		EL(ha, "HBA does not support MSI-X (revid)\n");
		return (DDI_FAILURE);
	}

	/* Per HP, these HP branded HBA's are not supported with MSI-X */
	if (ha->ven_id == 0x103C && (ha->subsys_id == 0x7041 ||
	    ha->subsys_id == 0x7040 || ha->subsys_id == 0x1705)) {
		EL(ha, "HBA does not support MSI-X (subdevid)\n");
		return (DDI_FAILURE);
	}

	/* Get the number of 24xx/25xx MSI-X h/w vectors */
	/*
	 * Vector count lives in the MSI-X capability's table-size field
	 * (different PCI config offset on 24xx vs 25xx); value is N-1.
	 */
	hwvect = (uint16_t)(((CFG_IST(ha, CFG_CTRL_2422) ?
	    ql_pci_config_get16(ha, 0x7e) :
	    ql_pci_config_get16(ha, 0xa2)) & 0x3ff) + 1);

	EL(ha, "pcie config space hwvect = %d\n", hwvect);

	if (hwvect < QL_MSIX_MAXAIF) {
		EL(ha, "failed, min h/w vectors req'd: %d, avail: %d\n",
		    QL_MSIX_MAXAIF, hwvect);
		return (DDI_FAILURE);
	}

	/* Get number of MSI-X interrupts the platform h/w supports */
	if (((ret = ddi_intr_get_nintrs(ha->dip, msitype, &count)) !=
	    DDI_SUCCESS) || count == 0) {
		EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
		return (DDI_FAILURE);
	}

	/* Get number of available system interrupts */
	if (((ret = ddi_intr_get_navail(ha->dip, msitype, &avail)) !=
	    DDI_SUCCESS) || avail == 0) {
		EL(ha, "failed, navail ret=%xh, avail=%xh\n", ret, avail);
		return (DDI_FAILURE);
	}

	/* Fill out the intr table */
	/* One handler slot per AIF use: general attention and response q. */
	count = QL_MSIX_MAXAIF;
	itrfun[QL_MSIX_AIF].ifunc = &ql_isr_aif;
	itrfun[QL_MSIX_RSPQ].ifunc = &ql_isr_aif;

	/* Allocate space for interrupt handles */
	/* Sized by hwvect (not count) to leave room for dup'd handles. */
	ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * hwvect);
	if ((ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP)) == NULL) {
		/*
		 * NOTE(review): kmem_zalloc with KM_SLEEP should not return
		 * NULL; this check appears to be defensive only.
		 */
		ha->hsize = 0;
		EL(ha, "failed, unable to allocate htable space\n");
		return (DDI_FAILURE);
	}

	/* Set before alloc so ql_release_intr can clean up on failure. */
	ha->iflags |= IFLG_INTR_MSIX;

	/* Allocate the interrupts */
	if (((ret = ddi_intr_alloc(ha->dip, ha->htable, msitype,
	    DDI_INTR_ALLOC_NORMAL, count, &actual, 0)) != DDI_SUCCESS) ||
	    actual < QL_MSIX_MAXAIF) {
		EL(ha, "failed, intr_alloc ret=%xh, count = %xh, "
		    "actual=%xh\n", ret, count, actual);
		ql_release_intr(ha);
		return (DDI_FAILURE);
	}

	ha->intr_cnt = actual;

	/* Get interrupt priority */
	if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
	    DDI_SUCCESS) {
		EL(ha, "failed, get_pri ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Add the interrupt handlers */
	/* The vector index is passed as the handler's second argument. */
	for (i = 0; i < actual; i++) {
		if ((ret = ddi_intr_add_handler(ha->htable[i], itrfun[i].ifunc,
		    (caddr_t)(uintptr_t)ha, (caddr_t)(uintptr_t)i))
		    != DDI_SUCCESS) {
			EL(ha, "failed, addh#=%xh, act=%xh, ret=%xh\n", i,
			    actual, ret);
			ql_release_intr(ha);
			return (ret);
		}
	}

	/*
	 * duplicate the rest of the intr's
	 * ddi_intr_dup_handler() isn't working on x86 just yet...
	 */
#ifdef __sparc
	for (i = actual; i < hwvect; i++) {
		if ((ret = ddi_intr_dup_handler(ha->htable[0], i,
		    &ha->htable[i])) != DDI_SUCCESS) {
			EL(ha, "failed, intr_dup#=%xh, act=%xh, ret=%xh\n",
			    i, actual, ret);
			ql_release_intr(ha);
			return (ret);
		}
	}
#endif

	/* Setup mutexes */
	/* Needs ha->intr_pri from above for mutex_init's interrupt arg. */
	if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
		EL(ha, "failed, mutex init ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Get the capabilities */
	(void) ddi_intr_get_cap(ha->htable[0], &ha->intr_cap);

	/* Enable interrupts */
	if (ha->intr_cap & DDI_INTR_FLAG_BLOCK) {
		if ((ret = ddi_intr_block_enable(ha->htable, ha->intr_cnt)) !=
		    DDI_SUCCESS) {
			EL(ha, "failed, block enable, ret=%xh\n", ret);
			ql_destroy_mutex(ha);
			ql_release_intr(ha);
			return (ret);
		}
	} else {
		for (i = 0; i < ha->intr_cnt; i++) {
			if ((ret = ddi_intr_enable(ha->htable[i])) !=
			    DDI_SUCCESS) {
				EL(ha, "failed, intr enable, ret=%xh\n", ret);
				ql_destroy_mutex(ha);
				ql_release_intr(ha);
				return (ret);
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): exiting\n", ha->instance);

	return (DDI_SUCCESS);
}
15306 
15307 /*
15308  * ql_setup_fixed
15309  *	Sets up aif FIXED interrupts
15310  *
15311  * Input:
15312  *	ha = adapter state pointer.
15313  *
15314  * Returns:
15315  *	DDI_SUCCESS or DDI_FAILURE.
15316  *
15317  * Context:
15318  *	Kernel context.
15319  */
15320 static int
15321 ql_setup_fixed(ql_adapter_state_t *ha)
15322 {
15323 	int32_t		count = 0;
15324 	int32_t		actual = 0;
15325 	int32_t		ret, i;
15326 
15327 	QL_PRINT_3(CE_CONT, "(%d): entered\n", ha->instance);
15328 
15329 	/* Get number of fixed interrupts the system supports */
15330 	if (((ret = ddi_intr_get_nintrs(ha->dip, DDI_INTR_TYPE_FIXED,
15331 	    &count)) != DDI_SUCCESS) || count == 0) {
15332 		EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
15333 		return (DDI_FAILURE);
15334 	}
15335 
15336 	ha->iflags |= IFLG_INTR_FIXED;
15337 
15338 	/* Allocate space for interrupt handles */
15339 	ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * count);
15340 	ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP);
15341 
15342 	/* Allocate the interrupts */
15343 	if (((ret = ddi_intr_alloc(ha->dip, ha->htable, DDI_INTR_TYPE_FIXED,
15344 	    0, count, &actual, DDI_INTR_ALLOC_STRICT)) != DDI_SUCCESS) ||
15345 	    actual < count) {
15346 		EL(ha, "failed, intr_alloc ret=%xh, count=%xh, "
15347 		    "actual=%xh\n", ret, count, actual);
15348 		ql_release_intr(ha);
15349 		return (DDI_FAILURE);
15350 	}
15351 
15352 	ha->intr_cnt = actual;
15353 
15354 	/* Get interrupt priority */
15355 	if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
15356 	    DDI_SUCCESS) {
15357 		EL(ha, "failed, get_pri ret=%xh\n", ret);
15358 		ql_release_intr(ha);
15359 		return (ret);
15360 	}
15361 
15362 	/* Add the interrupt handlers */
15363 	for (i = 0; i < ha->intr_cnt; i++) {
15364 		if ((ret = ddi_intr_add_handler(ha->htable[i], &ql_isr_aif,
15365 		    (caddr_t)(uintptr_t)ha,
15366 		    (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
15367 		    EL(ha, "failed, intr_add ret=%xh\n", ret);
15368 			ql_release_intr(ha);
15369 			return (ret);
15370 		}
15371 	}
15372 
15373 	/* Setup mutexes */
15374 	if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
15375 		EL(ha, "failed, mutex init ret=%xh\n", ret);
15376 		ql_release_intr(ha);
15377 		return (ret);
15378 	}
15379 
15380 	/* Enable interrupts */
15381 	for (i = 0; i < ha->intr_cnt; i++) {
15382 		if ((ret = ddi_intr_enable(ha->htable[i])) != DDI_SUCCESS) {
15383 			EL(ha, "failed, intr enable, ret=%xh\n", ret);
15384 			ql_destroy_mutex(ha);
15385 			ql_release_intr(ha);
15386 			return (ret);
15387 		}
15388 	}
15389 
15390 	EL(ha, "using FIXED interupts\n");
15391 
15392 	QL_PRINT_3(CE_CONT, "(%d): exiting\n", ha->instance);
15393 
15394 	return (DDI_SUCCESS);
15395 }
15396 
15397 /*
15398  * ql_disable_intr
15399  *	Disables interrupts
15400  *
15401  * Input:
15402  *	ha = adapter state pointer.
15403  *
15404  * Returns:
15405  *
15406  * Context:
15407  *	Kernel context.
15408  */
static void
ql_disable_intr(ql_adapter_state_t *ha)
{
	uint32_t	i, rval;

	QL_PRINT_3(CE_CONT, "(%d): entered\n", ha->instance);

	/* Disable path depends on how the interrupts were set up. */
	if (!(ha->iflags & IFLG_INTR_AIF)) {

		/* Disable legacy interrupts */
		(void) ddi_remove_intr(ha->dip, 0, ha->iblock_cookie);

	} else if ((ha->intr_cap & DDI_INTR_FLAG_BLOCK) &&
	    (ha->iflags & (IFLG_INTR_MSI | IFLG_INTR_MSIX))) {

		/* Remove AIF block interrupts (MSI) */
		/* Failure is logged but not propagated (void teardown). */
		if ((rval = ddi_intr_block_disable(ha->htable, ha->intr_cnt))
		    != DDI_SUCCESS) {
			EL(ha, "failed intr block disable, rval=%x\n", rval);
		}

	} else {

		/* Remove AIF non-block interrupts (fixed).  */
		for (i = 0; i < ha->intr_cnt; i++) {
			/* Failure is logged; continue with remaining ones. */
			if ((rval = ddi_intr_disable(ha->htable[i])) !=
			    DDI_SUCCESS) {
				EL(ha, "failed intr disable, intr#=%xh, "
				    "rval=%xh\n", i, rval);
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): exiting\n", ha->instance);
}
15444 
15445 /*
15446  * ql_release_intr
15447  *	Releases aif legacy interrupt resources
15448  *
15449  * Input:
15450  *	ha = adapter state pointer.
15451  *
15452  * Returns:
15453  *
15454  * Context:
15455  *	Kernel context.
15456  */
static void
ql_release_intr(ql_adapter_state_t *ha)
{
	int32_t 	i;

	QL_PRINT_3(CE_CONT, "(%d): entered\n", ha->instance);

	/* Nothing to release for legacy (non-AIF) interrupts. */
	if (!(ha->iflags & IFLG_INTR_AIF)) {
		QL_PRINT_3(CE_CONT, "(%d): exiting\n", ha->instance);
		return;
	}

	ha->iflags &= ~(IFLG_INTR_AIF);
	if (ha->htable != NULL && ha->hsize > 0) {
		/* Walk every handle slot that was allocated, back to front. */
		i = (int32_t)ha->hsize / (int32_t)sizeof (ddi_intr_handle_t);
		while (i-- > 0) {
			if (ha->htable[i] == 0) {
				EL(ha, "htable[%x]=0h\n", i);
				continue;
			}

			(void) ddi_intr_disable(ha->htable[i]);

			/*
			 * Handlers were only added for the first intr_cnt
			 * entries; slots beyond that have no handler.
			 */
			if (i < ha->intr_cnt) {
				(void) ddi_intr_remove_handler(ha->htable[i]);
			}

			(void) ddi_intr_free(ha->htable[i]);
		}

		kmem_free(ha->htable, ha->hsize);
		ha->htable = NULL;
	}

	/* Reset all interrupt bookkeeping state. */
	ha->hsize = 0;
	ha->intr_cnt = 0;
	ha->intr_pri = 0;
	ha->intr_cap = 0;

	QL_PRINT_3(CE_CONT, "(%d): exiting\n", ha->instance);
}
15498 
15499 /*
15500  * ql_legacy_intr
15501  *	Sets up legacy interrupts.
15502  *
 *	NB: Only to be used if AIF (Advanced Interrupt Framework)
 *	    is NOT in the kernel.
15505  *
15506  * Input:
15507  *	ha = adapter state pointer.
15508  *
15509  * Returns:
15510  *	DDI_SUCCESS or DDI_FAILURE.
15511  *
15512  * Context:
15513  *	Kernel context.
15514  */
15515 static int
15516 ql_legacy_intr(ql_adapter_state_t *ha)
15517 {
15518 	int	rval = DDI_SUCCESS;
15519 
15520 	QL_PRINT_3(CE_CONT, "(%d): entered\n", ha->instance);
15521 
15522 	/* Setup mutexes */
15523 	if (ql_init_mutex(ha) != DDI_SUCCESS) {
15524 		EL(ha, "failed, mutex init\n");
15525 		return (DDI_FAILURE);
15526 	}
15527 
15528 	/* Setup standard/legacy interrupt handler */
15529 	if (ddi_add_intr(ha->dip, (uint_t)0, &ha->iblock_cookie,
15530 	    (ddi_idevice_cookie_t *)0, ql_isr, (caddr_t)ha) != DDI_SUCCESS) {
15531 		cmn_err(CE_WARN, "%s(%d): Failed to add legacy interrupt",
15532 		    QL_NAME, ha->instance);
15533 		ql_destroy_mutex(ha);
15534 		rval = DDI_FAILURE;
15535 	}
15536 
15537 	if (rval == DDI_SUCCESS) {
15538 		ha->iflags |= IFLG_INTR_LEGACY;
15539 		EL(ha, "using legacy interrupts\n");
15540 	}
15541 
15542 	QL_PRINT_3(CE_CONT, "(%d): exiting\n", ha->instance);
15543 
15544 	return (rval);
15545 }
15546 
15547 /*
15548  * ql_init_mutex
15549  *	Initializes mutex's
15550  *
15551  * Input:
15552  *	ha = adapter state pointer.
15553  *
15554  * Returns:
15555  *	DDI_SUCCESS or DDI_FAILURE.
15556  *
15557  * Context:
15558  *	Kernel context.
15559  */
static int
ql_init_mutex(ql_adapter_state_t *ha)
{
	int	ret;
	void	*intr;

	QL_PRINT_3(CE_CONT, "(%d): entered\n", ha->instance);

	/*
	 * Pick the interrupt cookie/priority that mutex_init needs:
	 * AIF setups use the priority obtained from ddi_intr_get_pri;
	 * legacy setups fetch an iblock cookie here.
	 */
	if (ha->iflags & IFLG_INTR_AIF) {
		intr = (void *)(uintptr_t)ha->intr_pri;
	} else {
		/* Get iblock cookies to initialize mutexes */
		if ((ret = ddi_get_iblock_cookie(ha->dip, 0,
		    &ha->iblock_cookie)) != DDI_SUCCESS) {
			EL(ha, "failed, get_iblock: %xh\n", ret);
			return (DDI_FAILURE);
		}
		intr = (void *)ha->iblock_cookie;
	}

	/* mutexes to protect the adapter state structure. */
	mutex_init(&ha->mutex, NULL, MUTEX_DRIVER, intr);

	/* mutex to protect the ISP response ring. */
	mutex_init(&ha->intr_mutex, NULL, MUTEX_DRIVER, intr);

	/* mutex to protect the mailbox registers. */
	mutex_init(&ha->mbx_mutex, NULL, MUTEX_DRIVER, intr);

	/* power management protection */
	mutex_init(&ha->pm_mutex, NULL, MUTEX_DRIVER, intr);

	/* Mailbox wait and interrupt conditional variable. */
	cv_init(&ha->cv_mbx_wait, NULL, CV_DRIVER, NULL);
	cv_init(&ha->cv_mbx_intr, NULL, CV_DRIVER, NULL);

	/* mutex to protect the ISP request ring. */
	mutex_init(&ha->req_ring_mutex, NULL, MUTEX_DRIVER, intr);

	/* Unsolicited buffer conditional variable. */
	cv_init(&ha->cv_ub, NULL, CV_DRIVER, NULL);

	mutex_init(&ha->ub_mutex, NULL, MUTEX_DRIVER, intr);
	mutex_init(&ha->cache_mutex, NULL, MUTEX_DRIVER, intr);

	/* Suspended conditional variable. */
	cv_init(&ha->cv_dr_suspended, NULL, CV_DRIVER, NULL);

	/* mutex to protect task daemon context. */
	mutex_init(&ha->task_daemon_mutex, NULL, MUTEX_DRIVER, intr);

	/* Task_daemon thread conditional variable. */
	cv_init(&ha->cv_task_daemon, NULL, CV_DRIVER, NULL);

	/* mutex to protect notify acknowledge list */
	mutex_init(&ha->ql_nack_mtx, NULL, MUTEX_DRIVER, intr);
	ha->ql_nack = NULL;

	/* mutex to protect diag port manage interface */
	mutex_init(&ha->portmutex, NULL, MUTEX_DRIVER, intr);

	QL_PRINT_3(CE_CONT, "(%d): exiting\n", ha->instance);

	return (DDI_SUCCESS);
}
15625 
15626 /*
15627  * ql_destroy_mutex
15628  *	Destroys mutex's
15629  *
15630  * Input:
15631  *	ha = adapter state pointer.
15632  *
15633  * Returns:
15634  *
15635  * Context:
15636  *	Kernel context.
15637  */
static void
ql_destroy_mutex(ql_adapter_state_t *ha)
{
	QL_PRINT_3(CE_CONT, "(%d): entered\n", ha->instance);

	/* Tear down everything created by ql_init_mutex(). */
	mutex_destroy(&ha->task_daemon_mutex);
	cv_destroy(&ha->cv_task_daemon);
	cv_destroy(&ha->cv_dr_suspended);
	mutex_destroy(&ha->ub_mutex);
	cv_destroy(&ha->cv_ub);
	mutex_destroy(&ha->req_ring_mutex);
	mutex_destroy(&ha->mbx_mutex);
	cv_destroy(&ha->cv_mbx_intr);
	cv_destroy(&ha->cv_mbx_wait);
	mutex_destroy(&ha->pm_mutex);
	mutex_destroy(&ha->intr_mutex);
	mutex_destroy(&ha->portmutex);
	mutex_destroy(&ha->mutex);
	mutex_destroy(&ha->ql_nack_mtx);
	mutex_destroy(&ha->cache_mutex);

	QL_PRINT_3(CE_CONT, "(%d): exiting\n", ha->instance);
}
15661 
15662 /*
15663  * ql_fwmodule_resolve
15664  *	Loads and resolves external firmware module and symbols
15665  *
15666  * Input:
15667  *	ha:		adapter state pointer.
15668  *
15669  * Returns:
15670  *	ql local function return status code:
15671  *		QL_SUCCESS - external f/w module module and symbols resolved
15672  *		QL_FW_NOT_SUPPORTED - Driver does not support ISP type
15673  *		QL_FWMODLOAD_FAILED - Could not load f/w module (ddi failed)
15674  *		QL_FWSYM_NOT_FOUND - Unable to resolve internal f/w symbol
15675  * Context:
15676  *	Kernel context.
15677  *
15678  * NOTE: We currently ddi_modopen/ddi_modclose at attach/detach time.  We
15679  * could switch to a tighter scope around acutal download (and add an extra
15680  * ddi_modopen for module opens that occur before root is mounted).
15681  *
15682  */
15683 uint32_t
15684 ql_fwmodule_resolve(ql_adapter_state_t *ha)
15685 {
15686 	int8_t			module[128];
15687 	int8_t			fw_version[128];
15688 	uint32_t		rval = QL_SUCCESS;
15689 	caddr_t			code, code02;
15690 	uint8_t			*p_ucfw;
15691 	uint16_t		*p_usaddr, *p_uslen;
15692 	uint32_t		*p_uiaddr, *p_uilen, *p_uifw;
15693 	uint32_t		*p_uiaddr02, *p_uilen02;
15694 	struct fw_table		*fwt;
15695 	extern struct fw_table	fw_table[];
15696 
15697 	if (ha->fw_module != NULL) {
15698 		EL(ha, "%x f/w module %d.%02d.%02d is already loaded\n",
15699 		    ha->fw_class, ha->fw_major_version, ha->fw_minor_version,
15700 		    ha->fw_subminor_version);
15701 		return (rval);
15702 	}
15703 
15704 	/* make sure the fw_class is in the fw_table of supported classes */
15705 	for (fwt = &fw_table[0]; fwt->fw_version; fwt++) {
15706 		if (fwt->fw_class == ha->fw_class)
15707 			break;			/* match */
15708 	}
15709 	if (fwt->fw_version == NULL) {
15710 		cmn_err(CE_WARN, "%s(%d): can't find f/w class %x "
15711 		    "in driver's fw_table", QL_NAME, ha->instance,
15712 		    ha->fw_class);
15713 		return (QL_FW_NOT_SUPPORTED);
15714 	}
15715 
15716 	/*
15717 	 * open the module related to the fw_class
15718 	 */
15719 	(void) snprintf(module, sizeof (module), "misc/qlc/qlc_fw_%x",
15720 	    ha->fw_class);
15721 
15722 	ha->fw_module = ddi_modopen(module, KRTLD_MODE_FIRST, NULL);
15723 	if (ha->fw_module == NULL) {
15724 		cmn_err(CE_WARN, "%s(%d): can't load firmware file %s",
15725 		    QL_NAME, ha->instance, module);
15726 		return (QL_FWMODLOAD_FAILED);
15727 	}
15728 
15729 	/*
15730 	 * resolve the fw module symbols, data types depend on fw_class
15731 	 */
15732 
15733 	switch (ha->fw_class) {
15734 	case 0x2200:
15735 	case 0x2300:
15736 	case 0x6322:
15737 
15738 		if ((code = ddi_modsym(ha->fw_module, "risc_code01",
15739 		    NULL)) == NULL) {
15740 			rval = QL_FWSYM_NOT_FOUND;
15741 			EL(ha, "failed, f/w module %d rc01 symbol\n", module);
15742 		} else if ((p_usaddr = ddi_modsym(ha->fw_module,
15743 		    "risc_code_addr01", NULL)) == NULL) {
15744 			rval = QL_FWSYM_NOT_FOUND;
15745 			EL(ha, "failed, f/w module %d rca01 symbol\n", module);
15746 		} else if ((p_uslen = ddi_modsym(ha->fw_module,
15747 		    "risc_code_length01", NULL)) == NULL) {
15748 			rval = QL_FWSYM_NOT_FOUND;
15749 			EL(ha, "failed, f/w module %d rcl01 symbol\n", module);
15750 		} else if ((p_ucfw = ddi_modsym(ha->fw_module,
15751 		    "firmware_version", NULL)) == NULL) {
15752 			rval = QL_FWSYM_NOT_FOUND;
15753 			EL(ha, "failed, f/w module %d fwver symbol\n", module);
15754 		}
15755 
15756 		if (rval == QL_SUCCESS) {
15757 			ha->risc_fw[0].code = code;
15758 			ha->risc_fw[0].addr = *p_usaddr;
15759 			ha->risc_fw[0].length = *p_uslen;
15760 
15761 			(void) snprintf(fw_version, sizeof (fw_version),
15762 			    "%d.%02d.%02d", p_ucfw[0], p_ucfw[1], p_ucfw[2]);
15763 		}
15764 		break;
15765 
15766 	case 0x2400:
15767 	case 0x2500:
15768 
15769 		if ((code = ddi_modsym(ha->fw_module, "risc_code01",
15770 		    NULL)) == NULL) {
15771 			rval = QL_FWSYM_NOT_FOUND;
15772 			EL(ha, "failed, f/w module %d rc01 symbol\n", module);
15773 		} else if ((p_uiaddr = ddi_modsym(ha->fw_module,
15774 		    "risc_code_addr01", NULL)) == NULL) {
15775 			rval = QL_FWSYM_NOT_FOUND;
15776 			EL(ha, "failed, f/w module %d rca01 symbol\n", module);
15777 		} else if ((p_uilen = ddi_modsym(ha->fw_module,
15778 		    "risc_code_length01", NULL)) == NULL) {
15779 			rval = QL_FWSYM_NOT_FOUND;
15780 			EL(ha, "failed, f/w module %d rcl01 symbol\n", module);
15781 		} else if ((p_uifw = ddi_modsym(ha->fw_module,
15782 		    "firmware_version", NULL)) == NULL) {
15783 			rval = QL_FWSYM_NOT_FOUND;
15784 			EL(ha, "failed, f/w module %d fwver symbol\n", module);
15785 		}
15786 
15787 		if ((code02 = ddi_modsym(ha->fw_module, "risc_code02",
15788 		    NULL)) == NULL) {
15789 			rval = QL_FWSYM_NOT_FOUND;
15790 			EL(ha, "failed, f/w module %d rc02 symbol\n", module);
15791 		} else if ((p_uiaddr02 = ddi_modsym(ha->fw_module,
15792 		    "risc_code_addr02", NULL)) == NULL) {
15793 			rval = QL_FWSYM_NOT_FOUND;
15794 			EL(ha, "failed, f/w module %d rca02 symbol\n", module);
15795 		} else if ((p_uilen02 = ddi_modsym(ha->fw_module,
15796 		    "risc_code_length02", NULL)) == NULL) {
15797 			rval = QL_FWSYM_NOT_FOUND;
15798 			EL(ha, "failed, f/w module %d rcl02 symbol\n", module);
15799 		}
15800 
15801 		if (rval == QL_SUCCESS) {
15802 			ha->risc_fw[0].code = code;
15803 			ha->risc_fw[0].addr = *p_uiaddr;
15804 			ha->risc_fw[0].length = *p_uilen;
15805 			ha->risc_fw[1].code = code02;
15806 			ha->risc_fw[1].addr = *p_uiaddr02;
15807 			ha->risc_fw[1].length = *p_uilen02;
15808 
15809 			(void) snprintf(fw_version, sizeof (fw_version),
15810 			    "%d.%02d.%02d", p_uifw[0], p_uifw[1], p_uifw[2]);
15811 		}
15812 		break;
15813 
15814 	default:
15815 		EL(ha, "fw_class: '%x' is not supported\n", ha->fw_class);
15816 		rval = QL_FW_NOT_SUPPORTED;
15817 	}
15818 
15819 	if (rval != QL_SUCCESS) {
15820 		cmn_err(CE_WARN, "%s(%d): can't resolve firmware "
15821 		    "module %s (%x)", QL_NAME, ha->instance, module, rval);
15822 		if (ha->fw_module != NULL) {
15823 			(void) ddi_modclose(ha->fw_module);
15824 			ha->fw_module = NULL;
15825 		}
15826 	} else {
15827 		/*
15828 		 * check for firmware version mismatch between module and
15829 		 * compiled in fw_table version.
15830 		 */
15831 
15832 		if (strcmp(fwt->fw_version, fw_version) != 0) {
15833 
15834 			/*
15835 			 * If f/w / driver version mismatches then
15836 			 * return a successful status -- however warn
15837 			 * the user that this is NOT recommended.
15838 			 */
15839 
15840 			cmn_err(CE_WARN, "%s(%d): driver / f/w version "
15841 			    "mismatch for %x: driver-%s module-%s", QL_NAME,
15842 			    ha->instance, ha->fw_class, fwt->fw_version,
15843 			    fw_version);
15844 
15845 			ha->cfg_flags |= CFG_FW_MISMATCH;
15846 		} else {
15847 			ha->cfg_flags &= ~CFG_FW_MISMATCH;
15848 		}
15849 	}
15850 
15851 	return (rval);
15852 }
15853 
15854 /*
15855  * ql_port_state
15856  *	Set state on all adapter ports.
15857  *
15858  * Input:
15859  *	ha:	parent adapter state pointer.
15860  *	state:	port state.
15861  *	flags:	task daemon flags to set.
15862  *
15863  * Context:
15864  *	Interrupt or Kernel context, no mailbox commands allowed.
15865  */
15866 void
15867 ql_port_state(ql_adapter_state_t *ha, uint32_t state, uint32_t flags)
15868 {
15869 	ql_adapter_state_t	*vha;
15870 
15871 	TASK_DAEMON_LOCK(ha);
15872 	for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
15873 		if (FC_PORT_STATE_MASK(vha->state) != state) {
15874 			vha->state = state != FC_STATE_OFFLINE ?
15875 			    (FC_PORT_SPEED_MASK(vha->state) | state) : state;
15876 			vha->task_daemon_flags |= flags;
15877 		}
15878 	}
15879 	ha->pha->task_daemon_flags |= flags & LOOP_DOWN;
15880 	TASK_DAEMON_UNLOCK(ha);
15881 }
15882