xref: /illumos-gate/usr/src/uts/common/io/fibre-channel/fca/qlc/ql_xioctl.c (revision 7e322df5ee63a00c1c57398abec50fc1dc54b67a)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /* Copyright 2009 QLogic Corporation */
23 
24 /*
25  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
26  * Use is subject to license terms.
27  */
28 
29 #pragma ident	"Copyright 2009 QLogic Corporation; ql_xioctl.c"
30 
31 /*
32  * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
33  *
34  * ***********************************************************************
35  * *									**
36  * *				NOTICE					**
37  * *		COPYRIGHT (C) 1996-2009 QLOGIC CORPORATION		**
38  * *			ALL RIGHTS RESERVED				**
39  * *									**
40  * ***********************************************************************
41  *
42  */
43 
44 #include <ql_apps.h>
45 #include <ql_api.h>
46 #include <ql_debug.h>
47 #include <ql_init.h>
48 #include <ql_iocb.h>
49 #include <ql_ioctl.h>
50 #include <ql_mbx.h>
51 #include <ql_xioctl.h>
52 
53 /*
54  * Local data
55  */
56 
57 /*
58  * Local prototypes
59  */
60 static int ql_sdm_ioctl(ql_adapter_state_t *, int, void *, int);
61 static int ql_sdm_setup(ql_adapter_state_t *, EXT_IOCTL **, void *, int,
62     boolean_t (*)(EXT_IOCTL *));
63 static boolean_t ql_validate_signature(EXT_IOCTL *);
64 static int ql_sdm_return(ql_adapter_state_t *, EXT_IOCTL *, void *, int);
65 static void ql_query(ql_adapter_state_t *, EXT_IOCTL *, int);
66 static void ql_qry_hba_node(ql_adapter_state_t *, EXT_IOCTL *, int);
67 static void ql_qry_hba_port(ql_adapter_state_t *, EXT_IOCTL *, int);
68 static void ql_qry_disc_port(ql_adapter_state_t *, EXT_IOCTL *, int);
69 static void ql_qry_disc_tgt(ql_adapter_state_t *, EXT_IOCTL *, int);
70 static void ql_qry_fw(ql_adapter_state_t *, EXT_IOCTL *, int);
71 static void ql_qry_chip(ql_adapter_state_t *, EXT_IOCTL *, int);
72 static void ql_qry_driver(ql_adapter_state_t *, EXT_IOCTL *, int);
73 static void ql_fcct(ql_adapter_state_t *, EXT_IOCTL *, int);
74 static void ql_aen_reg(ql_adapter_state_t *, EXT_IOCTL *, int);
75 static void ql_aen_get(ql_adapter_state_t *, EXT_IOCTL *, int);
76 static void ql_scsi_passthru(ql_adapter_state_t *, EXT_IOCTL *, int);
77 static void ql_wwpn_to_scsiaddr(ql_adapter_state_t *, EXT_IOCTL *, int);
78 static void ql_host_idx(ql_adapter_state_t *, EXT_IOCTL *, int);
79 static void ql_host_drvname(ql_adapter_state_t *, EXT_IOCTL *, int);
80 static void ql_read_nvram(ql_adapter_state_t *, EXT_IOCTL *, int);
81 static void ql_write_nvram(ql_adapter_state_t *, EXT_IOCTL *, int);
82 static void ql_read_flash(ql_adapter_state_t *, EXT_IOCTL *, int);
83 static void ql_write_flash(ql_adapter_state_t *, EXT_IOCTL *, int);
84 static void ql_write_vpd(ql_adapter_state_t *, EXT_IOCTL *, int);
85 static void ql_read_vpd(ql_adapter_state_t *, EXT_IOCTL *, int);
86 static void ql_diagnostic_loopback(ql_adapter_state_t *, EXT_IOCTL *, int);
87 static void ql_send_els_rnid(ql_adapter_state_t *, EXT_IOCTL *, int);
88 static void ql_set_host_data(ql_adapter_state_t *, EXT_IOCTL *, int);
89 static void ql_get_host_data(ql_adapter_state_t *, EXT_IOCTL *, int);
90 
91 static int ql_lun_count(ql_adapter_state_t *, ql_tgt_t *);
92 static int ql_report_lun(ql_adapter_state_t *, ql_tgt_t *);
93 static int ql_inq_scan(ql_adapter_state_t *, ql_tgt_t *, int);
94 static int ql_inq(ql_adapter_state_t *, ql_tgt_t *, int, ql_mbx_iocb_t *,
95     uint8_t);
96 static uint32_t	ql_get_buffer_data(caddr_t, caddr_t, uint32_t, int);
97 static uint32_t ql_send_buffer_data(caddr_t, caddr_t, uint32_t, int);
98 static int ql_24xx_flash_desc(ql_adapter_state_t *);
99 static int ql_setup_flash(ql_adapter_state_t *);
100 static ql_tgt_t *ql_find_port(ql_adapter_state_t *, uint8_t *, uint16_t);
101 static int ql_flash_fcode_load(ql_adapter_state_t *, void *, uint32_t, int);
102 static int ql_flash_fcode_dump(ql_adapter_state_t *, void *, uint32_t,
103     uint32_t, int);
104 static int ql_program_flash_address(ql_adapter_state_t *, uint32_t,
105     uint8_t);
106 static void ql_set_rnid_parameters(ql_adapter_state_t *, EXT_IOCTL *, int);
107 static void ql_get_rnid_parameters(ql_adapter_state_t *, EXT_IOCTL *, int);
108 static int ql_reset_statistics(ql_adapter_state_t *, EXT_IOCTL *);
109 static void ql_get_statistics(ql_adapter_state_t *, EXT_IOCTL *, int);
110 static void ql_get_statistics_fc(ql_adapter_state_t *, EXT_IOCTL *, int);
111 static void ql_get_statistics_fc4(ql_adapter_state_t *, EXT_IOCTL *, int);
112 static void ql_set_led_state(ql_adapter_state_t *, EXT_IOCTL *, int);
113 static void ql_get_led_state(ql_adapter_state_t *, EXT_IOCTL *, int);
114 static void ql_drive_led(ql_adapter_state_t *, uint32_t);
115 static uint32_t ql_setup_led(ql_adapter_state_t *);
116 static uint32_t ql_wrapup_led(ql_adapter_state_t *);
117 static void ql_get_port_summary(ql_adapter_state_t *, EXT_IOCTL *, int);
118 static void ql_get_target_id(ql_adapter_state_t *, EXT_IOCTL *, int);
119 static void ql_get_sfp(ql_adapter_state_t *, EXT_IOCTL *, int);
120 static int ql_dump_sfp(ql_adapter_state_t *, void *, int);
121 static ql_fcache_t *ql_setup_fnode(ql_adapter_state_t *);
122 static void ql_get_fcache(ql_adapter_state_t *, EXT_IOCTL *, int);
123 static void ql_get_fcache_ex(ql_adapter_state_t *, EXT_IOCTL *, int);
124 void ql_update_fcache(ql_adapter_state_t *, uint8_t *, uint32_t);
125 static int ql_check_pci(ql_adapter_state_t *, ql_fcache_t *, uint32_t *);
126 static void ql_flash_layout_table(ql_adapter_state_t *, uint32_t);
127 static void ql_flash_nvram_defaults(ql_adapter_state_t *);
128 static void ql_port_param(ql_adapter_state_t *, EXT_IOCTL *, int);
129 static int ql_check_pci(ql_adapter_state_t *, ql_fcache_t *, uint32_t *);
130 static void ql_get_pci_data(ql_adapter_state_t *, EXT_IOCTL *, int);
131 static void ql_get_fwfcetrace(ql_adapter_state_t *, EXT_IOCTL *, int);
132 static void ql_get_fwexttrace(ql_adapter_state_t *, EXT_IOCTL *, int);
133 static void ql_menlo_reset(ql_adapter_state_t *, EXT_IOCTL *, int);
134 static void ql_menlo_get_fw_version(ql_adapter_state_t *, EXT_IOCTL *, int);
135 static void ql_menlo_update_fw(ql_adapter_state_t *, EXT_IOCTL *, int);
136 static void ql_menlo_manage_info(ql_adapter_state_t *, EXT_IOCTL *, int);
137 static int ql_suspend_hba(ql_adapter_state_t *, uint32_t);
138 static void ql_restart_hba(ql_adapter_state_t *);
139 static void ql_get_vp_cnt_id(ql_adapter_state_t *, EXT_IOCTL *, int);
140 static void ql_vp_ioctl(ql_adapter_state_t *, EXT_IOCTL *, int);
141 static void ql_qry_vport(ql_adapter_state_t *, EXT_IOCTL *, int);
142 static void ql_access_flash(ql_adapter_state_t *, EXT_IOCTL *, int);
143 static void ql_reset_cmd(ql_adapter_state_t *, EXT_IOCTL *);
144 static void ql_update_flash_caches(ql_adapter_state_t *);
145 
146 /* ******************************************************************** */
147 /*			External IOCTL support.				*/
148 /* ******************************************************************** */
149 
150 /*
151  * ql_alloc_xioctl_resource
152  *	Allocates resources needed by module code.
153  *
154  * Input:
155  *	ha:		adapter state pointer.
156  *
157  * Returns:
158  *	SYS_ERRNO
159  *
160  * Context:
161  *	Kernel context.
162  */
163 int
164 ql_alloc_xioctl_resource(ql_adapter_state_t *ha)
165 {
166 	ql_xioctl_t	*xp;
167 
168 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
169 
170 	if (ha->xioctl != NULL) {
171 		QL_PRINT_9(CE_CONT, "(%d): already allocated done\n",
172 		    ha->instance);
173 		return (0);
174 	}
175 
176 	xp = kmem_zalloc(sizeof (ql_xioctl_t), KM_SLEEP);
177 	if (xp == NULL) {
178 		EL(ha, "failed, kmem_zalloc\n");
179 		return (ENOMEM);
180 	}
181 	ha->xioctl = xp;
182 
183 	/* Allocate AEN tracking buffer */
184 	xp->aen_tracking_queue = kmem_zalloc(EXT_DEF_MAX_AEN_QUEUE *
185 	    sizeof (EXT_ASYNC_EVENT), KM_SLEEP);
186 	if (xp->aen_tracking_queue == NULL) {
187 		EL(ha, "failed, kmem_zalloc-2\n");
188 		ql_free_xioctl_resource(ha);
189 		return (ENOMEM);
190 	}
191 
192 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
193 
194 	return (0);
195 }
196 
197 /*
198  * ql_free_xioctl_resource
199  *	Frees resources used by module code.
200  *
201  * Input:
202  *	ha:		adapter state pointer.
203  *
204  * Context:
205  *	Kernel context.
206  */
207 void
208 ql_free_xioctl_resource(ql_adapter_state_t *ha)
209 {
210 	ql_xioctl_t	*xp = ha->xioctl;
211 
212 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
213 
214 	if (xp == NULL) {
215 		QL_PRINT_9(CE_CONT, "(%d): already freed\n", ha->instance);
216 		return;
217 	}
218 
219 	if (xp->aen_tracking_queue != NULL) {
220 		kmem_free(xp->aen_tracking_queue, EXT_DEF_MAX_AEN_QUEUE *
221 		    sizeof (EXT_ASYNC_EVENT));
222 		xp->aen_tracking_queue = NULL;
223 	}
224 
225 	kmem_free(xp, sizeof (ql_xioctl_t));
226 	ha->xioctl = NULL;
227 
228 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
229 }
230 
231 /*
232  * ql_xioctl
233  *	External IOCTL processing.
234  *
235  * Input:
236  *	ha:	adapter state pointer.
237  *	cmd:	function to perform
238  *	arg:	data type varies with request
239  *	mode:	flags
240  *	cred_p:	credentials pointer
241  *	rval_p:	pointer to result value
242  *
243  * Returns:
244  *	0:		success
245  *	ENXIO:		No such device or address
246  *	ENOPROTOOPT:	Protocol not available
247  *
248  * Context:
249  *	Kernel context.
250  */
251 /* ARGSUSED */
252 int
253 ql_xioctl(ql_adapter_state_t *ha, int cmd, intptr_t arg, int mode,
254     cred_t *cred_p, int *rval_p)
255 {
256 	int	rval;
257 
258 	QL_PRINT_9(CE_CONT, "(%d): started, cmd=%d\n", ha->instance, cmd);
259 
260 	if (ha->xioctl == NULL) {
261 		QL_PRINT_9(CE_CONT, "(%d): no context\n", ha->instance);
262 		return (ENXIO);
263 	}
264 
265 	switch (cmd) {
266 	case EXT_CC_QUERY:
267 	case EXT_CC_SEND_FCCT_PASSTHRU:
268 	case EXT_CC_REG_AEN:
269 	case EXT_CC_GET_AEN:
270 	case EXT_CC_SEND_SCSI_PASSTHRU:
271 	case EXT_CC_WWPN_TO_SCSIADDR:
272 	case EXT_CC_SEND_ELS_RNID:
273 	case EXT_CC_SET_DATA:
274 	case EXT_CC_GET_DATA:
275 	case EXT_CC_HOST_IDX:
276 	case EXT_CC_READ_NVRAM:
277 	case EXT_CC_UPDATE_NVRAM:
278 	case EXT_CC_READ_OPTION_ROM:
279 	case EXT_CC_READ_OPTION_ROM_EX:
280 	case EXT_CC_UPDATE_OPTION_ROM:
281 	case EXT_CC_UPDATE_OPTION_ROM_EX:
282 	case EXT_CC_GET_VPD:
283 	case EXT_CC_SET_VPD:
284 	case EXT_CC_LOOPBACK:
285 	case EXT_CC_GET_FCACHE:
286 	case EXT_CC_GET_FCACHE_EX:
287 	case EXT_CC_HOST_DRVNAME:
288 	case EXT_CC_GET_SFP_DATA:
289 	case EXT_CC_PORT_PARAM:
290 	case EXT_CC_GET_PCI_DATA:
291 	case EXT_CC_GET_FWEXTTRACE:
292 	case EXT_CC_GET_FWFCETRACE:
293 	case EXT_CC_GET_VP_CNT_ID:
294 	case EXT_CC_VPORT_CMD:
295 	case EXT_CC_ACCESS_FLASH:
296 	case EXT_CC_RESET_FW:
297 		rval = ql_sdm_ioctl(ha, cmd, (void *)arg, mode);
298 		break;
299 	default:
300 		/* function not supported. */
301 		EL(ha, "function=%d not supported\n", cmd);
302 		rval = ENOPROTOOPT;
303 	}
304 
305 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
306 
307 	return (rval);
308 }
309 
310 /*
311  * ql_sdm_ioctl
312  *	Provides ioctl functions for SAN/Device Management functions
313  *	AKA External Ioctl functions.
314  *
315  * Input:
316  *	ha:		adapter state pointer.
317  *	ioctl_code:	ioctl function to perform
318  *	arg:		Pointer to EXT_IOCTL cmd data in application land.
319  *	mode:		flags
320  *
321  * Returns:
322  *	0:	success
323  *	ENOMEM:	Alloc of local EXT_IOCTL struct failed.
324  *	EFAULT:	Copyin of caller's EXT_IOCTL struct failed or
325  *		copyout of EXT_IOCTL status info failed.
326  *	EINVAL:	Signature or version of caller's EXT_IOCTL invalid.
327  *	EBUSY:	Device busy
328  *
329  * Context:
330  *	Kernel context.
331  */
332 static int
333 ql_sdm_ioctl(ql_adapter_state_t *ha, int ioctl_code, void *arg, int mode)
334 {
335 	EXT_IOCTL		*cmd;
336 	int			rval;
337 	ql_adapter_state_t	*vha;
338 
339 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
340 
341 	/* Copy argument structure (EXT_IOCTL) from application land. */
342 	if ((rval = ql_sdm_setup(ha, &cmd, arg, mode,
343 	    ql_validate_signature)) != 0) {
344 		/*
345 		 * a non-zero value at this time means a problem getting
346 		 * the requested information from application land, just
347 		 * return the error code and hope for the best.
348 		 */
349 		EL(ha, "failed, sdm_setup\n");
350 		return (rval);
351 	}
352 
353 	/*
354 	 * Map the physical ha ptr (which the ioctl is called with)
355 	 * to the virtual ha that the caller is addressing.
356 	 */
357 	if (ha->flags & VP_ENABLED) {
358 		/*
359 		 * Special case: HbaSelect == 0 is physical ha
360 		 */
361 		if (cmd->HbaSelect != 0) {
362 			vha = ha->vp_next;
363 			while (vha != NULL) {
364 				if (vha->vp_index == cmd->HbaSelect) {
365 					ha = vha;
366 					break;
367 				}
368 				vha = vha->vp_next;
369 			}
370 
371 			/*
372 			 * If we can't find the specified vp index then
373 			 * we probably have an error (vp indexes shifting
374 			 * under our feet?).
375 			 */
376 			if (vha == NULL) {
377 				EL(ha, "Invalid HbaSelect vp index: %xh\n",
378 				    cmd->HbaSelect);
379 				cmd->Status = EXT_STATUS_INVALID_VPINDEX;
380 				cmd->ResponseLen = 0;
381 				return (EFAULT);
382 			}
383 		}
384 	}
385 
386 	/*
387 	 * If driver is suspended, stalled, or powered down rtn BUSY
388 	 */
389 	if (ha->flags & ADAPTER_SUSPENDED ||
390 	    ha->task_daemon_flags & DRIVER_STALL ||
391 	    ha->power_level != PM_LEVEL_D0) {
392 		EL(ha, " %s\n", ha->flags & ADAPTER_SUSPENDED ?
393 		    "driver suspended" :
394 		    (ha->task_daemon_flags & DRIVER_STALL ? "driver stalled" :
395 		    "FCA powered down"));
396 		cmd->Status = EXT_STATUS_BUSY;
397 		cmd->ResponseLen = 0;
398 		rval = EBUSY;
399 
400 		/* Return results to caller */
401 		if ((ql_sdm_return(ha, cmd, arg, mode)) == -1) {
402 			EL(ha, "failed, sdm_return\n");
403 			rval = EFAULT;
404 		}
405 		return (rval);
406 	}
407 
408 	switch (ioctl_code) {
409 	case EXT_CC_QUERY_OS:
410 		ql_query(ha, cmd, mode);
411 		break;
412 	case EXT_CC_SEND_FCCT_PASSTHRU_OS:
413 		ql_fcct(ha, cmd, mode);
414 		break;
415 	case EXT_CC_REG_AEN_OS:
416 		ql_aen_reg(ha, cmd, mode);
417 		break;
418 	case EXT_CC_GET_AEN_OS:
419 		ql_aen_get(ha, cmd, mode);
420 		break;
421 	case EXT_CC_GET_DATA_OS:
422 		ql_get_host_data(ha, cmd, mode);
423 		break;
424 	case EXT_CC_SET_DATA_OS:
425 		ql_set_host_data(ha, cmd, mode);
426 		break;
427 	case EXT_CC_SEND_ELS_RNID_OS:
428 		ql_send_els_rnid(ha, cmd, mode);
429 		break;
430 	case EXT_CC_SCSI_PASSTHRU_OS:
431 		ql_scsi_passthru(ha, cmd, mode);
432 		break;
433 	case EXT_CC_WWPN_TO_SCSIADDR_OS:
434 		ql_wwpn_to_scsiaddr(ha, cmd, mode);
435 		break;
436 	case EXT_CC_HOST_IDX_OS:
437 		ql_host_idx(ha, cmd, mode);
438 		break;
439 	case EXT_CC_HOST_DRVNAME_OS:
440 		ql_host_drvname(ha, cmd, mode);
441 		break;
442 	case EXT_CC_READ_NVRAM_OS:
443 		ql_read_nvram(ha, cmd, mode);
444 		break;
445 	case EXT_CC_UPDATE_NVRAM_OS:
446 		ql_write_nvram(ha, cmd, mode);
447 		break;
448 	case EXT_CC_READ_OPTION_ROM_OS:
449 	case EXT_CC_READ_OPTION_ROM_EX_OS:
450 		ql_read_flash(ha, cmd, mode);
451 		break;
452 	case EXT_CC_UPDATE_OPTION_ROM_OS:
453 	case EXT_CC_UPDATE_OPTION_ROM_EX_OS:
454 		ql_write_flash(ha, cmd, mode);
455 		break;
456 	case EXT_CC_LOOPBACK_OS:
457 		ql_diagnostic_loopback(ha, cmd, mode);
458 		break;
459 	case EXT_CC_GET_VPD_OS:
460 		ql_read_vpd(ha, cmd, mode);
461 		break;
462 	case EXT_CC_SET_VPD_OS:
463 		ql_write_vpd(ha, cmd, mode);
464 		break;
465 	case EXT_CC_GET_FCACHE_OS:
466 		ql_get_fcache(ha, cmd, mode);
467 		break;
468 	case EXT_CC_GET_FCACHE_EX_OS:
469 		ql_get_fcache_ex(ha, cmd, mode);
470 		break;
471 	case EXT_CC_GET_SFP_DATA_OS:
472 		ql_get_sfp(ha, cmd, mode);
473 		break;
474 	case EXT_CC_PORT_PARAM_OS:
475 		ql_port_param(ha, cmd, mode);
476 		break;
477 	case EXT_CC_GET_PCI_DATA_OS:
478 		ql_get_pci_data(ha, cmd, mode);
479 		break;
480 	case EXT_CC_GET_FWEXTTRACE_OS:
481 		ql_get_fwexttrace(ha, cmd, mode);
482 		break;
483 	case EXT_CC_GET_FWFCETRACE_OS:
484 		ql_get_fwfcetrace(ha, cmd, mode);
485 		break;
486 	case EXT_CC_MENLO_RESET:
487 		ql_menlo_reset(ha, cmd, mode);
488 		break;
489 	case EXT_CC_MENLO_GET_FW_VERSION:
490 		ql_menlo_get_fw_version(ha, cmd, mode);
491 		break;
492 	case EXT_CC_MENLO_UPDATE_FW:
493 		ql_menlo_update_fw(ha, cmd, mode);
494 		break;
495 	case EXT_CC_MENLO_MANAGE_INFO:
496 		ql_menlo_manage_info(ha, cmd, mode);
497 		break;
498 	case EXT_CC_GET_VP_CNT_ID_OS:
499 		ql_get_vp_cnt_id(ha, cmd, mode);
500 		break;
501 	case EXT_CC_VPORT_CMD_OS:
502 		ql_vp_ioctl(ha, cmd, mode);
503 		break;
504 	case EXT_CC_ACCESS_FLASH_OS:
505 		ql_access_flash(ha, cmd, mode);
506 		break;
507 	case EXT_CC_RESET_FW_OS:
508 		ql_reset_cmd(ha, cmd);
509 		break;
510 	default:
511 		/* function not supported. */
512 		EL(ha, "failed, function not supported=%d\n", ioctl_code);
513 
514 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
515 		cmd->ResponseLen = 0;
516 		break;
517 	}
518 
519 	/* Return results to caller */
520 	if (ql_sdm_return(ha, cmd, arg, mode) == -1) {
521 		EL(ha, "failed, sdm_return\n");
522 		return (EFAULT);
523 	}
524 
525 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
526 
527 	return (0);
528 }
529 
530 /*
531  * ql_sdm_setup
532  *	Make a local copy of the EXT_IOCTL struct and validate it.
533  *
534  * Input:
535  *	ha:		adapter state pointer.
536  *	cmd_struct:	Pointer to location to store local adrs of EXT_IOCTL.
537  *	arg:		Address of application EXT_IOCTL cmd data
538  *	mode:		flags
539  *	val_sig:	Pointer to a function to validate the ioctl signature.
540  *
541  * Returns:
542  *	0:		success
543  *	EFAULT:		Copy in error of application EXT_IOCTL struct.
544  *	EINVAL:		Invalid version, signature.
545  *	ENOMEM:		Local allocation of EXT_IOCTL failed.
546  *
547  * Context:
548  *	Kernel context.
549  */
550 static int
551 ql_sdm_setup(ql_adapter_state_t *ha, EXT_IOCTL **cmd_struct, void *arg,
552     int mode, boolean_t (*val_sig)(EXT_IOCTL *))
553 {
554 	int		rval;
555 	EXT_IOCTL	*cmd;
556 
557 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
558 
559 	/* Allocate local memory for EXT_IOCTL. */
560 	*cmd_struct = NULL;
561 	cmd = (EXT_IOCTL *)kmem_zalloc(sizeof (EXT_IOCTL), KM_SLEEP);
562 	if (cmd == NULL) {
563 		EL(ha, "failed, kmem_zalloc\n");
564 		return (ENOMEM);
565 	}
566 	/* Get argument structure. */
567 	rval = ddi_copyin(arg, (void *)cmd, sizeof (EXT_IOCTL), mode);
568 	if (rval != 0) {
569 		EL(ha, "failed, ddi_copyin\n");
570 		rval = EFAULT;
571 	} else {
572 		/*
573 		 * Check signature and the version.
574 		 * If either are not valid then neither is the
575 		 * structure so don't attempt to return any error status
576 		 * because we can't trust what caller's arg points to.
577 		 * Just return the errno.
578 		 */
579 		if (val_sig(cmd) == 0) {
580 			EL(ha, "failed, signature\n");
581 			rval = EINVAL;
582 		} else if (cmd->Version > EXT_VERSION) {
583 			EL(ha, "failed, version\n");
584 			rval = EINVAL;
585 		}
586 	}
587 
588 	if (rval == 0) {
589 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
590 		*cmd_struct = cmd;
591 		cmd->Status = EXT_STATUS_OK;
592 		cmd->DetailStatus = 0;
593 	} else {
594 		kmem_free((void *)cmd, sizeof (EXT_IOCTL));
595 	}
596 
597 	return (rval);
598 }
599 
600 /*
601  * ql_validate_signature
602  *	Validate the signature string for an external ioctl call.
603  *
604  * Input:
605  *	sg:	Pointer to EXT_IOCTL signature to validate.
606  *
607  * Returns:
608  *	B_TRUE:		Signature is valid.
609  *	B_FALSE:	Signature is NOT valid.
610  *
611  * Context:
612  *	Kernel context.
613  */
614 static boolean_t
615 ql_validate_signature(EXT_IOCTL *cmd_struct)
616 {
617 	/*
618 	 * Check signature.
619 	 *
620 	 * If signature is not valid then neither is the rest of
621 	 * the structure (e.g., can't trust it), so don't attempt
622 	 * to return any error status other than the errno.
623 	 */
624 	if (bcmp(&cmd_struct->Signature, "QLOGIC", 6) != 0) {
625 		QL_PRINT_2(CE_CONT, "failed,\n");
626 		return (B_FALSE);
627 	}
628 
629 	return (B_TRUE);
630 }
631 
632 /*
633  * ql_sdm_return
634  *	Copies return data/status to application land for
635  *	ioctl call using the SAN/Device Management EXT_IOCTL call interface.
636  *
637  * Input:
638  *	ha:		adapter state pointer.
639  *	cmd:		Pointer to kernel copy of requestor's EXT_IOCTL struct.
640  *	ioctl_code:	ioctl function to perform
641  *	arg:		EXT_IOCTL cmd data in application land.
642  *	mode:		flags
643  *
644  * Returns:
645  *	0:	success
646  *	EFAULT:	Copy out error.
647  *
648  * Context:
649  *	Kernel context.
650  */
651 /* ARGSUSED */
652 static int
653 ql_sdm_return(ql_adapter_state_t *ha, EXT_IOCTL *cmd, void *arg, int mode)
654 {
655 	int	rval = 0;
656 
657 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
658 
659 	rval |= ddi_copyout((void *)&cmd->ResponseLen,
660 	    (void *)&(((EXT_IOCTL*)arg)->ResponseLen), sizeof (uint32_t),
661 	    mode);
662 
663 	rval |= ddi_copyout((void *)&cmd->Status,
664 	    (void *)&(((EXT_IOCTL*)arg)->Status),
665 	    sizeof (cmd->Status), mode);
666 	rval |= ddi_copyout((void *)&cmd->DetailStatus,
667 	    (void *)&(((EXT_IOCTL*)arg)->DetailStatus),
668 	    sizeof (cmd->DetailStatus), mode);
669 
670 	kmem_free((void *)cmd, sizeof (EXT_IOCTL));
671 
672 	if (rval != 0) {
673 		/* Some copyout operation failed */
674 		EL(ha, "failed, ddi_copyout\n");
675 		return (EFAULT);
676 	}
677 
678 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
679 
680 	return (0);
681 }
682 
683 /*
684  * ql_query
685  *	Performs all EXT_CC_QUERY functions.
686  *
687  * Input:
688  *	ha:	adapter state pointer.
689  *	cmd:	Local EXT_IOCTL cmd struct pointer.
690  *	mode:	flags.
691  *
692  * Returns:
693  *	None, request status indicated in cmd->Status.
694  *
695  * Context:
696  *	Kernel context.
697  */
698 static void
699 ql_query(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
700 {
701 	QL_PRINT_9(CE_CONT, "(%d): started, cmd=%d\n", ha->instance,
702 	    cmd->SubCode);
703 
704 	/* case off on command subcode */
705 	switch (cmd->SubCode) {
706 	case EXT_SC_QUERY_HBA_NODE:
707 		ql_qry_hba_node(ha, cmd, mode);
708 		break;
709 	case EXT_SC_QUERY_HBA_PORT:
710 		ql_qry_hba_port(ha, cmd, mode);
711 		break;
712 	case EXT_SC_QUERY_DISC_PORT:
713 		ql_qry_disc_port(ha, cmd, mode);
714 		break;
715 	case EXT_SC_QUERY_DISC_TGT:
716 		ql_qry_disc_tgt(ha, cmd, mode);
717 		break;
718 	case EXT_SC_QUERY_DRIVER:
719 		ql_qry_driver(ha, cmd, mode);
720 		break;
721 	case EXT_SC_QUERY_FW:
722 		ql_qry_fw(ha, cmd, mode);
723 		break;
724 	case EXT_SC_QUERY_CHIP:
725 		ql_qry_chip(ha, cmd, mode);
726 		break;
727 	case EXT_SC_QUERY_DISC_LUN:
728 	default:
729 		/* function not supported. */
730 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
731 		EL(ha, "failed, Unsupported Subcode=%xh\n",
732 		    cmd->SubCode);
733 		break;
734 	}
735 
736 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
737 }
738 
739 /*
740  * ql_qry_hba_node
741  *	Performs EXT_SC_QUERY_HBA_NODE subfunction.
742  *
743  * Input:
744  *	ha:	adapter state pointer.
745  *	cmd:	EXT_IOCTL cmd struct pointer.
746  *	mode:	flags.
747  *
748  * Returns:
749  *	None, request status indicated in cmd->Status.
750  *
751  * Context:
752  *	Kernel context.
753  */
754 static void
755 ql_qry_hba_node(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
756 {
757 	EXT_HBA_NODE	tmp_node = {0};
758 	uint_t		len;
759 	caddr_t		bufp;
760 	ql_mbx_data_t	mr;
761 
762 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
763 
764 	if (cmd->ResponseLen < sizeof (EXT_HBA_NODE)) {
765 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
766 		cmd->DetailStatus = sizeof (EXT_HBA_NODE);
767 		EL(ha, "failed, ResponseLen < EXT_HBA_NODE, "
768 		    "Len=%xh\n", cmd->ResponseLen);
769 		cmd->ResponseLen = 0;
770 		return;
771 	}
772 
773 	/* fill in the values */
774 
775 	bcopy(ha->loginparams.node_ww_name.raw_wwn, tmp_node.WWNN,
776 	    EXT_DEF_WWN_NAME_SIZE);
777 
778 	(void) sprintf((char *)(tmp_node.Manufacturer), "QLogic Corporation");
779 
780 	(void) sprintf((char *)(tmp_node.Model), "%x", ha->device_id);
781 
782 	bcopy(&tmp_node.WWNN[5], tmp_node.SerialNum, 3);
783 
784 	(void) sprintf((char *)(tmp_node.DriverVersion), QL_VERSION);
785 
786 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
787 		size_t		verlen;
788 		uint16_t	w;
789 		char		*tmpptr;
790 
791 		verlen = strlen((char *)(tmp_node.DriverVersion));
792 		if (verlen + 5 > EXT_DEF_MAX_STR_SIZE) {
793 			EL(ha, "failed, No room for fpga version string\n");
794 		} else {
795 			w = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
796 			    (uint16_t *)
797 			    (ha->sbus_fpga_iobase + FPGA_REVISION));
798 
799 			tmpptr = (char *)&(tmp_node.DriverVersion[verlen+1]);
800 			if (tmpptr == NULL) {
801 				EL(ha, "Unable to insert fpga version str\n");
802 			} else {
803 				(void) sprintf(tmpptr, "%d.%d",
804 				    ((w & 0xf0) >> 4), (w & 0x0f));
805 				tmp_node.DriverAttr |= EXT_CC_HBA_NODE_SBUS;
806 			}
807 		}
808 	}
809 	(void) ql_get_fw_version(ha, &mr);
810 
811 	(void) sprintf((char *)(tmp_node.FWVersion), "%01d.%02d.%02d",
812 	    mr.mb[1], mr.mb[2], mr.mb[3]);
813 
814 	if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
815 		switch (mr.mb[6]) {
816 		case FWATTRIB_EF:
817 			(void) strcat((char *)(tmp_node.FWVersion), " EF");
818 			break;
819 		case FWATTRIB_TP:
820 			(void) strcat((char *)(tmp_node.FWVersion), " TP");
821 			break;
822 		case FWATTRIB_IP:
823 			(void) strcat((char *)(tmp_node.FWVersion), " IP");
824 			break;
825 		case FWATTRIB_IPX:
826 			(void) strcat((char *)(tmp_node.FWVersion), " IPX");
827 			break;
828 		case FWATTRIB_FL:
829 			(void) strcat((char *)(tmp_node.FWVersion), " FL");
830 			break;
831 		case FWATTRIB_FPX:
832 			(void) strcat((char *)(tmp_node.FWVersion), " FLX");
833 			break;
834 		default:
835 			break;
836 		}
837 	}
838 
839 	/* FCode version. */
840 	/*LINTED [Solaris DDI_DEV_T_ANY Lint error]*/
841 	if (ddi_getlongprop(DDI_DEV_T_ANY, ha->dip, PROP_LEN_AND_VAL_ALLOC |
842 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version", (caddr_t)&bufp,
843 	    (int *)&len) == DDI_PROP_SUCCESS) {
844 		if (len < EXT_DEF_MAX_STR_SIZE) {
845 			bcopy(bufp, tmp_node.OptRomVersion, len);
846 		} else {
847 			bcopy(bufp, tmp_node.OptRomVersion,
848 			    EXT_DEF_MAX_STR_SIZE - 1);
849 			tmp_node.OptRomVersion[EXT_DEF_MAX_STR_SIZE - 1] =
850 			    '\0';
851 		}
852 		kmem_free(bufp, len);
853 	} else {
854 		(void) sprintf((char *)tmp_node.OptRomVersion, "0");
855 	}
856 	tmp_node.PortCount = 1;
857 	tmp_node.InterfaceType = EXT_DEF_FC_INTF_TYPE;
858 
859 	if (ddi_copyout((void *)&tmp_node,
860 	    (void *)(uintptr_t)(cmd->ResponseAdr),
861 	    sizeof (EXT_HBA_NODE), mode) != 0) {
862 		cmd->Status = EXT_STATUS_COPY_ERR;
863 		cmd->ResponseLen = 0;
864 		EL(ha, "failed, ddi_copyout\n");
865 	} else {
866 		cmd->ResponseLen = sizeof (EXT_HBA_NODE);
867 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
868 	}
869 }
870 
871 /*
872  * ql_qry_hba_port
873  *	Performs EXT_SC_QUERY_HBA_PORT subfunction.
874  *
875  * Input:
876  *	ha:	adapter state pointer.
877  *	cmd:	EXT_IOCTL cmd struct pointer.
878  *	mode:	flags.
879  *
880  * Returns:
881  *	None, request status indicated in cmd->Status.
882  *
883  * Context:
884  *	Kernel context.
885  */
886 static void
887 ql_qry_hba_port(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
888 {
889 	ql_link_t	*link;
890 	ql_tgt_t	*tq;
891 	ql_mbx_data_t	mr;
892 	EXT_HBA_PORT	tmp_port = {0};
893 	int		rval;
894 	uint16_t	port_cnt, tgt_cnt, index;
895 
896 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
897 
898 	if (cmd->ResponseLen < sizeof (EXT_HBA_PORT)) {
899 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
900 		cmd->DetailStatus = sizeof (EXT_HBA_PORT);
901 		EL(ha, "failed, ResponseLen < EXT_HBA_NODE, Len=%xh\n",
902 		    cmd->ResponseLen);
903 		cmd->ResponseLen = 0;
904 		return;
905 	}
906 
907 	/* fill in the values */
908 
909 	bcopy(ha->loginparams.nport_ww_name.raw_wwn, tmp_port.WWPN,
910 	    EXT_DEF_WWN_NAME_SIZE);
911 	tmp_port.Id[0] = 0;
912 	tmp_port.Id[1] = ha->d_id.b.domain;
913 	tmp_port.Id[2] = ha->d_id.b.area;
914 	tmp_port.Id[3] = ha->d_id.b.al_pa;
915 
916 	/* For now we are initiator only driver */
917 	tmp_port.Type = EXT_DEF_INITIATOR_DEV;
918 
919 	if (ha->task_daemon_flags & LOOP_DOWN) {
920 		tmp_port.State = EXT_DEF_HBA_LOOP_DOWN;
921 	} else if (DRIVER_SUSPENDED(ha)) {
922 		tmp_port.State = EXT_DEF_HBA_SUSPENDED;
923 	} else {
924 		tmp_port.State = EXT_DEF_HBA_OK;
925 	}
926 
927 	if (ha->flags & POINT_TO_POINT) {
928 		tmp_port.Mode = EXT_DEF_P2P_MODE;
929 	} else {
930 		tmp_port.Mode = EXT_DEF_LOOP_MODE;
931 	}
932 	/*
933 	 * fill in the portspeed values.
934 	 *
935 	 * default to not yet negotiated state
936 	 */
937 	tmp_port.PortSpeed = EXT_PORTSPEED_NOT_NEGOTIATED;
938 
939 	if (tmp_port.State == EXT_DEF_HBA_OK) {
940 		if ((CFG_IST(ha, CFG_CTRL_2200)) == 0) {
941 			mr.mb[1] = 0;
942 			mr.mb[2] = 0;
943 			rval = ql_data_rate(ha, &mr);
944 			if (rval != QL_SUCCESS) {
945 				EL(ha, "failed, data_rate=%xh\n", rval);
946 			} else {
947 				switch (mr.mb[1]) {
948 				case IIDMA_RATE_1GB:
949 					tmp_port.PortSpeed =
950 					    EXT_DEF_PORTSPEED_1GBIT;
951 					break;
952 				case IIDMA_RATE_2GB:
953 					tmp_port.PortSpeed =
954 					    EXT_DEF_PORTSPEED_2GBIT;
955 					break;
956 				case IIDMA_RATE_4GB:
957 					tmp_port.PortSpeed =
958 					    EXT_DEF_PORTSPEED_4GBIT;
959 					break;
960 				case IIDMA_RATE_8GB:
961 					tmp_port.PortSpeed =
962 					    EXT_DEF_PORTSPEED_8GBIT;
963 					break;
964 				case IIDMA_RATE_10GB:
965 					tmp_port.PortSpeed =
966 					    EXT_DEF_PORTSPEED_10GBIT;
967 					break;
968 				default:
969 					tmp_port.PortSpeed =
970 					    EXT_DEF_PORTSPEED_UNKNOWN;
971 					EL(ha, "failed, data rate=%xh\n",
972 					    mr.mb[1]);
973 					break;
974 				}
975 			}
976 		} else {
977 			tmp_port.PortSpeed = EXT_DEF_PORTSPEED_1GBIT;
978 		}
979 	}
980 
981 	/* Report all supported port speeds */
982 	if (CFG_IST(ha, CFG_CTRL_25XX)) {
983 		tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_8GBIT |
984 		    EXT_DEF_PORTSPEED_4GBIT | EXT_DEF_PORTSPEED_2GBIT |
985 		    EXT_DEF_PORTSPEED_1GBIT);
986 		/*
987 		 * Correct supported speeds based on type of
988 		 * sfp that is present
989 		 */
990 		switch (ha->sfp_stat) {
991 		case 1:
992 			/* no sfp detected */
993 			break;
994 		case 2:
995 		case 4:
996 			/* 4GB sfp */
997 			tmp_port.PortSupportedSpeed &=
998 			    ~EXT_DEF_PORTSPEED_8GBIT;
999 			break;
1000 		case 3:
1001 		case 5:
1002 			/* 8GB sfp */
1003 			tmp_port.PortSupportedSpeed &=
1004 			    ~EXT_DEF_PORTSPEED_1GBIT;
1005 			break;
1006 		default:
1007 			EL(ha, "sfp_stat: %xh\n", ha->sfp_stat);
1008 			break;
1009 
1010 		}
1011 	} else if (CFG_IST(ha, CFG_CTRL_81XX)) {
1012 		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_10GBIT;
1013 	} else if (CFG_IST(ha, CFG_CTRL_2422)) {
1014 		tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_4GBIT |
1015 		    EXT_DEF_PORTSPEED_2GBIT | EXT_DEF_PORTSPEED_1GBIT);
1016 	} else if (CFG_IST(ha, CFG_CTRL_2300)) {
1017 		tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_2GBIT |
1018 		    EXT_DEF_PORTSPEED_1GBIT);
1019 	} else if (CFG_IST(ha, CFG_CTRL_6322)) {
1020 		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_2GBIT;
1021 	} else if (CFG_IST(ha, CFG_CTRL_2200)) {
1022 		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_1GBIT;
1023 	} else {
1024 		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_UNKNOWN;
1025 		EL(ha, "unknown HBA type: %xh\n", ha->device_id);
1026 	}
1027 	tmp_port.LinkState2 = LSB(ha->sfp_stat);
1028 	port_cnt = 0;
1029 	tgt_cnt = 0;
1030 
1031 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
1032 		for (link = ha->dev[index].first; link != NULL;
1033 		    link = link->next) {
1034 			tq = link->base_address;
1035 
1036 			if (!VALID_TARGET_ID(ha, tq->loop_id)) {
1037 				continue;
1038 			}
1039 
1040 			port_cnt++;
1041 			if ((tq->flags & TQF_INITIATOR_DEVICE) == 0) {
1042 				tgt_cnt++;
1043 			}
1044 		}
1045 	}
1046 
1047 	tmp_port.DiscPortCount = port_cnt;
1048 	tmp_port.DiscTargetCount = tgt_cnt;
1049 
1050 	tmp_port.DiscPortNameType = EXT_DEF_USE_NODE_NAME;
1051 
1052 	rval = ddi_copyout((void *)&tmp_port,
1053 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1054 	    sizeof (EXT_HBA_PORT), mode);
1055 	if (rval != 0) {
1056 		cmd->Status = EXT_STATUS_COPY_ERR;
1057 		cmd->ResponseLen = 0;
1058 		EL(ha, "failed, ddi_copyout\n");
1059 	} else {
1060 		cmd->ResponseLen = sizeof (EXT_HBA_PORT);
1061 		QL_PRINT_9(CE_CONT, "(%d): done, ports=%d, targets=%d\n",
1062 		    ha->instance, port_cnt, tgt_cnt);
1063 	}
1064 }
1065 
1066 /*
1067  * ql_qry_disc_port
1068  *	Performs EXT_SC_QUERY_DISC_PORT subfunction.
1069  *
1070  * Input:
1071  *	ha:	adapter state pointer.
1072  *	cmd:	EXT_IOCTL cmd struct pointer.
1073  *	mode:	flags.
1074  *
1075  *	cmd->Instance = Port instance in fcport chain.
1076  *
1077  * Returns:
1078  *	None, request status indicated in cmd->Status.
1079  *
1080  * Context:
1081  *	Kernel context.
1082  */
1083 static void
1084 ql_qry_disc_port(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1085 {
1086 	EXT_DISC_PORT	tmp_port = {0};
1087 	ql_link_t	*link;
1088 	ql_tgt_t	*tq;
1089 	uint16_t	index;
1090 	uint16_t	inst = 0;
1091 
1092 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1093 
1094 	if (cmd->ResponseLen < sizeof (EXT_DISC_PORT)) {
1095 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1096 		cmd->DetailStatus = sizeof (EXT_DISC_PORT);
1097 		EL(ha, "failed, ResponseLen < EXT_DISC_PORT, Len=%xh\n",
1098 		    cmd->ResponseLen);
1099 		cmd->ResponseLen = 0;
1100 		return;
1101 	}
1102 
1103 	for (link = NULL, index = 0;
1104 	    index < DEVICE_HEAD_LIST_SIZE && link == NULL; index++) {
1105 		for (link = ha->dev[index].first; link != NULL;
1106 		    link = link->next) {
1107 			tq = link->base_address;
1108 
1109 			if (!VALID_TARGET_ID(ha, tq->loop_id)) {
1110 				continue;
1111 			}
1112 			if (inst != cmd->Instance) {
1113 				inst++;
1114 				continue;
1115 			}
1116 
1117 			/* fill in the values */
1118 			bcopy(tq->node_name, tmp_port.WWNN,
1119 			    EXT_DEF_WWN_NAME_SIZE);
1120 			bcopy(tq->port_name, tmp_port.WWPN,
1121 			    EXT_DEF_WWN_NAME_SIZE);
1122 
1123 			break;
1124 		}
1125 	}
1126 
1127 	if (link == NULL) {
1128 		/* no matching device */
1129 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1130 		EL(ha, "failed, port not found port=%d\n", cmd->Instance);
1131 		cmd->ResponseLen = 0;
1132 		return;
1133 	}
1134 
1135 	tmp_port.Id[0] = 0;
1136 	tmp_port.Id[1] = tq->d_id.b.domain;
1137 	tmp_port.Id[2] = tq->d_id.b.area;
1138 	tmp_port.Id[3] = tq->d_id.b.al_pa;
1139 
1140 	tmp_port.Type = 0;
1141 	if (tq->flags & TQF_INITIATOR_DEVICE) {
1142 		tmp_port.Type = (uint16_t)(tmp_port.Type |
1143 		    EXT_DEF_INITIATOR_DEV);
1144 	} else if ((tq->flags & TQF_TAPE_DEVICE) == 0) {
1145 		(void) ql_inq_scan(ha, tq, 1);
1146 	} else if (tq->flags & TQF_TAPE_DEVICE) {
1147 		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_TAPE_DEV);
1148 	}
1149 
1150 	if (tq->flags & TQF_FABRIC_DEVICE) {
1151 		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_FABRIC_DEV);
1152 	} else {
1153 		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_TARGET_DEV);
1154 	}
1155 
1156 	tmp_port.Status = 0;
1157 	tmp_port.Bus = 0;  /* Hard-coded for Solaris */
1158 
1159 	bcopy(tq->port_name, &tmp_port.TargetId, 8);
1160 
1161 	if (ddi_copyout((void *)&tmp_port,
1162 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1163 	    sizeof (EXT_DISC_PORT), mode) != 0) {
1164 		cmd->Status = EXT_STATUS_COPY_ERR;
1165 		cmd->ResponseLen = 0;
1166 		EL(ha, "failed, ddi_copyout\n");
1167 	} else {
1168 		cmd->ResponseLen = sizeof (EXT_DISC_PORT);
1169 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1170 	}
1171 }
1172 
1173 /*
1174  * ql_qry_disc_tgt
1175  *	Performs EXT_SC_QUERY_DISC_TGT subfunction.
1176  *
1177  * Input:
1178  *	ha:		adapter state pointer.
1179  *	cmd:		EXT_IOCTL cmd struct pointer.
1180  *	mode:		flags.
1181  *
1182  *	cmd->Instance = Port instance in fcport chain.
1183  *
1184  * Returns:
1185  *	None, request status indicated in cmd->Status.
1186  *
1187  * Context:
1188  *	Kernel context.
1189  */
1190 static void
1191 ql_qry_disc_tgt(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1192 {
1193 	EXT_DISC_TARGET	tmp_tgt = {0};
1194 	ql_link_t	*link;
1195 	ql_tgt_t	*tq;
1196 	uint16_t	index;
1197 	uint16_t	inst = 0;
1198 
1199 	QL_PRINT_9(CE_CONT, "(%d): started, target=%d\n", ha->instance,
1200 	    cmd->Instance);
1201 
1202 	if (cmd->ResponseLen < sizeof (EXT_DISC_TARGET)) {
1203 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1204 		cmd->DetailStatus = sizeof (EXT_DISC_TARGET);
1205 		EL(ha, "failed, ResponseLen < EXT_DISC_TARGET, Len=%xh\n",
1206 		    cmd->ResponseLen);
1207 		cmd->ResponseLen = 0;
1208 		return;
1209 	}
1210 
1211 	/* Scan port list for requested target and fill in the values */
1212 	for (link = NULL, index = 0;
1213 	    index < DEVICE_HEAD_LIST_SIZE && link == NULL; index++) {
1214 		for (link = ha->dev[index].first; link != NULL;
1215 		    link = link->next) {
1216 			tq = link->base_address;
1217 
1218 			if (!VALID_TARGET_ID(ha, tq->loop_id) ||
1219 			    tq->flags & TQF_INITIATOR_DEVICE) {
1220 				continue;
1221 			}
1222 			if (inst != cmd->Instance) {
1223 				inst++;
1224 				continue;
1225 			}
1226 
1227 			/* fill in the values */
1228 			bcopy(tq->node_name, tmp_tgt.WWNN,
1229 			    EXT_DEF_WWN_NAME_SIZE);
1230 			bcopy(tq->port_name, tmp_tgt.WWPN,
1231 			    EXT_DEF_WWN_NAME_SIZE);
1232 
1233 			break;
1234 		}
1235 	}
1236 
1237 	if (link == NULL) {
1238 		/* no matching device */
1239 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1240 		cmd->DetailStatus = EXT_DSTATUS_TARGET;
1241 		EL(ha, "failed, not found target=%d\n", cmd->Instance);
1242 		cmd->ResponseLen = 0;
1243 		return;
1244 	}
1245 	tmp_tgt.Id[0] = 0;
1246 	tmp_tgt.Id[1] = tq->d_id.b.domain;
1247 	tmp_tgt.Id[2] = tq->d_id.b.area;
1248 	tmp_tgt.Id[3] = tq->d_id.b.al_pa;
1249 
1250 	tmp_tgt.LunCount = (uint16_t)ql_lun_count(ha, tq);
1251 
1252 	if ((tq->flags & TQF_TAPE_DEVICE) == 0) {
1253 		(void) ql_inq_scan(ha, tq, 1);
1254 	}
1255 
1256 	tmp_tgt.Type = 0;
1257 	if (tq->flags & TQF_TAPE_DEVICE) {
1258 		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_TAPE_DEV);
1259 	}
1260 
1261 	if (tq->flags & TQF_FABRIC_DEVICE) {
1262 		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_FABRIC_DEV);
1263 	} else {
1264 		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_TARGET_DEV);
1265 	}
1266 
1267 	tmp_tgt.Status = 0;
1268 
1269 	tmp_tgt.Bus = 0;  /* Hard-coded for Solaris. */
1270 
1271 	bcopy(tq->port_name, &tmp_tgt.TargetId, 8);
1272 
1273 	if (ddi_copyout((void *)&tmp_tgt,
1274 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1275 	    sizeof (EXT_DISC_TARGET), mode) != 0) {
1276 		cmd->Status = EXT_STATUS_COPY_ERR;
1277 		cmd->ResponseLen = 0;
1278 		EL(ha, "failed, ddi_copyout\n");
1279 	} else {
1280 		cmd->ResponseLen = sizeof (EXT_DISC_TARGET);
1281 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1282 	}
1283 }
1284 
1285 /*
1286  * ql_qry_fw
1287  *	Performs EXT_SC_QUERY_FW subfunction.
1288  *
1289  * Input:
1290  *	ha:	adapter state pointer.
1291  *	cmd:	EXT_IOCTL cmd struct pointer.
1292  *	mode:	flags.
1293  *
1294  * Returns:
1295  *	None, request status indicated in cmd->Status.
1296  *
1297  * Context:
1298  *	Kernel context.
1299  */
1300 static void
1301 ql_qry_fw(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1302 {
1303 	ql_mbx_data_t	mr;
1304 	EXT_FW		fw_info = {0};
1305 
1306 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1307 
1308 	if (cmd->ResponseLen < sizeof (EXT_FW)) {
1309 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1310 		cmd->DetailStatus = sizeof (EXT_FW);
1311 		EL(ha, "failed, ResponseLen < EXT_FW, Len=%xh\n",
1312 		    cmd->ResponseLen);
1313 		cmd->ResponseLen = 0;
1314 		return;
1315 	}
1316 
1317 	(void) ql_get_fw_version(ha, &mr);
1318 
1319 	(void) sprintf((char *)(fw_info.Version), "%d.%d.%d", mr.mb[1],
1320 	    mr.mb[2], mr.mb[2]);
1321 
1322 	fw_info.Attrib = mr.mb[6];
1323 
1324 	if (ddi_copyout((void *)&fw_info,
1325 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1326 	    sizeof (EXT_FW), mode) != 0) {
1327 		cmd->Status = EXT_STATUS_COPY_ERR;
1328 		cmd->ResponseLen = 0;
1329 		EL(ha, "failed, ddi_copyout\n");
1330 		return;
1331 	} else {
1332 		cmd->ResponseLen = sizeof (EXT_FW);
1333 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1334 	}
1335 }
1336 
1337 /*
1338  * ql_qry_chip
1339  *	Performs EXT_SC_QUERY_CHIP subfunction.
1340  *
1341  * Input:
1342  *	ha:	adapter state pointer.
1343  *	cmd:	EXT_IOCTL cmd struct pointer.
1344  *	mode:	flags.
1345  *
1346  * Returns:
1347  *	None, request status indicated in cmd->Status.
1348  *
1349  * Context:
1350  *	Kernel context.
1351  */
1352 static void
1353 ql_qry_chip(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1354 {
1355 	EXT_CHIP	chip = {0};
1356 
1357 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1358 
1359 	if (cmd->ResponseLen < sizeof (EXT_CHIP)) {
1360 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1361 		cmd->DetailStatus = sizeof (EXT_CHIP);
1362 		EL(ha, "failed, ResponseLen < EXT_CHIP, Len=%xh\n",
1363 		    cmd->ResponseLen);
1364 		cmd->ResponseLen = 0;
1365 		return;
1366 	}
1367 
1368 	chip.VendorId = ha->ven_id;
1369 	chip.DeviceId = ha->device_id;
1370 	chip.SubVendorId = ha->subven_id;
1371 	chip.SubSystemId = ha->subsys_id;
1372 	chip.IoAddr = ql_pci_config_get32(ha, PCI_CONF_BASE0);
1373 	chip.IoAddrLen = 0x100;
1374 	chip.MemAddr = ql_pci_config_get32(ha, PCI_CONF_BASE1);
1375 	chip.MemAddrLen = 0x100;
1376 	chip.ChipRevID = ha->rev_id;
1377 	if (ha->flags & FUNCTION_1) {
1378 		chip.FuncNo = 1;
1379 	}
1380 
1381 	if (ddi_copyout((void *)&chip,
1382 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1383 	    sizeof (EXT_CHIP), mode) != 0) {
1384 		cmd->Status = EXT_STATUS_COPY_ERR;
1385 		cmd->ResponseLen = 0;
1386 		EL(ha, "failed, ddi_copyout\n");
1387 	} else {
1388 		cmd->ResponseLen = sizeof (EXT_CHIP);
1389 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1390 	}
1391 }
1392 
1393 /*
1394  * ql_qry_driver
1395  *	Performs EXT_SC_QUERY_DRIVER subfunction.
1396  *
1397  * Input:
1398  *	ha:	adapter state pointer.
1399  *	cmd:	EXT_IOCTL cmd struct pointer.
1400  *	mode:	flags.
1401  *
1402  * Returns:
1403  *	None, request status indicated in cmd->Status.
1404  *
1405  * Context:
1406  *	Kernel context.
1407  */
1408 static void
1409 ql_qry_driver(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1410 {
1411 	EXT_DRIVER	qd = {0};
1412 
1413 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1414 
1415 	if (cmd->ResponseLen < sizeof (EXT_DRIVER)) {
1416 		cmd->Status = EXT_STATUS_DATA_OVERRUN;
1417 		cmd->DetailStatus = sizeof (EXT_DRIVER);
1418 		EL(ha, "failed, ResponseLen < EXT_DRIVER, Len=%xh\n",
1419 		    cmd->ResponseLen);
1420 		cmd->ResponseLen = 0;
1421 		return;
1422 	}
1423 
1424 	(void) strcpy((void *)&qd.Version[0], QL_VERSION);
1425 	qd.NumOfBus = 1;	/* Fixed for Solaris */
1426 	qd.TargetsPerBus = (uint16_t)
1427 	    (CFG_IST(ha, (CFG_CTRL_242581 | CFG_EXT_FW_INTERFACE)) ?
1428 	    MAX_24_FIBRE_DEVICES : MAX_22_FIBRE_DEVICES);
1429 	qd.LunsPerTarget = 2030;
1430 	qd.MaxTransferLen = QL_DMA_MAX_XFER_SIZE;
1431 	qd.MaxDataSegments = QL_DMA_SG_LIST_LENGTH;
1432 
1433 	if (ddi_copyout((void *)&qd, (void *)(uintptr_t)cmd->ResponseAdr,
1434 	    sizeof (EXT_DRIVER), mode) != 0) {
1435 		cmd->Status = EXT_STATUS_COPY_ERR;
1436 		cmd->ResponseLen = 0;
1437 		EL(ha, "failed, ddi_copyout\n");
1438 	} else {
1439 		cmd->ResponseLen = sizeof (EXT_DRIVER);
1440 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1441 	}
1442 }
1443 
1444 /*
1445  * ql_fcct
1446  *	IOCTL management server FC-CT passthrough.
1447  *
1448  * Input:
1449  *	ha:	adapter state pointer.
1450  *	cmd:	User space CT arguments pointer.
1451  *	mode:	flags.
1452  *
1453  * Returns:
1454  *	None, request status indicated in cmd->Status.
1455  *
1456  * Context:
1457  *	Kernel context.
1458  */
1459 static void
1460 ql_fcct(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1461 {
1462 	ql_mbx_iocb_t		*pkt;
1463 	ql_mbx_data_t		mr;
1464 	dma_mem_t		*dma_mem;
1465 	caddr_t			pld;
1466 	uint32_t		pkt_size, pld_byte_cnt, *long_ptr;
1467 	int			rval;
1468 	ql_ct_iu_preamble_t	*ct;
1469 	ql_xioctl_t		*xp = ha->xioctl;
1470 	ql_tgt_t		tq;
1471 	uint16_t		comp_status, loop_id;
1472 
1473 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1474 
1475 	/* Get CT argument structure. */
1476 	if ((ha->topology & QL_SNS_CONNECTION) == 0) {
1477 		EL(ha, "failed, No switch\n");
1478 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1479 		cmd->ResponseLen = 0;
1480 		return;
1481 	}
1482 
1483 	if (DRIVER_SUSPENDED(ha)) {
1484 		EL(ha, "failed, LOOP_NOT_READY\n");
1485 		cmd->Status = EXT_STATUS_BUSY;
1486 		cmd->ResponseLen = 0;
1487 		return;
1488 	}
1489 
1490 	/* Login management server device. */
1491 	if ((xp->flags & QL_MGMT_SERVER_LOGIN) == 0) {
1492 		tq.d_id.b.al_pa = 0xfa;
1493 		tq.d_id.b.area = 0xff;
1494 		tq.d_id.b.domain = 0xff;
1495 		tq.loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_242581) ?
1496 		    MANAGEMENT_SERVER_24XX_LOOP_ID :
1497 		    MANAGEMENT_SERVER_LOOP_ID);
1498 		rval = ql_login_fport(ha, &tq, tq.loop_id, LFF_NO_PRLI, &mr);
1499 		if (rval != QL_SUCCESS) {
1500 			EL(ha, "failed, server login\n");
1501 			cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1502 			cmd->ResponseLen = 0;
1503 			return;
1504 		} else {
1505 			xp->flags |= QL_MGMT_SERVER_LOGIN;
1506 		}
1507 	}
1508 
1509 	QL_PRINT_9(CE_CONT, "(%d): cmd\n", ha->instance);
1510 	QL_DUMP_9(cmd, 8, sizeof (EXT_IOCTL));
1511 
1512 	/* Allocate a DMA Memory Descriptor */
1513 	dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP);
1514 	if (dma_mem == NULL) {
1515 		EL(ha, "failed, kmem_zalloc\n");
1516 		cmd->Status = EXT_STATUS_NO_MEMORY;
1517 		cmd->ResponseLen = 0;
1518 		return;
1519 	}
1520 	/* Determine maximum buffer size. */
1521 	if (cmd->RequestLen < cmd->ResponseLen) {
1522 		pld_byte_cnt = cmd->ResponseLen;
1523 	} else {
1524 		pld_byte_cnt = cmd->RequestLen;
1525 	}
1526 
1527 	/* Allocate command block. */
1528 	pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + pld_byte_cnt);
1529 	pkt = kmem_zalloc(pkt_size, KM_SLEEP);
1530 	if (pkt == NULL) {
1531 		EL(ha, "failed, kmem_zalloc\n");
1532 		cmd->Status = EXT_STATUS_NO_MEMORY;
1533 		cmd->ResponseLen = 0;
1534 		return;
1535 	}
1536 	pld = (caddr_t)pkt + sizeof (ql_mbx_iocb_t);
1537 
1538 	/* Get command payload data. */
1539 	if (ql_get_buffer_data((caddr_t)(uintptr_t)cmd->RequestAdr, pld,
1540 	    cmd->RequestLen, mode) != cmd->RequestLen) {
1541 		EL(ha, "failed, get_buffer_data\n");
1542 		kmem_free(pkt, pkt_size);
1543 		cmd->Status = EXT_STATUS_COPY_ERR;
1544 		cmd->ResponseLen = 0;
1545 		return;
1546 	}
1547 
1548 	/* Get DMA memory for the IOCB */
1549 	if (ql_get_dma_mem(ha, dma_mem, pkt_size, LITTLE_ENDIAN_DMA,
1550 	    QL_DMA_RING_ALIGN) != QL_SUCCESS) {
1551 		cmn_err(CE_WARN, "%s(%d): DMA memory "
1552 		    "alloc failed", QL_NAME, ha->instance);
1553 		kmem_free(pkt, pkt_size);
1554 		kmem_free(dma_mem, sizeof (dma_mem_t));
1555 		cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
1556 		cmd->ResponseLen = 0;
1557 		return;
1558 	}
1559 
1560 	/* Copy out going payload data to IOCB DMA buffer. */
1561 	ddi_rep_put8(dma_mem->acc_handle, (uint8_t *)pld,
1562 	    (uint8_t *)dma_mem->bp, pld_byte_cnt, DDI_DEV_AUTOINCR);
1563 
1564 	/* Sync IOCB DMA buffer. */
1565 	(void) ddi_dma_sync(dma_mem->dma_handle, 0, pld_byte_cnt,
1566 	    DDI_DMA_SYNC_FORDEV);
1567 
1568 	/*
1569 	 * Setup IOCB
1570 	 */
1571 	ct = (ql_ct_iu_preamble_t *)pld;
1572 	if (CFG_IST(ha, CFG_CTRL_242581)) {
1573 		pkt->ms24.entry_type = CT_PASSTHRU_TYPE;
1574 		pkt->ms24.entry_count = 1;
1575 
1576 		/* Set loop ID */
1577 		pkt->ms24.n_port_hdl = (uint16_t)
1578 		    (ct->gs_type == GS_TYPE_DIR_SERVER ?
1579 		    LE_16(SNS_24XX_HDL) :
1580 		    LE_16(MANAGEMENT_SERVER_24XX_LOOP_ID));
1581 
1582 		/* Set ISP command timeout. */
1583 		pkt->ms24.timeout = LE_16(120);
1584 
1585 		/* Set cmd/response data segment counts. */
1586 		pkt->ms24.cmd_dseg_count = LE_16(1);
1587 		pkt->ms24.resp_dseg_count = LE_16(1);
1588 
1589 		/* Load ct cmd byte count. */
1590 		pkt->ms24.cmd_byte_count = LE_32(cmd->RequestLen);
1591 
1592 		/* Load ct rsp byte count. */
1593 		pkt->ms24.resp_byte_count = LE_32(cmd->ResponseLen);
1594 
1595 		long_ptr = (uint32_t *)&pkt->ms24.dseg_0_address;
1596 
1597 		/* Load MS command entry data segments. */
1598 		*long_ptr++ = (uint32_t)
1599 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1600 		*long_ptr++ = (uint32_t)
1601 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1602 		*long_ptr++ = (uint32_t)(LE_32(cmd->RequestLen));
1603 
1604 		/* Load MS response entry data segments. */
1605 		*long_ptr++ = (uint32_t)
1606 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1607 		*long_ptr++ = (uint32_t)
1608 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1609 		*long_ptr = (uint32_t)LE_32(cmd->ResponseLen);
1610 
1611 		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
1612 		    sizeof (ql_mbx_iocb_t));
1613 
1614 		comp_status = (uint16_t)LE_16(pkt->sts24.comp_status);
1615 		if (comp_status == CS_DATA_UNDERRUN) {
1616 			if ((BE_16(ct->max_residual_size)) == 0) {
1617 				comp_status = CS_COMPLETE;
1618 			}
1619 		}
1620 
1621 		if (rval != QL_SUCCESS || (pkt->sts24.entry_status & 0x3c) !=
1622 		    0) {
1623 			EL(ha, "failed, I/O timeout or "
1624 			    "es=%xh, ss_l=%xh, rval=%xh\n",
1625 			    pkt->sts24.entry_status,
1626 			    pkt->sts24.scsi_status_l, rval);
1627 			kmem_free(pkt, pkt_size);
1628 			ql_free_dma_resource(ha, dma_mem);
1629 			kmem_free(dma_mem, sizeof (dma_mem_t));
1630 			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
1631 			cmd->ResponseLen = 0;
1632 			return;
1633 		}
1634 	} else {
1635 		pkt->ms.entry_type = MS_TYPE;
1636 		pkt->ms.entry_count = 1;
1637 
1638 		/* Set loop ID */
1639 		loop_id = (uint16_t)(ct->gs_type == GS_TYPE_DIR_SERVER ?
1640 		    SIMPLE_NAME_SERVER_LOOP_ID : MANAGEMENT_SERVER_LOOP_ID);
1641 		if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
1642 			pkt->ms.loop_id_l = LSB(loop_id);
1643 			pkt->ms.loop_id_h = MSB(loop_id);
1644 		} else {
1645 			pkt->ms.loop_id_h = LSB(loop_id);
1646 		}
1647 
1648 		/* Set ISP command timeout. */
1649 		pkt->ms.timeout = LE_16(120);
1650 
1651 		/* Set data segment counts. */
1652 		pkt->ms.cmd_dseg_count_l = 1;
1653 		pkt->ms.total_dseg_count = LE_16(2);
1654 
1655 		/* Response total byte count. */
1656 		pkt->ms.resp_byte_count = LE_32(cmd->ResponseLen);
1657 		pkt->ms.dseg_1_length = LE_32(cmd->ResponseLen);
1658 
1659 		/* Command total byte count. */
1660 		pkt->ms.cmd_byte_count = LE_32(cmd->RequestLen);
1661 		pkt->ms.dseg_0_length = LE_32(cmd->RequestLen);
1662 
1663 		/* Load command/response data segments. */
1664 		pkt->ms.dseg_0_address[0] = (uint32_t)
1665 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1666 		pkt->ms.dseg_0_address[1] = (uint32_t)
1667 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1668 		pkt->ms.dseg_1_address[0] = (uint32_t)
1669 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1670 		pkt->ms.dseg_1_address[1] = (uint32_t)
1671 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1672 
1673 		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
1674 		    sizeof (ql_mbx_iocb_t));
1675 
1676 		comp_status = (uint16_t)LE_16(pkt->sts.comp_status);
1677 		if (comp_status == CS_DATA_UNDERRUN) {
1678 			if ((BE_16(ct->max_residual_size)) == 0) {
1679 				comp_status = CS_COMPLETE;
1680 			}
1681 		}
1682 		if (rval != QL_SUCCESS || (pkt->sts.entry_status & 0x7e) != 0) {
1683 			EL(ha, "failed, I/O timeout or "
1684 			    "es=%xh, rval=%xh\n", pkt->sts.entry_status, rval);
1685 			kmem_free(pkt, pkt_size);
1686 			ql_free_dma_resource(ha, dma_mem);
1687 			kmem_free(dma_mem, sizeof (dma_mem_t));
1688 			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
1689 			cmd->ResponseLen = 0;
1690 			return;
1691 		}
1692 	}
1693 
1694 	/* Sync in coming DMA buffer. */
1695 	(void) ddi_dma_sync(dma_mem->dma_handle, 0,
1696 	    pld_byte_cnt, DDI_DMA_SYNC_FORKERNEL);
1697 	/* Copy in coming DMA data. */
1698 	ddi_rep_get8(dma_mem->acc_handle, (uint8_t *)pld,
1699 	    (uint8_t *)dma_mem->bp, pld_byte_cnt,
1700 	    DDI_DEV_AUTOINCR);
1701 
1702 	/* Copy response payload from DMA buffer to application. */
1703 	if (cmd->ResponseLen != 0) {
1704 		QL_PRINT_9(CE_CONT, "(%d): ResponseLen=%d\n", ha->instance,
1705 		    cmd->ResponseLen);
1706 		QL_DUMP_9(pld, 8, cmd->ResponseLen);
1707 
1708 		/* Send response payload. */
1709 		if (ql_send_buffer_data(pld,
1710 		    (caddr_t)(uintptr_t)cmd->ResponseAdr,
1711 		    cmd->ResponseLen, mode) != cmd->ResponseLen) {
1712 			EL(ha, "failed, send_buffer_data\n");
1713 			cmd->Status = EXT_STATUS_COPY_ERR;
1714 			cmd->ResponseLen = 0;
1715 		}
1716 	}
1717 
1718 	kmem_free(pkt, pkt_size);
1719 	ql_free_dma_resource(ha, dma_mem);
1720 	kmem_free(dma_mem, sizeof (dma_mem_t));
1721 
1722 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1723 }
1724 
1725 /*
1726  * ql_aen_reg
1727  *	IOCTL management server Asynchronous Event Tracking Enable/Disable.
1728  *
1729  * Input:
1730  *	ha:	adapter state pointer.
1731  *	cmd:	EXT_IOCTL cmd struct pointer.
1732  *	mode:	flags.
1733  *
1734  * Returns:
1735  *	None, request status indicated in cmd->Status.
1736  *
1737  * Context:
1738  *	Kernel context.
1739  */
1740 static void
1741 ql_aen_reg(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1742 {
1743 	EXT_REG_AEN	reg_struct;
1744 	int		rval = 0;
1745 	ql_xioctl_t	*xp = ha->xioctl;
1746 
1747 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1748 
1749 	rval = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, &reg_struct,
1750 	    cmd->RequestLen, mode);
1751 
1752 	if (rval == 0) {
1753 		if (reg_struct.Enable) {
1754 			xp->flags |= QL_AEN_TRACKING_ENABLE;
1755 		} else {
1756 			xp->flags &= ~QL_AEN_TRACKING_ENABLE;
1757 			/* Empty the queue. */
1758 			INTR_LOCK(ha);
1759 			xp->aen_q_head = 0;
1760 			xp->aen_q_tail = 0;
1761 			INTR_UNLOCK(ha);
1762 		}
1763 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1764 	} else {
1765 		cmd->Status = EXT_STATUS_COPY_ERR;
1766 		EL(ha, "failed, ddi_copyin\n");
1767 	}
1768 }
1769 
1770 /*
1771  * ql_aen_get
1772  *	IOCTL management server Asynchronous Event Record Transfer.
1773  *
1774  * Input:
1775  *	ha:	adapter state pointer.
1776  *	cmd:	EXT_IOCTL cmd struct pointer.
1777  *	mode:	flags.
1778  *
1779  * Returns:
1780  *	None, request status indicated in cmd->Status.
1781  *
1782  * Context:
1783  *	Kernel context.
1784  */
1785 static void
1786 ql_aen_get(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1787 {
1788 	uint32_t	out_size;
1789 	EXT_ASYNC_EVENT	*tmp_q;
1790 	EXT_ASYNC_EVENT	aen[EXT_DEF_MAX_AEN_QUEUE];
1791 	uint8_t		i;
1792 	uint8_t		queue_cnt;
1793 	uint8_t		request_cnt;
1794 	ql_xioctl_t	*xp = ha->xioctl;
1795 
1796 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1797 
1798 	/* Compute the number of events that can be returned */
1799 	request_cnt = (uint8_t)(cmd->ResponseLen / sizeof (EXT_ASYNC_EVENT));
1800 
1801 	if (request_cnt < EXT_DEF_MAX_AEN_QUEUE) {
1802 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1803 		cmd->DetailStatus = EXT_DEF_MAX_AEN_QUEUE;
1804 		EL(ha, "failed, request_cnt < EXT_DEF_MAX_AEN_QUEUE, "
1805 		    "Len=%xh\n", request_cnt);
1806 		cmd->ResponseLen = 0;
1807 		return;
1808 	}
1809 
1810 	/* 1st: Make a local copy of the entire queue content. */
1811 	tmp_q = (EXT_ASYNC_EVENT *)xp->aen_tracking_queue;
1812 	queue_cnt = 0;
1813 
1814 	INTR_LOCK(ha);
1815 	i = xp->aen_q_head;
1816 
1817 	for (; queue_cnt < EXT_DEF_MAX_AEN_QUEUE; ) {
1818 		if (tmp_q[i].AsyncEventCode != 0) {
1819 			bcopy(&tmp_q[i], &aen[queue_cnt],
1820 			    sizeof (EXT_ASYNC_EVENT));
1821 			queue_cnt++;
1822 			tmp_q[i].AsyncEventCode = 0; /* empty out the slot */
1823 		}
1824 		if (i == xp->aen_q_tail) {
1825 			/* done. */
1826 			break;
1827 		}
1828 		i++;
1829 		if (i == EXT_DEF_MAX_AEN_QUEUE) {
1830 			i = 0;
1831 		}
1832 	}
1833 
1834 	/* Empty the queue. */
1835 	xp->aen_q_head = 0;
1836 	xp->aen_q_tail = 0;
1837 
1838 	INTR_UNLOCK(ha);
1839 
1840 	/* 2nd: Now transfer the queue content to user buffer */
1841 	/* Copy the entire queue to user's buffer. */
1842 	out_size = (uint32_t)(queue_cnt * sizeof (EXT_ASYNC_EVENT));
1843 	if (queue_cnt == 0) {
1844 		cmd->ResponseLen = 0;
1845 	} else if (ddi_copyout((void *)&aen[0],
1846 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1847 	    out_size, mode) != 0) {
1848 		cmd->Status = EXT_STATUS_COPY_ERR;
1849 		cmd->ResponseLen = 0;
1850 		EL(ha, "failed, ddi_copyout\n");
1851 	} else {
1852 		cmd->ResponseLen = out_size;
1853 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1854 	}
1855 }
1856 
1857 /*
1858  * ql_enqueue_aen
1859  *
1860  * Input:
1861  *	ha:		adapter state pointer.
1862  *	event_code:	async event code of the event to add to queue.
1863  *	payload:	event payload for the queue.
1864  *	INTR_LOCK must be already obtained.
1865  *
1866  * Context:
1867  *	Interrupt or Kernel context, no mailbox commands allowed.
1868  */
1869 void
1870 ql_enqueue_aen(ql_adapter_state_t *ha, uint16_t event_code, void *payload)
1871 {
1872 	uint8_t			new_entry;	/* index to current entry */
1873 	uint16_t		*mbx;
1874 	EXT_ASYNC_EVENT		*aen_queue;
1875 	ql_xioctl_t		*xp = ha->xioctl;
1876 
1877 	QL_PRINT_9(CE_CONT, "(%d): started, event_code=%d\n", ha->instance,
1878 	    event_code);
1879 
1880 	if (xp == NULL) {
1881 		QL_PRINT_9(CE_CONT, "(%d): no context\n", ha->instance);
1882 		return;
1883 	}
1884 	aen_queue = (EXT_ASYNC_EVENT *)xp->aen_tracking_queue;
1885 
1886 	if (aen_queue[xp->aen_q_tail].AsyncEventCode != NULL) {
1887 		/* Need to change queue pointers to make room. */
1888 
1889 		/* Increment tail for adding new entry. */
1890 		xp->aen_q_tail++;
1891 		if (xp->aen_q_tail == EXT_DEF_MAX_AEN_QUEUE) {
1892 			xp->aen_q_tail = 0;
1893 		}
1894 		if (xp->aen_q_head == xp->aen_q_tail) {
1895 			/*
1896 			 * We're overwriting the oldest entry, so need to
1897 			 * update the head pointer.
1898 			 */
1899 			xp->aen_q_head++;
1900 			if (xp->aen_q_head == EXT_DEF_MAX_AEN_QUEUE) {
1901 				xp->aen_q_head = 0;
1902 			}
1903 		}
1904 	}
1905 
1906 	new_entry = xp->aen_q_tail;
1907 	aen_queue[new_entry].AsyncEventCode = event_code;
1908 
1909 	/* Update payload */
1910 	if (payload != NULL) {
1911 		switch (event_code) {
1912 		case MBA_LIP_OCCURRED:
1913 		case MBA_LOOP_UP:
1914 		case MBA_LOOP_DOWN:
1915 		case MBA_LIP_F8:
1916 		case MBA_LIP_RESET:
1917 		case MBA_PORT_UPDATE:
1918 			break;
1919 		case MBA_RSCN_UPDATE:
1920 			mbx = (uint16_t *)payload;
1921 			/* al_pa */
1922 			aen_queue[new_entry].Payload.RSCN.RSCNInfo[0] =
1923 			    LSB(mbx[2]);
1924 			/* area */
1925 			aen_queue[new_entry].Payload.RSCN.RSCNInfo[1] =
1926 			    MSB(mbx[2]);
1927 			/* domain */
1928 			aen_queue[new_entry].Payload.RSCN.RSCNInfo[2] =
1929 			    LSB(mbx[1]);
1930 			/* save in big endian */
1931 			BIG_ENDIAN_24(&aen_queue[new_entry].
1932 			    Payload.RSCN.RSCNInfo[0]);
1933 
1934 			aen_queue[new_entry].Payload.RSCN.AddrFormat =
1935 			    MSB(mbx[1]);
1936 
1937 			break;
1938 		default:
1939 			/* Not supported */
1940 			EL(ha, "failed, event code not supported=%xh\n",
1941 			    event_code);
1942 			aen_queue[new_entry].AsyncEventCode = 0;
1943 			break;
1944 		}
1945 	}
1946 
1947 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1948 }
1949 
1950 /*
1951  * ql_scsi_passthru
1952  *	IOCTL SCSI passthrough.
1953  *
1954  * Input:
1955  *	ha:	adapter state pointer.
1956  *	cmd:	User space SCSI command pointer.
1957  *	mode:	flags.
1958  *
1959  * Returns:
1960  *	None, request status indicated in cmd->Status.
1961  *
1962  * Context:
1963  *	Kernel context.
1964  */
1965 static void
1966 ql_scsi_passthru(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1967 {
1968 	ql_mbx_iocb_t		*pkt;
1969 	ql_mbx_data_t		mr;
1970 	dma_mem_t		*dma_mem;
1971 	caddr_t			pld;
1972 	uint32_t		pkt_size, pld_size;
1973 	uint16_t		qlnt, retries, cnt, cnt2;
1974 	uint8_t			*name;
1975 	EXT_FC_SCSI_PASSTHRU	*ufc_req;
1976 	EXT_SCSI_PASSTHRU	*usp_req;
1977 	int			rval;
1978 	union _passthru {
1979 		EXT_SCSI_PASSTHRU	sp_cmd;
1980 		EXT_FC_SCSI_PASSTHRU	fc_cmd;
1981 	} pt_req;		/* Passthru request */
1982 	uint32_t		status, sense_sz = 0;
1983 	ql_tgt_t		*tq = NULL;
1984 	EXT_SCSI_PASSTHRU	*sp_req = &pt_req.sp_cmd;
1985 	EXT_FC_SCSI_PASSTHRU	*fc_req = &pt_req.fc_cmd;
1986 
1987 	/* SCSI request struct for SCSI passthrough IOs. */
1988 	struct {
1989 		uint16_t	lun;
1990 		uint16_t	sense_length;	/* Sense buffer size */
1991 		size_t		resid;		/* Residual */
1992 		uint8_t		*cdbp;		/* Requestor's CDB */
1993 		uint8_t		*u_sense;	/* Requestor's sense buffer */
1994 		uint8_t		cdb_len;	/* Requestor's CDB length */
1995 		uint8_t		direction;
1996 	} scsi_req;
1997 
1998 	struct {
1999 		uint8_t		*rsp_info;
2000 		uint8_t		*req_sense_data;
2001 		uint32_t	residual_length;
2002 		uint32_t	rsp_info_length;
2003 		uint32_t	req_sense_length;
2004 		uint16_t	comp_status;
2005 		uint8_t		state_flags_l;
2006 		uint8_t		state_flags_h;
2007 		uint8_t		scsi_status_l;
2008 		uint8_t		scsi_status_h;
2009 	} sts;
2010 
2011 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2012 
2013 	/* Verify Sub Code and set cnt to needed request size. */
2014 	if (cmd->SubCode == EXT_SC_SEND_SCSI_PASSTHRU) {
2015 		pld_size = sizeof (EXT_SCSI_PASSTHRU);
2016 	} else if (cmd->SubCode == EXT_SC_SEND_FC_SCSI_PASSTHRU) {
2017 		pld_size = sizeof (EXT_FC_SCSI_PASSTHRU);
2018 	} else {
2019 		EL(ha, "failed, invalid SubCode=%xh\n", cmd->SubCode);
2020 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
2021 		cmd->ResponseLen = 0;
2022 		return;
2023 	}
2024 
2025 	dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP);
2026 	if (dma_mem == NULL) {
2027 		EL(ha, "failed, kmem_zalloc\n");
2028 		cmd->Status = EXT_STATUS_NO_MEMORY;
2029 		cmd->ResponseLen = 0;
2030 		return;
2031 	}
2032 	/*  Verify the size of and copy in the passthru request structure. */
2033 	if (cmd->RequestLen != pld_size) {
2034 		/* Return error */
2035 		EL(ha, "failed, RequestLen != cnt, is=%xh, expected=%xh\n",
2036 		    cmd->RequestLen, pld_size);
2037 		cmd->Status = EXT_STATUS_INVALID_PARAM;
2038 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
2039 		cmd->ResponseLen = 0;
2040 		return;
2041 	}
2042 
2043 	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr, &pt_req,
2044 	    pld_size, mode) != 0) {
2045 		EL(ha, "failed, ddi_copyin\n");
2046 		cmd->Status = EXT_STATUS_COPY_ERR;
2047 		cmd->ResponseLen = 0;
2048 		return;
2049 	}
2050 
2051 	/*
2052 	 * Find fc_port from SCSI PASSTHRU structure fill in the scsi_req
2053 	 * request data structure.
2054 	 */
2055 	if (cmd->SubCode == EXT_SC_SEND_SCSI_PASSTHRU) {
2056 		scsi_req.lun = sp_req->TargetAddr.Lun;
2057 		scsi_req.sense_length = sizeof (sp_req->SenseData);
2058 		scsi_req.cdbp = &sp_req->Cdb[0];
2059 		scsi_req.cdb_len = sp_req->CdbLength;
2060 		scsi_req.direction = sp_req->Direction;
2061 		usp_req = (EXT_SCSI_PASSTHRU *)(uintptr_t)cmd->RequestAdr;
2062 		scsi_req.u_sense = &usp_req->SenseData[0];
2063 		cmd->DetailStatus = EXT_DSTATUS_TARGET;
2064 
2065 		qlnt = QLNT_PORT;
2066 		name = (uint8_t *)&sp_req->TargetAddr.Target;
2067 		QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, Target=%lld\n",
2068 		    ha->instance, cmd->SubCode, sp_req->TargetAddr.Target);
2069 		tq = ql_find_port(ha, name, qlnt);
2070 	} else {
2071 		/*
2072 		 * Must be FC PASSTHRU, verified above.
2073 		 */
2074 		if (fc_req->FCScsiAddr.DestType == EXT_DEF_DESTTYPE_WWPN) {
2075 			qlnt = QLNT_PORT;
2076 			name = &fc_req->FCScsiAddr.DestAddr.WWPN[0];
2077 			QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, "
2078 			    "wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2079 			    ha->instance, cmd->SubCode, name[0], name[1],
2080 			    name[2], name[3], name[4], name[5], name[6],
2081 			    name[7]);
2082 			tq = ql_find_port(ha, name, qlnt);
2083 		} else if (fc_req->FCScsiAddr.DestType ==
2084 		    EXT_DEF_DESTTYPE_WWNN) {
2085 			qlnt = QLNT_NODE;
2086 			name = &fc_req->FCScsiAddr.DestAddr.WWNN[0];
2087 			QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, "
2088 			    "wwnn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2089 			    ha->instance, cmd->SubCode, name[0], name[1],
2090 			    name[2], name[3], name[4], name[5], name[6],
2091 			    name[7]);
2092 			tq = ql_find_port(ha, name, qlnt);
2093 		} else if (fc_req->FCScsiAddr.DestType ==
2094 		    EXT_DEF_DESTTYPE_PORTID) {
2095 			qlnt = QLNT_PID;
2096 			name = &fc_req->FCScsiAddr.DestAddr.Id[0];
2097 			QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, PID="
2098 			    "%02x%02x%02x\n", ha->instance, cmd->SubCode,
2099 			    name[0], name[1], name[2]);
2100 			tq = ql_find_port(ha, name, qlnt);
2101 		} else {
2102 			EL(ha, "failed, SubCode=%xh invalid DestType=%xh\n",
2103 			    cmd->SubCode, fc_req->FCScsiAddr.DestType);
2104 			cmd->Status = EXT_STATUS_INVALID_PARAM;
2105 			cmd->ResponseLen = 0;
2106 			return;
2107 		}
2108 		scsi_req.lun = fc_req->FCScsiAddr.Lun;
2109 		scsi_req.sense_length = sizeof (fc_req->SenseData);
2110 		scsi_req.cdbp = &sp_req->Cdb[0];
2111 		scsi_req.cdb_len = sp_req->CdbLength;
2112 		ufc_req = (EXT_FC_SCSI_PASSTHRU *)(uintptr_t)cmd->RequestAdr;
2113 		scsi_req.u_sense = &ufc_req->SenseData[0];
2114 		scsi_req.direction = fc_req->Direction;
2115 	}
2116 
2117 	if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
2118 		EL(ha, "failed, fc_port not found\n");
2119 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
2120 		cmd->ResponseLen = 0;
2121 		return;
2122 	}
2123 
2124 	if (tq->flags & TQF_NEED_AUTHENTICATION) {
2125 		EL(ha, "target not available; loopid=%xh\n", tq->loop_id);
2126 		cmd->Status = EXT_STATUS_DEVICE_OFFLINE;
2127 		cmd->ResponseLen = 0;
2128 		return;
2129 	}
2130 
2131 	/* Allocate command block. */
2132 	if ((scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_IN ||
2133 	    scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_OUT) &&
2134 	    cmd->ResponseLen) {
2135 		pld_size = cmd->ResponseLen;
2136 		pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + pld_size);
2137 		pkt = kmem_zalloc(pkt_size, KM_SLEEP);
2138 		if (pkt == NULL) {
2139 			EL(ha, "failed, kmem_zalloc\n");
2140 			cmd->Status = EXT_STATUS_NO_MEMORY;
2141 			cmd->ResponseLen = 0;
2142 			return;
2143 		}
2144 		pld = (caddr_t)pkt + sizeof (ql_mbx_iocb_t);
2145 
2146 		/* Get DMA memory for the IOCB */
2147 		if (ql_get_dma_mem(ha, dma_mem, pld_size, LITTLE_ENDIAN_DMA,
2148 		    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
2149 			cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
2150 			    "alloc failed", QL_NAME, ha->instance);
2151 			kmem_free(pkt, pkt_size);
2152 			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
2153 			cmd->ResponseLen = 0;
2154 			return;
2155 		}
2156 
2157 		if (scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_IN) {
2158 			scsi_req.direction = (uint8_t)
2159 			    (CFG_IST(ha, CFG_CTRL_242581) ?
2160 			    CF_RD : CF_DATA_IN | CF_STAG);
2161 		} else {
2162 			scsi_req.direction = (uint8_t)
2163 			    (CFG_IST(ha, CFG_CTRL_242581) ?
2164 			    CF_WR : CF_DATA_OUT | CF_STAG);
2165 			cmd->ResponseLen = 0;
2166 
2167 			/* Get command payload. */
2168 			if (ql_get_buffer_data(
2169 			    (caddr_t)(uintptr_t)cmd->ResponseAdr,
2170 			    pld, pld_size, mode) != pld_size) {
2171 				EL(ha, "failed, get_buffer_data\n");
2172 				cmd->Status = EXT_STATUS_COPY_ERR;
2173 
2174 				kmem_free(pkt, pkt_size);
2175 				ql_free_dma_resource(ha, dma_mem);
2176 				kmem_free(dma_mem, sizeof (dma_mem_t));
2177 				return;
2178 			}
2179 
2180 			/* Copy out going data to DMA buffer. */
2181 			ddi_rep_put8(dma_mem->acc_handle, (uint8_t *)pld,
2182 			    (uint8_t *)dma_mem->bp, pld_size,
2183 			    DDI_DEV_AUTOINCR);
2184 
2185 			/* Sync DMA buffer. */
2186 			(void) ddi_dma_sync(dma_mem->dma_handle, 0,
2187 			    dma_mem->size, DDI_DMA_SYNC_FORDEV);
2188 		}
2189 	} else {
2190 		scsi_req.direction = (uint8_t)
2191 		    (CFG_IST(ha, CFG_CTRL_242581) ? 0 : CF_STAG);
2192 		cmd->ResponseLen = 0;
2193 
2194 		pkt_size = sizeof (ql_mbx_iocb_t);
2195 		pkt = kmem_zalloc(pkt_size, KM_SLEEP);
2196 		if (pkt == NULL) {
2197 			EL(ha, "failed, kmem_zalloc-2\n");
2198 			cmd->Status = EXT_STATUS_NO_MEMORY;
2199 			return;
2200 		}
2201 		pld = NULL;
2202 		pld_size = 0;
2203 	}
2204 
2205 	/* retries = ha->port_down_retry_count; */
2206 	retries = 1;
2207 	cmd->Status = EXT_STATUS_OK;
2208 	cmd->DetailStatus = EXT_DSTATUS_NOADNL_INFO;
2209 
2210 	QL_PRINT_9(CE_CONT, "(%d): SCSI cdb\n", ha->instance);
2211 	QL_DUMP_9(scsi_req.cdbp, 8, scsi_req.cdb_len);
2212 
2213 	do {
2214 		if (DRIVER_SUSPENDED(ha)) {
2215 			sts.comp_status = CS_LOOP_DOWN_ABORT;
2216 			break;
2217 		}
2218 
2219 		if (CFG_IST(ha, CFG_CTRL_242581)) {
2220 			pkt->cmd24.entry_type = IOCB_CMD_TYPE_7;
2221 			pkt->cmd24.entry_count = 1;
2222 
2223 			/* Set LUN number */
2224 			pkt->cmd24.fcp_lun[2] = LSB(scsi_req.lun);
2225 			pkt->cmd24.fcp_lun[3] = MSB(scsi_req.lun);
2226 
2227 			/* Set N_port handle */
2228 			pkt->cmd24.n_port_hdl = (uint16_t)LE_16(tq->loop_id);
2229 
2230 			/* Set VP Index */
2231 			pkt->cmd24.vp_index = ha->vp_index;
2232 
2233 			/* Set target ID */
2234 			pkt->cmd24.target_id[0] = tq->d_id.b.al_pa;
2235 			pkt->cmd24.target_id[1] = tq->d_id.b.area;
2236 			pkt->cmd24.target_id[2] = tq->d_id.b.domain;
2237 
2238 			/* Set ISP command timeout. */
2239 			pkt->cmd24.timeout = (uint16_t)LE_16(15);
2240 
2241 			/* Load SCSI CDB */
2242 			ddi_rep_put8(ha->hba_buf.acc_handle, scsi_req.cdbp,
2243 			    pkt->cmd24.scsi_cdb, scsi_req.cdb_len,
2244 			    DDI_DEV_AUTOINCR);
2245 			for (cnt = 0; cnt < MAX_CMDSZ;
2246 			    cnt = (uint16_t)(cnt + 4)) {
2247 				ql_chg_endian((uint8_t *)&pkt->cmd24.scsi_cdb
2248 				    + cnt, 4);
2249 			}
2250 
2251 			/* Set tag queue control flags */
2252 			pkt->cmd24.task = TA_STAG;
2253 
2254 			if (pld_size) {
2255 				/* Set transfer direction. */
2256 				pkt->cmd24.control_flags = scsi_req.direction;
2257 
2258 				/* Set data segment count. */
2259 				pkt->cmd24.dseg_count = LE_16(1);
2260 
2261 				/* Load total byte count. */
2262 				pkt->cmd24.total_byte_count = LE_32(pld_size);
2263 
2264 				/* Load data descriptor. */
2265 				pkt->cmd24.dseg_0_address[0] = (uint32_t)
2266 				    LE_32(LSD(dma_mem->cookie.dmac_laddress));
2267 				pkt->cmd24.dseg_0_address[1] = (uint32_t)
2268 				    LE_32(MSD(dma_mem->cookie.dmac_laddress));
2269 				pkt->cmd24.dseg_0_length = LE_32(pld_size);
2270 			}
2271 		} else if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
2272 			pkt->cmd3.entry_type = IOCB_CMD_TYPE_3;
2273 			pkt->cmd3.entry_count = 1;
2274 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
2275 				pkt->cmd3.target_l = LSB(tq->loop_id);
2276 				pkt->cmd3.target_h = MSB(tq->loop_id);
2277 			} else {
2278 				pkt->cmd3.target_h = LSB(tq->loop_id);
2279 			}
2280 			pkt->cmd3.lun_l = LSB(scsi_req.lun);
2281 			pkt->cmd3.lun_h = MSB(scsi_req.lun);
2282 			pkt->cmd3.control_flags_l = scsi_req.direction;
2283 			pkt->cmd3.timeout = LE_16(15);
2284 			for (cnt = 0; cnt < scsi_req.cdb_len; cnt++) {
2285 				pkt->cmd3.scsi_cdb[cnt] = scsi_req.cdbp[cnt];
2286 			}
2287 			if (pld_size) {
2288 				pkt->cmd3.dseg_count = LE_16(1);
2289 				pkt->cmd3.byte_count = LE_32(pld_size);
2290 				pkt->cmd3.dseg_0_address[0] = (uint32_t)
2291 				    LE_32(LSD(dma_mem->cookie.dmac_laddress));
2292 				pkt->cmd3.dseg_0_address[1] = (uint32_t)
2293 				    LE_32(MSD(dma_mem->cookie.dmac_laddress));
2294 				pkt->cmd3.dseg_0_length = LE_32(pld_size);
2295 			}
2296 		} else {
2297 			pkt->cmd.entry_type = IOCB_CMD_TYPE_2;
2298 			pkt->cmd.entry_count = 1;
2299 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
2300 				pkt->cmd.target_l = LSB(tq->loop_id);
2301 				pkt->cmd.target_h = MSB(tq->loop_id);
2302 			} else {
2303 				pkt->cmd.target_h = LSB(tq->loop_id);
2304 			}
2305 			pkt->cmd.lun_l = LSB(scsi_req.lun);
2306 			pkt->cmd.lun_h = MSB(scsi_req.lun);
2307 			pkt->cmd.control_flags_l = scsi_req.direction;
2308 			pkt->cmd.timeout = LE_16(15);
2309 			for (cnt = 0; cnt < scsi_req.cdb_len; cnt++) {
2310 				pkt->cmd.scsi_cdb[cnt] = scsi_req.cdbp[cnt];
2311 			}
2312 			if (pld_size) {
2313 				pkt->cmd.dseg_count = LE_16(1);
2314 				pkt->cmd.byte_count = LE_32(pld_size);
2315 				pkt->cmd.dseg_0_address = (uint32_t)
2316 				    LE_32(LSD(dma_mem->cookie.dmac_laddress));
2317 				pkt->cmd.dseg_0_length = LE_32(pld_size);
2318 			}
2319 		}
2320 		/* Go issue command and wait for completion. */
2321 		QL_PRINT_9(CE_CONT, "(%d): request pkt\n", ha->instance);
2322 		QL_DUMP_9(pkt, 8, pkt_size);
2323 
2324 		status = ql_issue_mbx_iocb(ha, (caddr_t)pkt, pkt_size);
2325 
2326 		if (pld_size) {
2327 			/* Sync in coming DMA buffer. */
2328 			(void) ddi_dma_sync(dma_mem->dma_handle, 0,
2329 			    dma_mem->size, DDI_DMA_SYNC_FORKERNEL);
2330 			/* Copy in coming DMA data. */
2331 			ddi_rep_get8(dma_mem->acc_handle, (uint8_t *)pld,
2332 			    (uint8_t *)dma_mem->bp, pld_size,
2333 			    DDI_DEV_AUTOINCR);
2334 		}
2335 
2336 		if (CFG_IST(ha, CFG_CTRL_242581)) {
2337 			pkt->sts24.entry_status = (uint8_t)
2338 			    (pkt->sts24.entry_status & 0x3c);
2339 		} else {
2340 			pkt->sts.entry_status = (uint8_t)
2341 			    (pkt->sts.entry_status & 0x7e);
2342 		}
2343 
2344 		if (status == QL_SUCCESS && pkt->sts.entry_status != 0) {
2345 			EL(ha, "failed, entry_status=%xh, d_id=%xh\n",
2346 			    pkt->sts.entry_status, tq->d_id.b24);
2347 			status = QL_FUNCTION_PARAMETER_ERROR;
2348 		}
2349 
2350 		sts.comp_status = (uint16_t)(CFG_IST(ha, CFG_CTRL_242581) ?
2351 		    LE_16(pkt->sts24.comp_status) :
2352 		    LE_16(pkt->sts.comp_status));
2353 
2354 		/*
2355 		 * We have verified about all the request that can be so far.
2356 		 * Now we need to start verification of our ability to
2357 		 * actually issue the CDB.
2358 		 */
2359 		if (DRIVER_SUSPENDED(ha)) {
2360 			sts.comp_status = CS_LOOP_DOWN_ABORT;
2361 			break;
2362 		} else if (status == QL_SUCCESS &&
2363 		    (sts.comp_status == CS_PORT_LOGGED_OUT ||
2364 		    sts.comp_status == CS_PORT_UNAVAILABLE)) {
2365 			EL(ha, "login retry d_id=%xh\n", tq->d_id.b24);
2366 			if (tq->flags & TQF_FABRIC_DEVICE) {
2367 				rval = ql_login_fport(ha, tq, tq->loop_id,
2368 				    LFF_NO_PLOGI, &mr);
2369 				if (rval != QL_SUCCESS) {
2370 					EL(ha, "failed, login_fport=%xh, "
2371 					    "d_id=%xh\n", rval, tq->d_id.b24);
2372 				}
2373 			} else {
2374 				rval = ql_login_lport(ha, tq, tq->loop_id,
2375 				    LLF_NONE);
2376 				if (rval != QL_SUCCESS) {
2377 					EL(ha, "failed, login_lport=%xh, "
2378 					    "d_id=%xh\n", rval, tq->d_id.b24);
2379 				}
2380 			}
2381 		} else {
2382 			break;
2383 		}
2384 
2385 		bzero((caddr_t)pkt, sizeof (ql_mbx_iocb_t));
2386 
2387 	} while (retries--);
2388 
2389 	if (sts.comp_status == CS_LOOP_DOWN_ABORT) {
2390 		/* Cannot issue command now, maybe later */
2391 		EL(ha, "failed, suspended\n");
2392 		kmem_free(pkt, pkt_size);
2393 		ql_free_dma_resource(ha, dma_mem);
2394 		kmem_free(dma_mem, sizeof (dma_mem_t));
2395 		cmd->Status = EXT_STATUS_SUSPENDED;
2396 		cmd->ResponseLen = 0;
2397 		return;
2398 	}
2399 
2400 	if (status != QL_SUCCESS) {
2401 		/* Command error */
2402 		EL(ha, "failed, I/O\n");
2403 		kmem_free(pkt, pkt_size);
2404 		ql_free_dma_resource(ha, dma_mem);
2405 		kmem_free(dma_mem, sizeof (dma_mem_t));
2406 		cmd->Status = EXT_STATUS_ERR;
2407 		cmd->DetailStatus = status;
2408 		cmd->ResponseLen = 0;
2409 		return;
2410 	}
2411 
2412 	/* Setup status. */
2413 	if (CFG_IST(ha, CFG_CTRL_242581)) {
2414 		sts.scsi_status_l = pkt->sts24.scsi_status_l;
2415 		sts.scsi_status_h = pkt->sts24.scsi_status_h;
2416 
2417 		/* Setup residuals. */
2418 		sts.residual_length = LE_32(pkt->sts24.residual_length);
2419 
2420 		/* Setup state flags. */
2421 		sts.state_flags_l = pkt->sts24.state_flags_l;
2422 		sts.state_flags_h = pkt->sts24.state_flags_h;
2423 		if (pld_size && sts.comp_status != CS_DATA_UNDERRUN) {
2424 			sts.state_flags_h = (uint8_t)(sts.state_flags_h |
2425 			    SF_GOT_BUS | SF_GOT_TARGET | SF_SENT_CMD |
2426 			    SF_XFERRED_DATA | SF_GOT_STATUS);
2427 		} else {
2428 			sts.state_flags_h = (uint8_t)(sts.state_flags_h |
2429 			    SF_GOT_BUS | SF_GOT_TARGET | SF_SENT_CMD |
2430 			    SF_GOT_STATUS);
2431 		}
2432 		if (scsi_req.direction & CF_WR) {
2433 			sts.state_flags_l = (uint8_t)(sts.state_flags_l |
2434 			    SF_DATA_OUT);
2435 		} else if (scsi_req.direction & CF_RD) {
2436 			sts.state_flags_l = (uint8_t)(sts.state_flags_l |
2437 			    SF_DATA_IN);
2438 		}
2439 		sts.state_flags_l = (uint8_t)(sts.state_flags_l | SF_SIMPLE_Q);
2440 
2441 		/* Setup FCP response info. */
2442 		sts.rsp_info_length = sts.scsi_status_h & FCP_RSP_LEN_VALID ?
2443 		    LE_32(pkt->sts24.fcp_rsp_data_length) : 0;
2444 		sts.rsp_info = &pkt->sts24.rsp_sense_data[0];
2445 		for (cnt = 0; cnt < sts.rsp_info_length;
2446 		    cnt = (uint16_t)(cnt + 4)) {
2447 			ql_chg_endian(sts.rsp_info + cnt, 4);
2448 		}
2449 
2450 		/* Setup sense data. */
2451 		if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {
2452 			sts.req_sense_length =
2453 			    LE_32(pkt->sts24.fcp_sense_length);
2454 			sts.state_flags_h = (uint8_t)(sts.state_flags_h |
2455 			    SF_ARQ_DONE);
2456 		} else {
2457 			sts.req_sense_length = 0;
2458 		}
2459 		sts.req_sense_data =
2460 		    &pkt->sts24.rsp_sense_data[sts.rsp_info_length];
2461 		cnt2 = (uint16_t)(((uintptr_t)pkt + sizeof (sts_24xx_entry_t)) -
2462 		    (uintptr_t)sts.req_sense_data);
2463 		for (cnt = 0; cnt < cnt2; cnt = (uint16_t)(cnt + 4)) {
2464 			ql_chg_endian(sts.req_sense_data + cnt, 4);
2465 		}
2466 	} else {
2467 		sts.scsi_status_l = pkt->sts.scsi_status_l;
2468 		sts.scsi_status_h = pkt->sts.scsi_status_h;
2469 
2470 		/* Setup residuals. */
2471 		sts.residual_length = LE_32(pkt->sts.residual_length);
2472 
2473 		/* Setup state flags. */
2474 		sts.state_flags_l = pkt->sts.state_flags_l;
2475 		sts.state_flags_h = pkt->sts.state_flags_h;
2476 
2477 		/* Setup FCP response info. */
2478 		sts.rsp_info_length = sts.scsi_status_h & FCP_RSP_LEN_VALID ?
2479 		    LE_16(pkt->sts.rsp_info_length) : 0;
2480 		sts.rsp_info = &pkt->sts.rsp_info[0];
2481 
2482 		/* Setup sense data. */
2483 		sts.req_sense_length = sts.scsi_status_h & FCP_SNS_LEN_VALID ?
2484 		    LE_16(pkt->sts.req_sense_length) : 0;
2485 		sts.req_sense_data = &pkt->sts.req_sense_data[0];
2486 	}
2487 
2488 	QL_PRINT_9(CE_CONT, "(%d): response pkt\n", ha->instance);
2489 	QL_DUMP_9(&pkt->sts, 8, sizeof (sts_entry_t));
2490 
2491 	switch (sts.comp_status) {
2492 	case CS_INCOMPLETE:
2493 	case CS_ABORTED:
2494 	case CS_DEVICE_UNAVAILABLE:
2495 	case CS_PORT_UNAVAILABLE:
2496 	case CS_PORT_LOGGED_OUT:
2497 	case CS_PORT_CONFIG_CHG:
2498 	case CS_PORT_BUSY:
2499 	case CS_LOOP_DOWN_ABORT:
2500 		cmd->Status = EXT_STATUS_BUSY;
2501 		break;
2502 	case CS_RESET:
2503 	case CS_QUEUE_FULL:
2504 		cmd->Status = EXT_STATUS_ERR;
2505 		break;
2506 	case CS_TIMEOUT:
2507 		cmd->Status = EXT_STATUS_ERR;
2508 		break;
2509 	case CS_DATA_OVERRUN:
2510 		cmd->Status = EXT_STATUS_DATA_OVERRUN;
2511 		break;
2512 	case CS_DATA_UNDERRUN:
2513 		cmd->Status = EXT_STATUS_DATA_UNDERRUN;
2514 		break;
2515 	}
2516 
2517 	/*
2518 	 * If non data transfer commands fix tranfer counts.
2519 	 */
2520 	if (scsi_req.cdbp[0] == SCMD_TEST_UNIT_READY ||
2521 	    scsi_req.cdbp[0] == SCMD_REZERO_UNIT ||
2522 	    scsi_req.cdbp[0] == SCMD_SEEK ||
2523 	    scsi_req.cdbp[0] == SCMD_SEEK_G1 ||
2524 	    scsi_req.cdbp[0] == SCMD_RESERVE ||
2525 	    scsi_req.cdbp[0] == SCMD_RELEASE ||
2526 	    scsi_req.cdbp[0] == SCMD_START_STOP ||
2527 	    scsi_req.cdbp[0] == SCMD_DOORLOCK ||
2528 	    scsi_req.cdbp[0] == SCMD_VERIFY ||
2529 	    scsi_req.cdbp[0] == SCMD_WRITE_FILE_MARK ||
2530 	    scsi_req.cdbp[0] == SCMD_VERIFY_G0 ||
2531 	    scsi_req.cdbp[0] == SCMD_SPACE ||
2532 	    scsi_req.cdbp[0] == SCMD_ERASE ||
2533 	    (scsi_req.cdbp[0] == SCMD_FORMAT &&
2534 	    (scsi_req.cdbp[1] & FPB_DATA) == 0)) {
2535 		/*
2536 		 * Non data transfer command, clear sts_entry residual
2537 		 * length.
2538 		 */
2539 		sts.residual_length = 0;
2540 		cmd->ResponseLen = 0;
2541 		if (sts.comp_status == CS_DATA_UNDERRUN) {
2542 			sts.comp_status = CS_COMPLETE;
2543 			cmd->Status = EXT_STATUS_OK;
2544 		}
2545 	} else {
2546 		cmd->ResponseLen = pld_size;
2547 	}
2548 
2549 	/* Correct ISP completion status */
2550 	if (sts.comp_status == CS_COMPLETE && sts.scsi_status_l == 0 &&
2551 	    (sts.scsi_status_h & FCP_RSP_MASK) == 0) {
2552 		QL_PRINT_9(CE_CONT, "(%d): Correct completion\n",
2553 		    ha->instance);
2554 		scsi_req.resid = 0;
2555 	} else if (sts.comp_status == CS_DATA_UNDERRUN) {
2556 		QL_PRINT_9(CE_CONT, "(%d): Correct UNDERRUN\n",
2557 		    ha->instance);
2558 		scsi_req.resid = sts.residual_length;
2559 		if (sts.scsi_status_h & FCP_RESID_UNDER) {
2560 			cmd->Status = (uint32_t)EXT_STATUS_OK;
2561 
2562 			cmd->ResponseLen = (uint32_t)
2563 			    (pld_size - scsi_req.resid);
2564 		} else {
2565 			EL(ha, "failed, Transfer ERROR\n");
2566 			cmd->Status = EXT_STATUS_ERR;
2567 			cmd->ResponseLen = 0;
2568 		}
2569 	} else {
2570 		QL_PRINT_9(CE_CONT, "(%d): error d_id=%xh, comp_status=%xh, "
2571 		    "scsi_status_h=%xh, scsi_status_l=%xh\n", ha->instance,
2572 		    tq->d_id.b24, sts.comp_status, sts.scsi_status_h,
2573 		    sts.scsi_status_l);
2574 
2575 		scsi_req.resid = pld_size;
2576 		/*
2577 		 * Handle residual count on SCSI check
2578 		 * condition.
2579 		 *
2580 		 * - If Residual Under / Over is set, use the
2581 		 *   Residual Transfer Length field in IOCB.
2582 		 * - If Residual Under / Over is not set, and
2583 		 *   Transferred Data bit is set in State Flags
2584 		 *   field of IOCB, report residual value of 0
2585 		 *   (you may want to do this for tape
2586 		 *   Write-type commands only). This takes care
2587 		 *   of logical end of tape problem and does
2588 		 *   not break Unit Attention.
2589 		 * - If Residual Under / Over is not set, and
2590 		 *   Transferred Data bit is not set in State
2591 		 *   Flags, report residual value equal to
2592 		 *   original data transfer length.
2593 		 */
2594 		if (sts.scsi_status_l & STATUS_CHECK) {
2595 			cmd->Status = EXT_STATUS_SCSI_STATUS;
2596 			cmd->DetailStatus = sts.scsi_status_l;
2597 			if (sts.scsi_status_h &
2598 			    (FCP_RESID_OVER | FCP_RESID_UNDER)) {
2599 				scsi_req.resid = sts.residual_length;
2600 			} else if (sts.state_flags_h &
2601 			    STATE_XFERRED_DATA) {
2602 				scsi_req.resid = 0;
2603 			}
2604 		}
2605 	}
2606 
2607 	if (sts.scsi_status_l & STATUS_CHECK &&
2608 	    sts.scsi_status_h & FCP_SNS_LEN_VALID &&
2609 	    sts.req_sense_length) {
2610 		/*
2611 		 * Check condition with vaild sense data flag set and sense
2612 		 * length != 0
2613 		 */
2614 		if (sts.req_sense_length > scsi_req.sense_length) {
2615 			sense_sz = scsi_req.sense_length;
2616 		} else {
2617 			sense_sz = sts.req_sense_length;
2618 		}
2619 
2620 		EL(ha, "failed, Check Condition Status, d_id=%xh\n",
2621 		    tq->d_id.b24);
2622 		QL_DUMP_2(sts.req_sense_data, 8, sts.req_sense_length);
2623 
2624 		if (ddi_copyout(sts.req_sense_data, scsi_req.u_sense,
2625 		    (size_t)sense_sz, mode) != 0) {
2626 			EL(ha, "failed, request sense ddi_copyout\n");
2627 		}
2628 
2629 		cmd->Status = EXT_STATUS_SCSI_STATUS;
2630 		cmd->DetailStatus = sts.scsi_status_l;
2631 	}
2632 
2633 	/* Copy response payload from DMA buffer to application. */
2634 	if (scsi_req.direction & (CF_RD | CF_DATA_IN) &&
2635 	    cmd->ResponseLen != 0) {
2636 		QL_PRINT_9(CE_CONT, "(%d): Data Return resid=%lu, "
2637 		    "byte_count=%u, ResponseLen=%xh\n", ha->instance,
2638 		    scsi_req.resid, pld_size, cmd->ResponseLen);
2639 		QL_DUMP_9(pld, 8, cmd->ResponseLen);
2640 
2641 		/* Send response payload. */
2642 		if (ql_send_buffer_data(pld,
2643 		    (caddr_t)(uintptr_t)cmd->ResponseAdr,
2644 		    cmd->ResponseLen, mode) != cmd->ResponseLen) {
2645 			EL(ha, "failed, send_buffer_data\n");
2646 			cmd->Status = EXT_STATUS_COPY_ERR;
2647 			cmd->ResponseLen = 0;
2648 		}
2649 	}
2650 
2651 	if (cmd->Status != EXT_STATUS_OK) {
2652 		EL(ha, "failed, cmd->Status=%xh, comp_status=%xh, "
2653 		    "d_id=%xh\n", cmd->Status, sts.comp_status, tq->d_id.b24);
2654 	} else {
2655 		/*EMPTY*/
2656 		QL_PRINT_9(CE_CONT, "(%d): done, ResponseLen=%d\n",
2657 		    ha->instance, cmd->ResponseLen);
2658 	}
2659 
2660 	kmem_free(pkt, pkt_size);
2661 	ql_free_dma_resource(ha, dma_mem);
2662 	kmem_free(dma_mem, sizeof (dma_mem_t));
2663 }
2664 
2665 /*
2666  * ql_wwpn_to_scsiaddr
2667  *
2668  * Input:
2669  *	ha:	adapter state pointer.
2670  *	cmd:	EXT_IOCTL cmd struct pointer.
2671  *	mode:	flags.
2672  *
2673  * Context:
2674  *	Kernel context.
2675  */
2676 static void
2677 ql_wwpn_to_scsiaddr(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2678 {
2679 	int		status;
2680 	uint8_t		wwpn[EXT_DEF_WWN_NAME_SIZE];
2681 	EXT_SCSI_ADDR	*tmp_addr;
2682 	ql_tgt_t	*tq;
2683 
2684 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2685 
2686 	if (cmd->RequestLen != EXT_DEF_WWN_NAME_SIZE) {
2687 		/* Return error */
2688 		EL(ha, "incorrect RequestLen\n");
2689 		cmd->Status = EXT_STATUS_INVALID_PARAM;
2690 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
2691 		return;
2692 	}
2693 
2694 	status = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, wwpn,
2695 	    cmd->RequestLen, mode);
2696 
2697 	if (status != 0) {
2698 		cmd->Status = EXT_STATUS_COPY_ERR;
2699 		EL(ha, "failed, ddi_copyin\n");
2700 		return;
2701 	}
2702 
2703 	tq = ql_find_port(ha, wwpn, QLNT_PORT);
2704 
2705 	if (tq == NULL || tq->flags & TQF_INITIATOR_DEVICE) {
2706 		/* no matching device */
2707 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
2708 		EL(ha, "failed, device not found\n");
2709 		return;
2710 	}
2711 
2712 	/* Copy out the IDs found.  For now we can only return target ID. */
2713 	tmp_addr = (EXT_SCSI_ADDR *)(uintptr_t)cmd->ResponseAdr;
2714 
2715 	status = ddi_copyout((void *)wwpn, (void *)&tmp_addr->Target, 8, mode);
2716 
2717 	if (status != 0) {
2718 		cmd->Status = EXT_STATUS_COPY_ERR;
2719 		EL(ha, "failed, ddi_copyout\n");
2720 	} else {
2721 		cmd->Status = EXT_STATUS_OK;
2722 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2723 	}
2724 }
2725 
2726 /*
2727  * ql_host_idx
2728  *	Gets host order index.
2729  *
2730  * Input:
2731  *	ha:	adapter state pointer.
2732  *	cmd:	EXT_IOCTL cmd struct pointer.
2733  *	mode:	flags.
2734  *
2735  * Returns:
2736  *	None, request status indicated in cmd->Status.
2737  *
2738  * Context:
2739  *	Kernel context.
2740  */
2741 static void
2742 ql_host_idx(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2743 {
2744 	uint16_t	idx;
2745 
2746 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2747 
2748 	if (cmd->ResponseLen < sizeof (uint16_t)) {
2749 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2750 		cmd->DetailStatus = sizeof (uint16_t);
2751 		EL(ha, "failed, ResponseLen < Len=%xh\n", cmd->ResponseLen);
2752 		cmd->ResponseLen = 0;
2753 		return;
2754 	}
2755 
2756 	idx = (uint16_t)ha->instance;
2757 
2758 	if (ddi_copyout((void *)&idx, (void *)(uintptr_t)(cmd->ResponseAdr),
2759 	    sizeof (uint16_t), mode) != 0) {
2760 		cmd->Status = EXT_STATUS_COPY_ERR;
2761 		cmd->ResponseLen = 0;
2762 		EL(ha, "failed, ddi_copyout\n");
2763 	} else {
2764 		cmd->ResponseLen = sizeof (uint16_t);
2765 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2766 	}
2767 }
2768 
2769 /*
2770  * ql_host_drvname
2771  *	Gets host driver name
2772  *
2773  * Input:
2774  *	ha:	adapter state pointer.
2775  *	cmd:	EXT_IOCTL cmd struct pointer.
2776  *	mode:	flags.
2777  *
2778  * Returns:
2779  *	None, request status indicated in cmd->Status.
2780  *
2781  * Context:
2782  *	Kernel context.
2783  */
2784 static void
2785 ql_host_drvname(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2786 {
2787 
2788 	char		drvname[] = QL_NAME;
2789 	uint32_t	qlnamelen;
2790 
2791 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2792 
2793 	qlnamelen = (uint32_t)(strlen(QL_NAME)+1);
2794 
2795 	if (cmd->ResponseLen < qlnamelen) {
2796 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2797 		cmd->DetailStatus = qlnamelen;
2798 		EL(ha, "failed, ResponseLen: %xh, needed: %xh\n",
2799 		    cmd->ResponseLen, qlnamelen);
2800 		cmd->ResponseLen = 0;
2801 		return;
2802 	}
2803 
2804 	if (ddi_copyout((void *)&drvname,
2805 	    (void *)(uintptr_t)(cmd->ResponseAdr),
2806 	    qlnamelen, mode) != 0) {
2807 		cmd->Status = EXT_STATUS_COPY_ERR;
2808 		cmd->ResponseLen = 0;
2809 		EL(ha, "failed, ddi_copyout\n");
2810 	} else {
2811 		cmd->ResponseLen = qlnamelen-1;
2812 	}
2813 
2814 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2815 }
2816 
2817 /*
2818  * ql_read_nvram
2819  *	Get NVRAM contents.
2820  *
2821  * Input:
2822  *	ha:	adapter state pointer.
2823  *	cmd:	EXT_IOCTL cmd struct pointer.
2824  *	mode:	flags.
2825  *
2826  * Returns:
2827  *	None, request status indicated in cmd->Status.
2828  *
2829  * Context:
2830  *	Kernel context.
2831  */
2832 static void
2833 ql_read_nvram(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2834 {
2835 	uint32_t	nv_size;
2836 
2837 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2838 
2839 	nv_size = (uint32_t)(CFG_IST(ha, CFG_CTRL_242581) ?
2840 	    sizeof (nvram_24xx_t) : sizeof (nvram_t));
2841 	if (cmd->ResponseLen < nv_size) {
2842 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2843 		cmd->DetailStatus = nv_size;
2844 		EL(ha, "failed, ResponseLen != NVRAM, Len=%xh\n",
2845 		    cmd->ResponseLen);
2846 		cmd->ResponseLen = 0;
2847 		return;
2848 	}
2849 
2850 	/* Get NVRAM data. */
2851 	if (ql_nv_util_dump(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
2852 	    mode) != 0) {
2853 		cmd->Status = EXT_STATUS_COPY_ERR;
2854 		cmd->ResponseLen = 0;
2855 		EL(ha, "failed, copy error\n");
2856 	} else {
2857 		cmd->ResponseLen = nv_size;
2858 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2859 	}
2860 }
2861 
2862 /*
2863  * ql_write_nvram
2864  *	Loads NVRAM contents.
2865  *
2866  * Input:
2867  *	ha:	adapter state pointer.
2868  *	cmd:	EXT_IOCTL cmd struct pointer.
2869  *	mode:	flags.
2870  *
2871  * Returns:
2872  *	None, request status indicated in cmd->Status.
2873  *
2874  * Context:
2875  *	Kernel context.
2876  */
2877 static void
2878 ql_write_nvram(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2879 {
2880 	uint32_t	nv_size;
2881 
2882 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2883 
2884 	nv_size = (uint32_t)(CFG_IST(ha, CFG_CTRL_242581) ?
2885 	    sizeof (nvram_24xx_t) : sizeof (nvram_t));
2886 	if (cmd->RequestLen < nv_size) {
2887 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2888 		cmd->DetailStatus = sizeof (nvram_t);
2889 		EL(ha, "failed, RequestLen != NVRAM, Len=%xh\n",
2890 		    cmd->RequestLen);
2891 		return;
2892 	}
2893 
2894 	/* Load NVRAM data. */
2895 	if (ql_nv_util_load(ha, (void *)(uintptr_t)(cmd->RequestAdr),
2896 	    mode) != 0) {
2897 		cmd->Status = EXT_STATUS_COPY_ERR;
2898 		EL(ha, "failed, copy error\n");
2899 	} else {
2900 		/*EMPTY*/
2901 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2902 	}
2903 }
2904 
2905 /*
2906  * ql_write_vpd
2907  *	Loads VPD contents.
2908  *
2909  * Input:
2910  *	ha:	adapter state pointer.
2911  *	cmd:	EXT_IOCTL cmd struct pointer.
2912  *	mode:	flags.
2913  *
2914  * Returns:
2915  *	None, request status indicated in cmd->Status.
2916  *
2917  * Context:
2918  *	Kernel context.
2919  */
2920 static void
2921 ql_write_vpd(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2922 {
2923 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2924 
2925 	int32_t		rval = 0;
2926 
2927 	if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
2928 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
2929 		EL(ha, "failed, invalid request for HBA\n");
2930 		return;
2931 	}
2932 
2933 	if (cmd->RequestLen < QL_24XX_VPD_SIZE) {
2934 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2935 		cmd->DetailStatus = QL_24XX_VPD_SIZE;
2936 		EL(ha, "failed, RequestLen != VPD len, len passed=%xh\n",
2937 		    cmd->RequestLen);
2938 		return;
2939 	}
2940 
2941 	/* Load VPD data. */
2942 	if ((rval = ql_vpd_load(ha, (void *)(uintptr_t)(cmd->RequestAdr),
2943 	    mode)) != 0) {
2944 		cmd->Status = EXT_STATUS_COPY_ERR;
2945 		cmd->DetailStatus = rval;
2946 		EL(ha, "failed, errno=%x\n", rval);
2947 	} else {
2948 		/*EMPTY*/
2949 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2950 	}
2951 }
2952 
2953 /*
2954  * ql_read_vpd
2955  *	Dumps VPD contents.
2956  *
2957  * Input:
2958  *	ha:	adapter state pointer.
2959  *	cmd:	EXT_IOCTL cmd struct pointer.
2960  *	mode:	flags.
2961  *
2962  * Returns:
2963  *	None, request status indicated in cmd->Status.
2964  *
2965  * Context:
2966  *	Kernel context.
2967  */
2968 static void
2969 ql_read_vpd(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2970 {
2971 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2972 
2973 	if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
2974 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
2975 		EL(ha, "failed, invalid request for HBA\n");
2976 		return;
2977 	}
2978 
2979 	if (cmd->ResponseLen < QL_24XX_VPD_SIZE) {
2980 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2981 		cmd->DetailStatus = QL_24XX_VPD_SIZE;
2982 		EL(ha, "failed, ResponseLen < VPD len, len passed=%xh\n",
2983 		    cmd->ResponseLen);
2984 		return;
2985 	}
2986 
2987 	/* Dump VPD data. */
2988 	if ((ql_vpd_dump(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
2989 	    mode)) != 0) {
2990 		cmd->Status = EXT_STATUS_COPY_ERR;
2991 		EL(ha, "failed,\n");
2992 	} else {
2993 		/*EMPTY*/
2994 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2995 	}
2996 }
2997 
2998 /*
2999  * ql_get_fcache
3000  *	Dumps flash cache contents.
3001  *
3002  * Input:
3003  *	ha:	adapter state pointer.
3004  *	cmd:	EXT_IOCTL cmd struct pointer.
3005  *	mode:	flags.
3006  *
3007  * Returns:
3008  *	None, request status indicated in cmd->Status.
3009  *
3010  * Context:
3011  *	Kernel context.
3012  */
3013 static void
3014 ql_get_fcache(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3015 {
3016 	uint32_t	bsize, boff, types, cpsize, hsize;
3017 	ql_fcache_t	*fptr;
3018 
3019 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3020 
3021 	CACHE_LOCK(ha);
3022 
3023 	if (ha->fcache == NULL) {
3024 		CACHE_UNLOCK(ha);
3025 		cmd->Status = EXT_STATUS_ERR;
3026 		EL(ha, "failed, adapter fcache not setup\n");
3027 		return;
3028 	}
3029 
3030 	if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
3031 		bsize = 100;
3032 	} else {
3033 		bsize = 400;
3034 	}
3035 
3036 	if (cmd->ResponseLen < bsize) {
3037 		CACHE_UNLOCK(ha);
3038 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
3039 		cmd->DetailStatus = bsize;
3040 		EL(ha, "failed, ResponseLen < %d, len passed=%xh\n",
3041 		    bsize, cmd->ResponseLen);
3042 		return;
3043 	}
3044 
3045 	boff = 0;
3046 	bsize = 0;
3047 	fptr = ha->fcache;
3048 
3049 	/*
3050 	 * For backwards compatibility, get one of each image type
3051 	 */
3052 	types = (FTYPE_BIOS | FTYPE_FCODE | FTYPE_EFI);
3053 	while ((fptr != NULL) && (fptr->buf != NULL) && (types != 0)) {
3054 		/* Get the next image */
3055 		if ((fptr = ql_get_fbuf(ha->fcache, types)) != NULL) {
3056 
3057 			cpsize = (fptr->buflen < 100 ? fptr->buflen : 100);
3058 
3059 			if (ddi_copyout(fptr->buf,
3060 			    (void *)(uintptr_t)(cmd->ResponseAdr + boff),
3061 			    cpsize, mode) != 0) {
3062 				CACHE_UNLOCK(ha);
3063 				EL(ha, "ddicopy failed, done\n");
3064 				cmd->Status = EXT_STATUS_COPY_ERR;
3065 				cmd->DetailStatus = 0;
3066 				return;
3067 			}
3068 			boff += 100;
3069 			bsize += cpsize;
3070 			types &= ~(fptr->type);
3071 		}
3072 	}
3073 
3074 	/*
3075 	 * Get the firmware image -- it needs to be last in the
3076 	 * buffer at offset 300 for backwards compatibility. Also for
3077 	 * backwards compatibility, the pci header is stripped off.
3078 	 */
3079 	if ((fptr = ql_get_fbuf(ha->fcache, FTYPE_FW)) != NULL) {
3080 
3081 		hsize = sizeof (pci_header_t) + sizeof (pci_data_t);
3082 		if (hsize > fptr->buflen) {
3083 			CACHE_UNLOCK(ha);
3084 			EL(ha, "header size (%xh) exceeds buflen (%xh)\n",
3085 			    hsize, fptr->buflen);
3086 			cmd->Status = EXT_STATUS_COPY_ERR;
3087 			cmd->DetailStatus = 0;
3088 			return;
3089 		}
3090 
3091 		cpsize = ((fptr->buflen - hsize) < 100 ?
3092 		    fptr->buflen - hsize : 100);
3093 
3094 		if (ddi_copyout(fptr->buf+hsize,
3095 		    (void *)(uintptr_t)(cmd->ResponseAdr + 300),
3096 		    cpsize, mode) != 0) {
3097 			CACHE_UNLOCK(ha);
3098 			EL(ha, "fw ddicopy failed, done\n");
3099 			cmd->Status = EXT_STATUS_COPY_ERR;
3100 			cmd->DetailStatus = 0;
3101 			return;
3102 		}
3103 		bsize += 100;
3104 	}
3105 
3106 	CACHE_UNLOCK(ha);
3107 	cmd->Status = EXT_STATUS_OK;
3108 	cmd->DetailStatus = bsize;
3109 
3110 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3111 }
3112 
3113 /*
3114  * ql_get_fcache_ex
3115  *	Dumps flash cache contents.
3116  *
3117  * Input:
3118  *	ha:	adapter state pointer.
3119  *	cmd:	EXT_IOCTL cmd struct pointer.
3120  *	mode:	flags.
3121  *
3122  * Returns:
3123  *	None, request status indicated in cmd->Status.
3124  *
3125  * Context:
3126  *	Kernel context.
3127  */
3128 static void
3129 ql_get_fcache_ex(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3130 {
3131 	uint32_t	bsize = 0;
3132 	uint32_t	boff = 0;
3133 	ql_fcache_t	*fptr;
3134 
3135 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3136 
3137 	CACHE_LOCK(ha);
3138 	if (ha->fcache == NULL) {
3139 		CACHE_UNLOCK(ha);
3140 		cmd->Status = EXT_STATUS_ERR;
3141 		EL(ha, "failed, adapter fcache not setup\n");
3142 		return;
3143 	}
3144 
3145 	/* Make sure user passed enough buffer space */
3146 	for (fptr = ha->fcache; fptr != NULL; fptr = fptr->next) {
3147 		bsize += FBUFSIZE;
3148 	}
3149 
3150 	if (cmd->ResponseLen < bsize) {
3151 		CACHE_UNLOCK(ha);
3152 		if (cmd->ResponseLen != 0) {
3153 			EL(ha, "failed, ResponseLen < %d, len passed=%xh\n",
3154 			    bsize, cmd->ResponseLen);
3155 		}
3156 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
3157 		cmd->DetailStatus = bsize;
3158 		return;
3159 	}
3160 
3161 	boff = 0;
3162 	fptr = ha->fcache;
3163 	while ((fptr != NULL) && (fptr->buf != NULL)) {
3164 		/* Get the next image */
3165 		if (ddi_copyout(fptr->buf,
3166 		    (void *)(uintptr_t)(cmd->ResponseAdr + boff),
3167 		    (fptr->buflen < FBUFSIZE ? fptr->buflen : FBUFSIZE),
3168 		    mode) != 0) {
3169 			CACHE_UNLOCK(ha);
3170 			EL(ha, "failed, ddicopy at %xh, done\n", boff);
3171 			cmd->Status = EXT_STATUS_COPY_ERR;
3172 			cmd->DetailStatus = 0;
3173 			return;
3174 		}
3175 		boff += FBUFSIZE;
3176 		fptr = fptr->next;
3177 	}
3178 
3179 	CACHE_UNLOCK(ha);
3180 	cmd->Status = EXT_STATUS_OK;
3181 	cmd->DetailStatus = bsize;
3182 
3183 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3184 }
3185 
3186 /*
3187  * ql_read_flash
3188  *	Get flash contents.
3189  *
3190  * Input:
3191  *	ha:	adapter state pointer.
3192  *	cmd:	EXT_IOCTL cmd struct pointer.
3193  *	mode:	flags.
3194  *
3195  * Returns:
3196  *	None, request status indicated in cmd->Status.
3197  *
3198  * Context:
3199  *	Kernel context.
3200  */
3201 static void
3202 ql_read_flash(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3203 {
3204 	ql_xioctl_t	*xp = ha->xioctl;
3205 
3206 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3207 
3208 	if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
3209 		EL(ha, "ql_stall_driver failed\n");
3210 		cmd->Status = EXT_STATUS_BUSY;
3211 		cmd->DetailStatus = xp->fdesc.flash_size;
3212 		cmd->ResponseLen = 0;
3213 		return;
3214 	}
3215 
3216 	if (ql_setup_fcache(ha) != QL_SUCCESS) {
3217 		cmd->Status = EXT_STATUS_ERR;
3218 		cmd->DetailStatus = xp->fdesc.flash_size;
3219 		EL(ha, "failed, ResponseLen=%xh, flash size=%xh\n",
3220 		    cmd->ResponseLen, xp->fdesc.flash_size);
3221 		cmd->ResponseLen = 0;
3222 	} else {
3223 		/* adjust read size to flash size */
3224 		if (cmd->ResponseLen > xp->fdesc.flash_size) {
3225 			EL(ha, "adjusting req=%xh, max=%xh\n",
3226 			    cmd->ResponseLen, xp->fdesc.flash_size);
3227 			cmd->ResponseLen = xp->fdesc.flash_size;
3228 		}
3229 
3230 		/* Get flash data. */
3231 		if (ql_flash_fcode_dump(ha,
3232 		    (void *)(uintptr_t)(cmd->ResponseAdr),
3233 		    (size_t)(cmd->ResponseLen), 0, mode) != 0) {
3234 			cmd->Status = EXT_STATUS_COPY_ERR;
3235 			cmd->ResponseLen = 0;
3236 			EL(ha, "failed,\n");
3237 		}
3238 	}
3239 
3240 	/* Resume I/O */
3241 	if (CFG_IST(ha, CFG_CTRL_242581)) {
3242 		ql_restart_driver(ha);
3243 	} else {
3244 		EL(ha, "isp_abort_needed for restart\n");
3245 		ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED,
3246 		    DRIVER_STALL);
3247 	}
3248 
3249 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3250 }
3251 
3252 /*
3253  * ql_write_flash
3254  *	Loads flash contents.
3255  *
3256  * Input:
3257  *	ha:	adapter state pointer.
3258  *	cmd:	EXT_IOCTL cmd struct pointer.
3259  *	mode:	flags.
3260  *
3261  * Returns:
3262  *	None, request status indicated in cmd->Status.
3263  *
3264  * Context:
3265  *	Kernel context.
3266  */
3267 static void
3268 ql_write_flash(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3269 {
3270 	ql_xioctl_t	*xp = ha->xioctl;
3271 
3272 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3273 
3274 	if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
3275 		EL(ha, "ql_stall_driver failed\n");
3276 		cmd->Status = EXT_STATUS_BUSY;
3277 		cmd->DetailStatus = xp->fdesc.flash_size;
3278 		cmd->ResponseLen = 0;
3279 		return;
3280 	}
3281 
3282 	if (ql_setup_fcache(ha) != QL_SUCCESS) {
3283 		cmd->Status = EXT_STATUS_ERR;
3284 		cmd->DetailStatus = xp->fdesc.flash_size;
3285 		EL(ha, "failed, RequestLen=%xh, size=%xh\n",
3286 		    cmd->RequestLen, xp->fdesc.flash_size);
3287 		cmd->ResponseLen = 0;
3288 	} else {
3289 		/* Load flash data. */
3290 		if (cmd->RequestLen > xp->fdesc.flash_size) {
3291 			cmd->Status = EXT_STATUS_ERR;
3292 			cmd->DetailStatus =  xp->fdesc.flash_size;
3293 			EL(ha, "failed, RequestLen=%xh, flash size=%xh\n",
3294 			    cmd->RequestLen, xp->fdesc.flash_size);
3295 		} else if (ql_flash_fcode_load(ha,
3296 		    (void *)(uintptr_t)(cmd->RequestAdr),
3297 		    (size_t)(cmd->RequestLen), mode) != 0) {
3298 			cmd->Status = EXT_STATUS_COPY_ERR;
3299 			EL(ha, "failed,\n");
3300 		}
3301 	}
3302 
3303 	/* Resume I/O */
3304 	if (CFG_IST(ha, CFG_CTRL_242581)) {
3305 		ql_restart_driver(ha);
3306 	} else {
3307 		EL(ha, "isp_abort_needed for restart\n");
3308 		ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED,
3309 		    DRIVER_STALL);
3310 	}
3311 
3312 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3313 }
3314 
3315 /*
3316  * ql_diagnostic_loopback
3317  *	Performs EXT_CC_LOOPBACK Command
3318  *
3319  * Input:
3320  *	ha:	adapter state pointer.
3321  *	cmd:	Local EXT_IOCTL cmd struct pointer.
3322  *	mode:	flags.
3323  *
3324  * Returns:
3325  *	None, request status indicated in cmd->Status.
3326  *
3327  * Context:
3328  *	Kernel context.
3329  */
3330 static void
3331 ql_diagnostic_loopback(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3332 {
3333 	EXT_LOOPBACK_REQ	plbreq;
3334 	EXT_LOOPBACK_RSP	plbrsp;
3335 	ql_mbx_data_t		mr;
3336 	uint32_t		rval;
3337 	caddr_t			bp;
3338 
3339 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3340 
3341 	/* Get loop back request. */
3342 	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
3343 	    (void *)&plbreq, sizeof (EXT_LOOPBACK_REQ), mode) != 0) {
3344 		EL(ha, "failed, ddi_copyin\n");
3345 		cmd->Status = EXT_STATUS_COPY_ERR;
3346 		cmd->ResponseLen = 0;
3347 		return;
3348 	}
3349 
3350 	/* Check transfer length fits in buffer. */
3351 	if (plbreq.BufferLength < plbreq.TransferCount &&
3352 	    plbreq.TransferCount < MAILBOX_BUFFER_SIZE) {
3353 		EL(ha, "failed, BufferLength=%d, xfercnt=%d, "
3354 		    "mailbox_buffer_size=%d\n", plbreq.BufferLength,
3355 		    plbreq.TransferCount, MAILBOX_BUFFER_SIZE);
3356 		cmd->Status = EXT_STATUS_INVALID_PARAM;
3357 		cmd->ResponseLen = 0;
3358 		return;
3359 	}
3360 
3361 	/* Allocate command memory. */
3362 	bp = kmem_zalloc(plbreq.TransferCount, KM_SLEEP);
3363 	if (bp == NULL) {
3364 		EL(ha, "failed, kmem_zalloc\n");
3365 		cmd->Status = EXT_STATUS_NO_MEMORY;
3366 		cmd->ResponseLen = 0;
3367 		return;
3368 	}
3369 
3370 	/* Get loopback data. */
3371 	if (ql_get_buffer_data((caddr_t)(uintptr_t)plbreq.BufferAddress,
3372 	    bp, plbreq.TransferCount, mode) != plbreq.TransferCount) {
3373 		EL(ha, "failed, ddi_copyin-2\n");
3374 		kmem_free(bp, plbreq.TransferCount);
3375 		cmd->Status = EXT_STATUS_COPY_ERR;
3376 		cmd->ResponseLen = 0;
3377 		return;
3378 	}
3379 
3380 	if (DRIVER_SUSPENDED(ha) || ql_stall_driver(ha, 0) != QL_SUCCESS) {
3381 		EL(ha, "failed, LOOP_NOT_READY\n");
3382 		kmem_free(bp, plbreq.TransferCount);
3383 		cmd->Status = EXT_STATUS_BUSY;
3384 		cmd->ResponseLen = 0;
3385 		return;
3386 	}
3387 
3388 	/* Shutdown IP. */
3389 	if (ha->flags & IP_INITIALIZED) {
3390 		(void) ql_shutdown_ip(ha);
3391 	}
3392 
3393 	/* determine topology so we can send the loopback or the echo */
3394 	/* Echo is supported on 2300's only and above */
3395 
3396 	if ((ha->topology & QL_F_PORT) && ha->device_id >= 0x2300) {
3397 		QL_PRINT_9(CE_CONT, "(%d): F_PORT topology -- using echo\n",
3398 		    ha->instance);
3399 		plbrsp.CommandSent = INT_DEF_LB_ECHO_CMD;
3400 		rval = ql_diag_echo(ha, bp, plbreq.TransferCount, 0, &mr);
3401 	} else {
3402 		plbrsp.CommandSent = INT_DEF_LB_LOOPBACK_CMD;
3403 		rval = ql_diag_loopback(ha, bp, plbreq.TransferCount,
3404 		    plbreq.Options, plbreq.IterationCount, &mr);
3405 	}
3406 
3407 	ql_restart_driver(ha);
3408 
3409 	/* Restart IP if it was shutdown. */
3410 	if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
3411 		(void) ql_initialize_ip(ha);
3412 		ql_isp_rcvbuf(ha);
3413 	}
3414 
3415 	if (rval != QL_SUCCESS) {
3416 		EL(ha, "failed, diagnostic_loopback_mbx=%xh\n", rval);
3417 		kmem_free(bp, plbreq.TransferCount);
3418 		cmd->Status = EXT_STATUS_MAILBOX;
3419 		cmd->DetailStatus = rval;
3420 		cmd->ResponseLen = 0;
3421 		return;
3422 	}
3423 
3424 	/* Return loopback data. */
3425 	if (ql_send_buffer_data(bp, (caddr_t)(uintptr_t)plbreq.BufferAddress,
3426 	    plbreq.TransferCount, mode) != plbreq.TransferCount) {
3427 		EL(ha, "failed, ddi_copyout\n");
3428 		kmem_free(bp, plbreq.TransferCount);
3429 		cmd->Status = EXT_STATUS_COPY_ERR;
3430 		cmd->ResponseLen = 0;
3431 		return;
3432 	}
3433 	kmem_free(bp, plbreq.TransferCount);
3434 
3435 	/* Return loopback results. */
3436 	plbrsp.BufferAddress = plbreq.BufferAddress;
3437 	plbrsp.BufferLength = plbreq.TransferCount;
3438 	plbrsp.CompletionStatus = mr.mb[0];
3439 
3440 	if (plbrsp.CommandSent == INT_DEF_LB_ECHO_CMD) {
3441 		plbrsp.CrcErrorCount = 0;
3442 		plbrsp.DisparityErrorCount = 0;
3443 		plbrsp.FrameLengthErrorCount = 0;
3444 		plbrsp.IterationCountLastError = 0;
3445 	} else {
3446 		plbrsp.CrcErrorCount = mr.mb[1];
3447 		plbrsp.DisparityErrorCount = mr.mb[2];
3448 		plbrsp.FrameLengthErrorCount = mr.mb[3];
3449 		plbrsp.IterationCountLastError = (mr.mb[19] >> 16) | mr.mb[18];
3450 	}
3451 
3452 	rval = ddi_copyout((void *)&plbrsp,
3453 	    (void *)(uintptr_t)cmd->ResponseAdr,
3454 	    sizeof (EXT_LOOPBACK_RSP), mode);
3455 	if (rval != 0) {
3456 		EL(ha, "failed, ddi_copyout-2\n");
3457 		cmd->Status = EXT_STATUS_COPY_ERR;
3458 		cmd->ResponseLen = 0;
3459 		return;
3460 	}
3461 	cmd->ResponseLen = sizeof (EXT_LOOPBACK_RSP);
3462 
3463 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3464 }
3465 
3466 /*
3467  * ql_send_els_rnid
3468  *	IOCTL for extended link service RNID command.
3469  *
3470  * Input:
3471  *	ha:	adapter state pointer.
3472  *	cmd:	User space CT arguments pointer.
3473  *	mode:	flags.
3474  *
3475  * Returns:
3476  *	None, request status indicated in cmd->Status.
3477  *
3478  * Context:
3479  *	Kernel context.
3480  */
3481 static void
3482 ql_send_els_rnid(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3483 {
3484 	EXT_RNID_REQ	tmp_rnid;
3485 	port_id_t	tmp_fcid;
3486 	caddr_t		tmp_buf, bptr;
3487 	uint32_t	copy_len;
3488 	ql_tgt_t	*tq;
3489 	EXT_RNID_DATA	rnid_data;
3490 	uint32_t	loop_ready_wait = 10 * 60 * 10;
3491 	int		rval = 0;
3492 	uint32_t	local_hba = 0;
3493 
3494 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3495 
3496 	if (DRIVER_SUSPENDED(ha)) {
3497 		EL(ha, "failed, LOOP_NOT_READY\n");
3498 		cmd->Status = EXT_STATUS_BUSY;
3499 		cmd->ResponseLen = 0;
3500 		return;
3501 	}
3502 
3503 	if (cmd->RequestLen != sizeof (EXT_RNID_REQ)) {
3504 		/* parameter error */
3505 		EL(ha, "failed, RequestLen < EXT_RNID_REQ, Len=%xh\n",
3506 		    cmd->RequestLen);
3507 		cmd->Status = EXT_STATUS_INVALID_PARAM;
3508 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
3509 		cmd->ResponseLen = 0;
3510 		return;
3511 	}
3512 
3513 	if (ddi_copyin((void*)(uintptr_t)cmd->RequestAdr,
3514 	    &tmp_rnid, cmd->RequestLen, mode) != 0) {
3515 		EL(ha, "failed, ddi_copyin\n");
3516 		cmd->Status = EXT_STATUS_COPY_ERR;
3517 		cmd->ResponseLen = 0;
3518 		return;
3519 	}
3520 
3521 	/* Find loop ID of the device */
3522 	if (tmp_rnid.Addr.Type == EXT_DEF_TYPE_WWNN) {
3523 		bptr = CFG_IST(ha, CFG_CTRL_242581) ?
3524 		    (caddr_t)&ha->init_ctrl_blk.cb24.node_name :
3525 		    (caddr_t)&ha->init_ctrl_blk.cb.node_name;
3526 		if (bcmp((void *)bptr, (void *)tmp_rnid.Addr.FcAddr.WWNN,
3527 		    EXT_DEF_WWN_NAME_SIZE) == 0) {
3528 			local_hba = 1;
3529 		} else {
3530 			tq = ql_find_port(ha,
3531 			    (uint8_t *)tmp_rnid.Addr.FcAddr.WWNN, QLNT_NODE);
3532 		}
3533 	} else if (tmp_rnid.Addr.Type == EXT_DEF_TYPE_WWPN) {
3534 		bptr = CFG_IST(ha, CFG_CTRL_242581) ?
3535 		    (caddr_t)&ha->init_ctrl_blk.cb24.port_name :
3536 		    (caddr_t)&ha->init_ctrl_blk.cb.port_name;
3537 		if (bcmp((void *)bptr, (void *)tmp_rnid.Addr.FcAddr.WWPN,
3538 		    EXT_DEF_WWN_NAME_SIZE) == 0) {
3539 			local_hba = 1;
3540 		} else {
3541 			tq = ql_find_port(ha,
3542 			    (uint8_t *)tmp_rnid.Addr.FcAddr.WWPN, QLNT_PORT);
3543 		}
3544 	} else if (tmp_rnid.Addr.Type == EXT_DEF_TYPE_PORTID) {
3545 		/*
3546 		 * Copy caller's d_id to tmp space.
3547 		 */
3548 		bcopy(&tmp_rnid.Addr.FcAddr.Id[1], tmp_fcid.r.d_id,
3549 		    EXT_DEF_PORTID_SIZE_ACTUAL);
3550 		BIG_ENDIAN_24(&tmp_fcid.r.d_id[0]);
3551 
3552 		if (bcmp((void *)&ha->d_id, (void *)tmp_fcid.r.d_id,
3553 		    EXT_DEF_PORTID_SIZE_ACTUAL) == 0) {
3554 			local_hba = 1;
3555 		} else {
3556 			tq = ql_find_port(ha, (uint8_t *)tmp_fcid.r.d_id,
3557 			    QLNT_PID);
3558 		}
3559 	}
3560 
3561 	/* Allocate memory for command. */
3562 	tmp_buf = kmem_zalloc(SEND_RNID_RSP_SIZE, KM_SLEEP);
3563 	if (tmp_buf == NULL) {
3564 		EL(ha, "failed, kmem_zalloc\n");
3565 		cmd->Status = EXT_STATUS_NO_MEMORY;
3566 		cmd->ResponseLen = 0;
3567 		return;
3568 	}
3569 
3570 	if (local_hba) {
3571 		rval = ql_get_rnid_params(ha, SEND_RNID_RSP_SIZE, tmp_buf);
3572 		if (rval != QL_SUCCESS) {
3573 			EL(ha, "failed, get_rnid_params_mbx=%xh\n", rval);
3574 			kmem_free(tmp_buf, SEND_RNID_RSP_SIZE);
3575 			cmd->Status = EXT_STATUS_ERR;
3576 			cmd->ResponseLen = 0;
3577 			return;
3578 		}
3579 
3580 		/* Save gotten RNID data. */
3581 		bcopy(tmp_buf, &rnid_data, sizeof (EXT_RNID_DATA));
3582 
3583 		/* Now build the Send RNID response */
3584 		tmp_buf[0] = (char)(EXT_DEF_RNID_DFORMAT_TOPO_DISC);
3585 		tmp_buf[1] = (2 * EXT_DEF_WWN_NAME_SIZE);
3586 		tmp_buf[2] = 0;
3587 		tmp_buf[3] = sizeof (EXT_RNID_DATA);
3588 
3589 		if (CFG_IST(ha, CFG_CTRL_242581)) {
3590 			bcopy(ha->init_ctrl_blk.cb24.port_name, &tmp_buf[4],
3591 			    EXT_DEF_WWN_NAME_SIZE);
3592 			bcopy(ha->init_ctrl_blk.cb24.node_name,
3593 			    &tmp_buf[4 + EXT_DEF_WWN_NAME_SIZE],
3594 			    EXT_DEF_WWN_NAME_SIZE);
3595 		} else {
3596 			bcopy(ha->init_ctrl_blk.cb.port_name, &tmp_buf[4],
3597 			    EXT_DEF_WWN_NAME_SIZE);
3598 			bcopy(ha->init_ctrl_blk.cb.node_name,
3599 			    &tmp_buf[4 + EXT_DEF_WWN_NAME_SIZE],
3600 			    EXT_DEF_WWN_NAME_SIZE);
3601 		}
3602 
3603 		bcopy((uint8_t *)&rnid_data,
3604 		    &tmp_buf[4 + 2 * EXT_DEF_WWN_NAME_SIZE],
3605 		    sizeof (EXT_RNID_DATA));
3606 	} else {
3607 		if (tq == NULL) {
3608 			/* no matching device */
3609 			EL(ha, "failed, device not found\n");
3610 			kmem_free(tmp_buf, SEND_RNID_RSP_SIZE);
3611 			cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
3612 			cmd->DetailStatus = EXT_DSTATUS_TARGET;
3613 			cmd->ResponseLen = 0;
3614 			return;
3615 		}
3616 
3617 		/* Send command */
3618 		rval = ql_send_rnid_els(ha, tq->loop_id,
3619 		    (uint8_t)tmp_rnid.DataFormat, SEND_RNID_RSP_SIZE, tmp_buf);
3620 		if (rval != QL_SUCCESS) {
3621 			EL(ha, "failed, send_rnid_mbx=%xh, id=%xh\n",
3622 			    rval, tq->loop_id);
3623 			while (LOOP_NOT_READY(ha)) {
3624 				ql_delay(ha, 100000);
3625 				if (loop_ready_wait-- == 0) {
3626 					EL(ha, "failed, loop not ready\n");
3627 					cmd->Status = EXT_STATUS_ERR;
3628 					cmd->ResponseLen = 0;
3629 				}
3630 			}
3631 			rval = ql_send_rnid_els(ha, tq->loop_id,
3632 			    (uint8_t)tmp_rnid.DataFormat, SEND_RNID_RSP_SIZE,
3633 			    tmp_buf);
3634 			if (rval != QL_SUCCESS) {
3635 				/* error */
3636 				EL(ha, "failed, send_rnid_mbx=%xh, id=%xh\n",
3637 				    rval, tq->loop_id);
3638 				kmem_free(tmp_buf, SEND_RNID_RSP_SIZE);
3639 				cmd->Status = EXT_STATUS_ERR;
3640 				cmd->ResponseLen = 0;
3641 				return;
3642 			}
3643 		}
3644 	}
3645 
3646 	/* Copy the response */
3647 	copy_len = (cmd->ResponseLen > SEND_RNID_RSP_SIZE) ?
3648 	    SEND_RNID_RSP_SIZE : cmd->ResponseLen;
3649 
3650 	if (ql_send_buffer_data(tmp_buf, (caddr_t)(uintptr_t)cmd->ResponseAdr,
3651 	    copy_len, mode) != copy_len) {
3652 		cmd->Status = EXT_STATUS_COPY_ERR;
3653 		EL(ha, "failed, ddi_copyout\n");
3654 	} else {
3655 		cmd->ResponseLen = copy_len;
3656 		if (copy_len < SEND_RNID_RSP_SIZE) {
3657 			cmd->Status = EXT_STATUS_DATA_OVERRUN;
3658 			EL(ha, "failed, EXT_STATUS_DATA_OVERRUN\n");
3659 
3660 		} else if (cmd->ResponseLen > SEND_RNID_RSP_SIZE) {
3661 			cmd->Status = EXT_STATUS_DATA_UNDERRUN;
3662 			EL(ha, "failed, EXT_STATUS_DATA_UNDERRUN\n");
3663 		} else {
3664 			cmd->Status = EXT_STATUS_OK;
3665 			QL_PRINT_9(CE_CONT, "(%d): done\n",
3666 			    ha->instance);
3667 		}
3668 	}
3669 
3670 	kmem_free(tmp_buf, SEND_RNID_RSP_SIZE);
3671 }
3672 
3673 /*
3674  * ql_set_host_data
3675  *	Process IOCTL subcommand to set host/adapter related data.
3676  *
3677  * Input:
3678  *	ha:	adapter state pointer.
3679  *	cmd:	User space CT arguments pointer.
3680  *	mode:	flags.
3681  *
3682  * Returns:
3683  *	None, request status indicated in cmd->Status.
3684  *
3685  * Context:
3686  *	Kernel context.
3687  */
3688 static void
3689 ql_set_host_data(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3690 {
3691 	QL_PRINT_9(CE_CONT, "(%d): started, SubCode=%d\n", ha->instance,
3692 	    cmd->SubCode);
3693 
3694 	/*
3695 	 * case off on command subcode
3696 	 */
3697 	switch (cmd->SubCode) {
3698 	case EXT_SC_SET_RNID:
3699 		ql_set_rnid_parameters(ha, cmd, mode);
3700 		break;
3701 	case EXT_SC_RST_STATISTICS:
3702 		(void) ql_reset_statistics(ha, cmd);
3703 		break;
3704 	case EXT_SC_SET_BEACON_STATE:
3705 		ql_set_led_state(ha, cmd, mode);
3706 		break;
3707 	case EXT_SC_SET_PARMS:
3708 	case EXT_SC_SET_BUS_MODE:
3709 	case EXT_SC_SET_DR_DUMP_BUF:
3710 	case EXT_SC_SET_RISC_CODE:
3711 	case EXT_SC_SET_FLASH_RAM:
3712 	case EXT_SC_SET_LUN_BITMASK:
3713 	case EXT_SC_SET_RETRY_CNT:
3714 	case EXT_SC_SET_RTIN:
3715 	case EXT_SC_SET_FC_LUN_BITMASK:
3716 	case EXT_SC_ADD_TARGET_DEVICE:
3717 	case EXT_SC_SWAP_TARGET_DEVICE:
3718 	case EXT_SC_SET_SEL_TIMEOUT:
3719 	default:
3720 		/* function not supported. */
3721 		EL(ha, "failed, function not supported=%d\n", cmd->SubCode);
3722 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
3723 		break;
3724 	}
3725 
3726 	if (cmd->Status != EXT_STATUS_OK) {
3727 		EL(ha, "failed, Status=%d\n", cmd->Status);
3728 	} else {
3729 		/*EMPTY*/
3730 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3731 	}
3732 }
3733 
3734 /*
3735  * ql_get_host_data
3736  *	Performs EXT_CC_GET_DATA subcommands.
3737  *
3738  * Input:
3739  *	ha:	adapter state pointer.
3740  *	cmd:	Local EXT_IOCTL cmd struct pointer.
3741  *	mode:	flags.
3742  *
3743  * Returns:
3744  *	None, request status indicated in cmd->Status.
3745  *
3746  * Context:
3747  *	Kernel context.
3748  */
3749 static void
3750 ql_get_host_data(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3751 {
3752 	int	out_size = 0;
3753 
3754 	QL_PRINT_9(CE_CONT, "(%d): started, SubCode=%d\n", ha->instance,
3755 	    cmd->SubCode);
3756 
3757 	/* case off on command subcode */
3758 	switch (cmd->SubCode) {
3759 	case EXT_SC_GET_STATISTICS:
3760 		out_size = sizeof (EXT_HBA_PORT_STAT);
3761 		break;
3762 	case EXT_SC_GET_FC_STATISTICS:
3763 		out_size = sizeof (EXT_HBA_PORT_STAT);
3764 		break;
3765 	case EXT_SC_GET_PORT_SUMMARY:
3766 		out_size = sizeof (EXT_DEVICEDATA);
3767 		break;
3768 	case EXT_SC_GET_RNID:
3769 		out_size = sizeof (EXT_RNID_DATA);
3770 		break;
3771 	case EXT_SC_GET_TARGET_ID:
3772 		out_size = sizeof (EXT_DEST_ADDR);
3773 		break;
3774 	case EXT_SC_GET_BEACON_STATE:
3775 		out_size = sizeof (EXT_BEACON_CONTROL);
3776 		break;
3777 	case EXT_SC_GET_FC4_STATISTICS:
3778 		out_size = sizeof (EXT_HBA_FC4STATISTICS);
3779 		break;
3780 	case EXT_SC_GET_SCSI_ADDR:
3781 	case EXT_SC_GET_ERR_DETECTIONS:
3782 	case EXT_SC_GET_BUS_MODE:
3783 	case EXT_SC_GET_DR_DUMP_BUF:
3784 	case EXT_SC_GET_RISC_CODE:
3785 	case EXT_SC_GET_FLASH_RAM:
3786 	case EXT_SC_GET_LINK_STATUS:
3787 	case EXT_SC_GET_LOOP_ID:
3788 	case EXT_SC_GET_LUN_BITMASK:
3789 	case EXT_SC_GET_PORT_DATABASE:
3790 	case EXT_SC_GET_PORT_DATABASE_MEM:
3791 	case EXT_SC_GET_POSITION_MAP:
3792 	case EXT_SC_GET_RETRY_CNT:
3793 	case EXT_SC_GET_RTIN:
3794 	case EXT_SC_GET_FC_LUN_BITMASK:
3795 	case EXT_SC_GET_SEL_TIMEOUT:
3796 	default:
3797 		/* function not supported. */
3798 		EL(ha, "failed, function not supported=%d\n", cmd->SubCode);
3799 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
3800 		cmd->ResponseLen = 0;
3801 		return;
3802 	}
3803 
3804 	if (cmd->ResponseLen < out_size) {
3805 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
3806 		cmd->DetailStatus = out_size;
3807 		EL(ha, "failed, ResponseLen=%xh, size=%xh\n",
3808 		    cmd->ResponseLen, out_size);
3809 		cmd->ResponseLen = 0;
3810 		return;
3811 	}
3812 
3813 	switch (cmd->SubCode) {
3814 	case EXT_SC_GET_RNID:
3815 		ql_get_rnid_parameters(ha, cmd, mode);
3816 		break;
3817 	case EXT_SC_GET_STATISTICS:
3818 		ql_get_statistics(ha, cmd, mode);
3819 		break;
3820 	case EXT_SC_GET_FC_STATISTICS:
3821 		ql_get_statistics_fc(ha, cmd, mode);
3822 		break;
3823 	case EXT_SC_GET_FC4_STATISTICS:
3824 		ql_get_statistics_fc4(ha, cmd, mode);
3825 		break;
3826 	case EXT_SC_GET_PORT_SUMMARY:
3827 		ql_get_port_summary(ha, cmd, mode);
3828 		break;
3829 	case EXT_SC_GET_TARGET_ID:
3830 		ql_get_target_id(ha, cmd, mode);
3831 		break;
3832 	case EXT_SC_GET_BEACON_STATE:
3833 		ql_get_led_state(ha, cmd, mode);
3834 		break;
3835 	}
3836 
3837 	if (cmd->Status != EXT_STATUS_OK) {
3838 		EL(ha, "failed, Status=%d\n", cmd->Status);
3839 	} else {
3840 		/*EMPTY*/
3841 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3842 	}
3843 }
3844 
3845 /* ******************************************************************** */
3846 /*			Helper Functions				*/
3847 /* ******************************************************************** */
3848 
3849 /*
3850  * ql_lun_count
3851  *	Get numbers of LUNS on target.
3852  *
3853  * Input:
3854  *	ha:	adapter state pointer.
3855  *	q:	device queue pointer.
3856  *
3857  * Returns:
3858  *	Number of LUNs.
3859  *
3860  * Context:
3861  *	Kernel context.
3862  */
3863 static int
3864 ql_lun_count(ql_adapter_state_t *ha, ql_tgt_t *tq)
3865 {
3866 	int	cnt;
3867 
3868 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3869 
3870 	/* Bypass LUNs that failed. */
3871 	cnt = ql_report_lun(ha, tq);
3872 	if (cnt == 0) {
3873 		cnt = ql_inq_scan(ha, tq, ha->maximum_luns_per_target);
3874 	}
3875 
3876 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3877 
3878 	return (cnt);
3879 }
3880 
3881 /*
3882  * ql_report_lun
3883  *	Get numbers of LUNS using report LUN command.
3884  *
3885  * Input:
3886  *	ha:	adapter state pointer.
3887  *	q:	target queue pointer.
3888  *
3889  * Returns:
3890  *	Number of LUNs.
3891  *
3892  * Context:
3893  *	Kernel context.
3894  */
3895 static int
3896 ql_report_lun(ql_adapter_state_t *ha, ql_tgt_t *tq)
3897 {
3898 	int			rval;
3899 	uint8_t			retries;
3900 	ql_mbx_iocb_t		*pkt;
3901 	ql_rpt_lun_lst_t	*rpt;
3902 	dma_mem_t		dma_mem;
3903 	uint32_t		pkt_size, cnt;
3904 	uint16_t		comp_status;
3905 	uint8_t			scsi_status_h, scsi_status_l, *reqs;
3906 
3907 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3908 
3909 	if (DRIVER_SUSPENDED(ha)) {
3910 		EL(ha, "failed, LOOP_NOT_READY\n");
3911 		return (0);
3912 	}
3913 
3914 	pkt_size = sizeof (ql_mbx_iocb_t) + sizeof (ql_rpt_lun_lst_t);
3915 	pkt = kmem_zalloc(pkt_size, KM_SLEEP);
3916 	if (pkt == NULL) {
3917 		EL(ha, "failed, kmem_zalloc\n");
3918 		return (0);
3919 	}
3920 	rpt = (ql_rpt_lun_lst_t *)((caddr_t)pkt + sizeof (ql_mbx_iocb_t));
3921 
3922 	/* Get DMA memory for the IOCB */
3923 	if (ql_get_dma_mem(ha, &dma_mem, sizeof (ql_rpt_lun_lst_t),
3924 	    LITTLE_ENDIAN_DMA, QL_DMA_RING_ALIGN) != QL_SUCCESS) {
3925 		cmn_err(CE_WARN, "%s(%d): DMA memory "
3926 		    "alloc failed", QL_NAME, ha->instance);
3927 		kmem_free(pkt, pkt_size);
3928 		return (0);
3929 	}
3930 
3931 	for (retries = 0; retries < 4; retries++) {
3932 		if (CFG_IST(ha, CFG_CTRL_242581)) {
3933 			pkt->cmd24.entry_type = IOCB_CMD_TYPE_7;
3934 			pkt->cmd24.entry_count = 1;
3935 
3936 			/* Set N_port handle */
3937 			pkt->cmd24.n_port_hdl = (uint16_t)LE_16(tq->loop_id);
3938 
3939 			/* Set target ID */
3940 			pkt->cmd24.target_id[0] = tq->d_id.b.al_pa;
3941 			pkt->cmd24.target_id[1] = tq->d_id.b.area;
3942 			pkt->cmd24.target_id[2] = tq->d_id.b.domain;
3943 
3944 			/* Set ISP command timeout. */
3945 			pkt->cmd24.timeout = LE_16(15);
3946 
3947 			/* Load SCSI CDB */
3948 			pkt->cmd24.scsi_cdb[0] = SCMD_REPORT_LUNS;
3949 			pkt->cmd24.scsi_cdb[6] =
3950 			    MSB(MSW(sizeof (ql_rpt_lun_lst_t)));
3951 			pkt->cmd24.scsi_cdb[7] =
3952 			    LSB(MSW(sizeof (ql_rpt_lun_lst_t)));
3953 			pkt->cmd24.scsi_cdb[8] =
3954 			    MSB(LSW(sizeof (ql_rpt_lun_lst_t)));
3955 			pkt->cmd24.scsi_cdb[9] =
3956 			    LSB(LSW(sizeof (ql_rpt_lun_lst_t)));
3957 			for (cnt = 0; cnt < MAX_CMDSZ; cnt += 4) {
3958 				ql_chg_endian((uint8_t *)&pkt->cmd24.scsi_cdb
3959 				    + cnt, 4);
3960 			}
3961 
3962 			/* Set tag queue control flags */
3963 			pkt->cmd24.task = TA_STAG;
3964 
3965 			/* Set transfer direction. */
3966 			pkt->cmd24.control_flags = CF_RD;
3967 
3968 			/* Set data segment count. */
3969 			pkt->cmd24.dseg_count = LE_16(1);
3970 
3971 			/* Load total byte count. */
3972 			/* Load data descriptor. */
3973 			pkt->cmd24.dseg_0_address[0] = (uint32_t)
3974 			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
3975 			pkt->cmd24.dseg_0_address[1] = (uint32_t)
3976 			    LE_32(MSD(dma_mem.cookie.dmac_laddress));
3977 			pkt->cmd24.total_byte_count =
3978 			    LE_32(sizeof (ql_rpt_lun_lst_t));
3979 			pkt->cmd24.dseg_0_length =
3980 			    LE_32(sizeof (ql_rpt_lun_lst_t));
3981 		} else if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
3982 			pkt->cmd3.entry_type = IOCB_CMD_TYPE_3;
3983 			pkt->cmd3.entry_count = 1;
3984 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
3985 				pkt->cmd3.target_l = LSB(tq->loop_id);
3986 				pkt->cmd3.target_h = MSB(tq->loop_id);
3987 			} else {
3988 				pkt->cmd3.target_h = LSB(tq->loop_id);
3989 			}
3990 			pkt->cmd3.control_flags_l = CF_DATA_IN | CF_STAG;
3991 			pkt->cmd3.timeout = LE_16(15);
3992 			pkt->cmd3.dseg_count = LE_16(1);
3993 			pkt->cmd3.scsi_cdb[0] = SCMD_REPORT_LUNS;
3994 			pkt->cmd3.scsi_cdb[6] =
3995 			    MSB(MSW(sizeof (ql_rpt_lun_lst_t)));
3996 			pkt->cmd3.scsi_cdb[7] =
3997 			    LSB(MSW(sizeof (ql_rpt_lun_lst_t)));
3998 			pkt->cmd3.scsi_cdb[8] =
3999 			    MSB(LSW(sizeof (ql_rpt_lun_lst_t)));
4000 			pkt->cmd3.scsi_cdb[9] =
4001 			    LSB(LSW(sizeof (ql_rpt_lun_lst_t)));
4002 			pkt->cmd3.byte_count =
4003 			    LE_32(sizeof (ql_rpt_lun_lst_t));
4004 			pkt->cmd3.dseg_0_address[0] = (uint32_t)
4005 			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
4006 			pkt->cmd3.dseg_0_address[1] = (uint32_t)
4007 			    LE_32(MSD(dma_mem.cookie.dmac_laddress));
4008 			pkt->cmd3.dseg_0_length =
4009 			    LE_32(sizeof (ql_rpt_lun_lst_t));
4010 		} else {
4011 			pkt->cmd.entry_type = IOCB_CMD_TYPE_2;
4012 			pkt->cmd.entry_count = 1;
4013 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
4014 				pkt->cmd.target_l = LSB(tq->loop_id);
4015 				pkt->cmd.target_h = MSB(tq->loop_id);
4016 			} else {
4017 				pkt->cmd.target_h = LSB(tq->loop_id);
4018 			}
4019 			pkt->cmd.control_flags_l = CF_DATA_IN | CF_STAG;
4020 			pkt->cmd.timeout = LE_16(15);
4021 			pkt->cmd.dseg_count = LE_16(1);
4022 			pkt->cmd.scsi_cdb[0] = SCMD_REPORT_LUNS;
4023 			pkt->cmd.scsi_cdb[6] =
4024 			    MSB(MSW(sizeof (ql_rpt_lun_lst_t)));
4025 			pkt->cmd.scsi_cdb[7] =
4026 			    LSB(MSW(sizeof (ql_rpt_lun_lst_t)));
4027 			pkt->cmd.scsi_cdb[8] =
4028 			    MSB(LSW(sizeof (ql_rpt_lun_lst_t)));
4029 			pkt->cmd.scsi_cdb[9] =
4030 			    LSB(LSW(sizeof (ql_rpt_lun_lst_t)));
4031 			pkt->cmd.byte_count =
4032 			    LE_32(sizeof (ql_rpt_lun_lst_t));
4033 			pkt->cmd.dseg_0_address = (uint32_t)
4034 			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
4035 			pkt->cmd.dseg_0_length =
4036 			    LE_32(sizeof (ql_rpt_lun_lst_t));
4037 		}
4038 
4039 		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
4040 		    sizeof (ql_mbx_iocb_t));
4041 
4042 		/* Sync in coming DMA buffer. */
4043 		(void) ddi_dma_sync(dma_mem.dma_handle, 0, dma_mem.size,
4044 		    DDI_DMA_SYNC_FORKERNEL);
4045 		/* Copy in coming DMA data. */
4046 		ddi_rep_get8(dma_mem.acc_handle, (uint8_t *)rpt,
4047 		    (uint8_t *)dma_mem.bp, dma_mem.size, DDI_DEV_AUTOINCR);
4048 
4049 		if (CFG_IST(ha, CFG_CTRL_242581)) {
4050 			pkt->sts24.entry_status = (uint8_t)
4051 			    (pkt->sts24.entry_status & 0x3c);
4052 			comp_status = (uint16_t)LE_16(pkt->sts24.comp_status);
4053 			scsi_status_h = pkt->sts24.scsi_status_h;
4054 			scsi_status_l = pkt->sts24.scsi_status_l;
4055 			cnt = scsi_status_h & FCP_RSP_LEN_VALID ?
4056 			    LE_32(pkt->sts24.fcp_rsp_data_length) : 0;
4057 			reqs = &pkt->sts24.rsp_sense_data[cnt];
4058 		} else {
4059 			pkt->sts.entry_status = (uint8_t)
4060 			    (pkt->sts.entry_status & 0x7e);
4061 			comp_status = (uint16_t)LE_16(pkt->sts.comp_status);
4062 			scsi_status_h = pkt->sts.scsi_status_h;
4063 			scsi_status_l = pkt->sts.scsi_status_l;
4064 			reqs = &pkt->sts.req_sense_data[0];
4065 		}
4066 		if (rval == QL_SUCCESS && pkt->sts.entry_status != 0) {
4067 			EL(ha, "failed, entry_status=%xh, d_id=%xh\n",
4068 			    pkt->sts.entry_status, tq->d_id.b24);
4069 			rval = QL_FUNCTION_PARAMETER_ERROR;
4070 		}
4071 
4072 		if (rval != QL_SUCCESS || comp_status != CS_COMPLETE ||
4073 		    scsi_status_l & STATUS_CHECK) {
4074 			/* Device underrun, treat as OK. */
4075 			if (rval == QL_SUCCESS &&
4076 			    comp_status == CS_DATA_UNDERRUN &&
4077 			    scsi_status_h & FCP_RESID_UNDER) {
4078 				break;
4079 			}
4080 
4081 			EL(ha, "failed, issue_iocb=%xh, d_id=%xh, cs=%xh, "
4082 			    "ss_h=%xh, ss_l=%xh\n", rval, tq->d_id.b24,
4083 			    comp_status, scsi_status_h, scsi_status_l);
4084 
4085 			if (rval == QL_SUCCESS) {
4086 				if ((comp_status == CS_TIMEOUT) ||
4087 				    (comp_status == CS_PORT_UNAVAILABLE) ||
4088 				    (comp_status == CS_PORT_LOGGED_OUT)) {
4089 					rval = QL_FUNCTION_TIMEOUT;
4090 					break;
4091 				}
4092 				rval = QL_FUNCTION_FAILED;
4093 			} else if (rval == QL_ABORTED) {
4094 				break;
4095 			}
4096 
4097 			if (scsi_status_l & STATUS_CHECK) {
4098 				EL(ha, "STATUS_CHECK Sense Data\n%2xh%3xh"
4099 				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh"
4100 				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh\n", reqs[0],
4101 				    reqs[1], reqs[2], reqs[3], reqs[4],
4102 				    reqs[5], reqs[6], reqs[7], reqs[8],
4103 				    reqs[9], reqs[10], reqs[11], reqs[12],
4104 				    reqs[13], reqs[14], reqs[15], reqs[16],
4105 				    reqs[17]);
4106 			}
4107 		} else {
4108 			break;
4109 		}
4110 		bzero((caddr_t)pkt, pkt_size);
4111 	}
4112 
4113 	if (rval != QL_SUCCESS) {
4114 		EL(ha, "failed=%xh\n", rval);
4115 		rval = 0;
4116 	} else {
4117 		QL_PRINT_9(CE_CONT, "(%d): LUN list\n", ha->instance);
4118 		QL_DUMP_9(rpt, 8, rpt->hdr.len + 8);
4119 		rval = (int)(BE_32(rpt->hdr.len) / 8);
4120 	}
4121 
4122 	kmem_free(pkt, pkt_size);
4123 	ql_free_dma_resource(ha, &dma_mem);
4124 
4125 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
4126 
4127 	return (rval);
4128 }
4129 
4130 /*
4131  * ql_inq_scan
4132  *	Get numbers of LUNS using inquiry command.
4133  *
4134  * Input:
4135  *	ha:		adapter state pointer.
4136  *	tq:		target queue pointer.
4137  *	count:		scan for the number of existing LUNs.
4138  *
4139  * Returns:
4140  *	Number of LUNs.
4141  *
4142  * Context:
4143  *	Kernel context.
4144  */
4145 static int
4146 ql_inq_scan(ql_adapter_state_t *ha, ql_tgt_t *tq, int count)
4147 {
4148 	int		lun, cnt, rval;
4149 	ql_mbx_iocb_t	*pkt;
4150 	uint8_t		*inq;
4151 	uint32_t	pkt_size;
4152 
4153 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
4154 
4155 	pkt_size = sizeof (ql_mbx_iocb_t) + INQ_DATA_SIZE;
4156 	pkt = kmem_zalloc(pkt_size, KM_SLEEP);
4157 	if (pkt == NULL) {
4158 		EL(ha, "failed, kmem_zalloc\n");
4159 		return (0);
4160 	}
4161 	inq = (uint8_t *)((caddr_t)pkt + sizeof (ql_mbx_iocb_t));
4162 
4163 	cnt = 0;
4164 	for (lun = 0; lun < MAX_LUNS; lun++) {
4165 
4166 		if (DRIVER_SUSPENDED(ha)) {
4167 			rval = QL_LOOP_DOWN;
4168 			cnt = 0;
4169 			break;
4170 		}
4171 
4172 		rval = ql_inq(ha, tq, lun, pkt, INQ_DATA_SIZE);
4173 		if (rval == QL_SUCCESS) {
4174 			switch (*inq) {
4175 			case DTYPE_DIRECT:
4176 			case DTYPE_PROCESSOR:	/* Appliance. */
4177 			case DTYPE_WORM:
4178 			case DTYPE_RODIRECT:
4179 			case DTYPE_SCANNER:
4180 			case DTYPE_OPTICAL:
4181 			case DTYPE_CHANGER:
4182 			case DTYPE_ESI:
4183 				cnt++;
4184 				break;
4185 			case DTYPE_SEQUENTIAL:
4186 				cnt++;
4187 				tq->flags |= TQF_TAPE_DEVICE;
4188 				break;
4189 			default:
4190 				QL_PRINT_9(CE_CONT, "(%d): failed, "
4191 				    "unsupported device id=%xh, lun=%d, "
4192 				    "type=%xh\n", ha->instance, tq->loop_id,
4193 				    lun, *inq);
4194 				break;
4195 			}
4196 
4197 			if (*inq == DTYPE_ESI || cnt >= count) {
4198 				break;
4199 			}
4200 		} else if (rval == QL_ABORTED || rval == QL_FUNCTION_TIMEOUT) {
4201 			cnt = 0;
4202 			break;
4203 		}
4204 	}
4205 
4206 	kmem_free(pkt, pkt_size);
4207 
4208 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
4209 
4210 	return (cnt);
4211 }
4212 
4213 /*
4214  * ql_inq
4215  *	Issue inquiry command.
4216  *
4217  * Input:
4218  *	ha:		adapter state pointer.
4219  *	tq:		target queue pointer.
4220  *	lun:		LUN number.
4221  *	pkt:		command and buffer pointer.
4222  *	inq_len:	amount of inquiry data.
4223  *
4224  * Returns:
4225  *	ql local function return status code.
4226  *
4227  * Context:
4228  *	Kernel context.
4229  */
4230 static int
4231 ql_inq(ql_adapter_state_t *ha, ql_tgt_t *tq, int lun, ql_mbx_iocb_t *pkt,
4232     uint8_t inq_len)
4233 {
4234 	dma_mem_t	dma_mem;
4235 	int		rval, retries;
4236 	uint32_t	pkt_size, cnt;
4237 	uint16_t	comp_status;
4238 	uint8_t		scsi_status_h, scsi_status_l, *reqs;
4239 	caddr_t		inq_data;
4240 
4241 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
4242 
4243 	if (DRIVER_SUSPENDED(ha)) {
4244 		EL(ha, "failed, loop down\n");
4245 		return (QL_FUNCTION_TIMEOUT);
4246 	}
4247 
4248 	pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + inq_len);
4249 	bzero((caddr_t)pkt, pkt_size);
4250 
4251 	inq_data = (caddr_t)pkt + sizeof (ql_mbx_iocb_t);
4252 
4253 	/* Get DMA memory for the IOCB */
4254 	if (ql_get_dma_mem(ha, &dma_mem, inq_len,
4255 	    LITTLE_ENDIAN_DMA, QL_DMA_RING_ALIGN) != QL_SUCCESS) {
4256 		cmn_err(CE_WARN, "%s(%d): DMA memory "
4257 		    "alloc failed", QL_NAME, ha->instance);
4258 		return (0);
4259 	}
4260 
4261 	for (retries = 0; retries < 4; retries++) {
4262 		if (CFG_IST(ha, CFG_CTRL_242581)) {
4263 			pkt->cmd24.entry_type = IOCB_CMD_TYPE_7;
4264 			pkt->cmd24.entry_count = 1;
4265 
4266 			/* Set LUN number */
4267 			pkt->cmd24.fcp_lun[2] = LSB(lun);
4268 			pkt->cmd24.fcp_lun[3] = MSB(lun);
4269 
4270 			/* Set N_port handle */
4271 			pkt->cmd24.n_port_hdl = (uint16_t)LE_16(tq->loop_id);
4272 
4273 			/* Set target ID */
4274 			pkt->cmd24.target_id[0] = tq->d_id.b.al_pa;
4275 			pkt->cmd24.target_id[1] = tq->d_id.b.area;
4276 			pkt->cmd24.target_id[2] = tq->d_id.b.domain;
4277 
4278 			/* Set ISP command timeout. */
4279 			pkt->cmd24.timeout = LE_16(15);
4280 
4281 			/* Load SCSI CDB */
4282 			pkt->cmd24.scsi_cdb[0] = SCMD_INQUIRY;
4283 			pkt->cmd24.scsi_cdb[4] = inq_len;
4284 			for (cnt = 0; cnt < MAX_CMDSZ; cnt += 4) {
4285 				ql_chg_endian((uint8_t *)&pkt->cmd24.scsi_cdb
4286 				    + cnt, 4);
4287 			}
4288 
4289 			/* Set tag queue control flags */
4290 			pkt->cmd24.task = TA_STAG;
4291 
4292 			/* Set transfer direction. */
4293 			pkt->cmd24.control_flags = CF_RD;
4294 
4295 			/* Set data segment count. */
4296 			pkt->cmd24.dseg_count = LE_16(1);
4297 
4298 			/* Load total byte count. */
4299 			pkt->cmd24.total_byte_count = LE_32(inq_len);
4300 
4301 			/* Load data descriptor. */
4302 			pkt->cmd24.dseg_0_address[0] = (uint32_t)
4303 			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
4304 			pkt->cmd24.dseg_0_address[1] = (uint32_t)
4305 			    LE_32(MSD(dma_mem.cookie.dmac_laddress));
4306 			pkt->cmd24.dseg_0_length = LE_32(inq_len);
4307 		} else if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
4308 			pkt->cmd3.entry_type = IOCB_CMD_TYPE_3;
4309 			cnt = CMD_TYPE_3_DATA_SEGMENTS;
4310 
4311 			pkt->cmd3.entry_count = 1;
4312 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
4313 				pkt->cmd3.target_l = LSB(tq->loop_id);
4314 				pkt->cmd3.target_h = MSB(tq->loop_id);
4315 			} else {
4316 				pkt->cmd3.target_h = LSB(tq->loop_id);
4317 			}
4318 			pkt->cmd3.lun_l = LSB(lun);
4319 			pkt->cmd3.lun_h = MSB(lun);
4320 			pkt->cmd3.control_flags_l = CF_DATA_IN | CF_STAG;
4321 			pkt->cmd3.timeout = LE_16(15);
4322 			pkt->cmd3.scsi_cdb[0] = SCMD_INQUIRY;
4323 			pkt->cmd3.scsi_cdb[4] = inq_len;
4324 			pkt->cmd3.dseg_count = LE_16(1);
4325 			pkt->cmd3.byte_count = LE_32(inq_len);
4326 			pkt->cmd3.dseg_0_address[0] = (uint32_t)
4327 			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
4328 			pkt->cmd3.dseg_0_address[1] = (uint32_t)
4329 			    LE_32(MSD(dma_mem.cookie.dmac_laddress));
4330 			pkt->cmd3.dseg_0_length = LE_32(inq_len);
4331 		} else {
4332 			pkt->cmd.entry_type = IOCB_CMD_TYPE_2;
4333 			cnt = CMD_TYPE_2_DATA_SEGMENTS;
4334 
4335 			pkt->cmd.entry_count = 1;
4336 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
4337 				pkt->cmd.target_l = LSB(tq->loop_id);
4338 				pkt->cmd.target_h = MSB(tq->loop_id);
4339 			} else {
4340 				pkt->cmd.target_h = LSB(tq->loop_id);
4341 			}
4342 			pkt->cmd.lun_l = LSB(lun);
4343 			pkt->cmd.lun_h = MSB(lun);
4344 			pkt->cmd.control_flags_l = CF_DATA_IN | CF_STAG;
4345 			pkt->cmd.timeout = LE_16(15);
4346 			pkt->cmd.scsi_cdb[0] = SCMD_INQUIRY;
4347 			pkt->cmd.scsi_cdb[4] = inq_len;
4348 			pkt->cmd.dseg_count = LE_16(1);
4349 			pkt->cmd.byte_count = LE_32(inq_len);
4350 			pkt->cmd.dseg_0_address = (uint32_t)
4351 			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
4352 			pkt->cmd.dseg_0_length = LE_32(inq_len);
4353 		}
4354 
4355 /*		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, pkt_size); */
4356 		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
4357 		    sizeof (ql_mbx_iocb_t));
4358 
4359 		/* Sync in coming IOCB DMA buffer. */
4360 		(void) ddi_dma_sync(dma_mem.dma_handle, 0, dma_mem.size,
4361 		    DDI_DMA_SYNC_FORKERNEL);
4362 		/* Copy in coming DMA data. */
4363 		ddi_rep_get8(dma_mem.acc_handle, (uint8_t *)inq_data,
4364 		    (uint8_t *)dma_mem.bp, dma_mem.size, DDI_DEV_AUTOINCR);
4365 
4366 		if (CFG_IST(ha, CFG_CTRL_242581)) {
4367 			pkt->sts24.entry_status = (uint8_t)
4368 			    (pkt->sts24.entry_status & 0x3c);
4369 			comp_status = (uint16_t)LE_16(pkt->sts24.comp_status);
4370 			scsi_status_h = pkt->sts24.scsi_status_h;
4371 			scsi_status_l = pkt->sts24.scsi_status_l;
4372 			cnt = scsi_status_h & FCP_RSP_LEN_VALID ?
4373 			    LE_32(pkt->sts24.fcp_rsp_data_length) : 0;
4374 			reqs = &pkt->sts24.rsp_sense_data[cnt];
4375 		} else {
4376 			pkt->sts.entry_status = (uint8_t)
4377 			    (pkt->sts.entry_status & 0x7e);
4378 			comp_status = (uint16_t)LE_16(pkt->sts.comp_status);
4379 			scsi_status_h = pkt->sts.scsi_status_h;
4380 			scsi_status_l = pkt->sts.scsi_status_l;
4381 			reqs = &pkt->sts.req_sense_data[0];
4382 		}
4383 		if (rval == QL_SUCCESS && pkt->sts.entry_status != 0) {
4384 			EL(ha, "failed, entry_status=%xh, d_id=%xh\n",
4385 			    pkt->sts.entry_status, tq->d_id.b24);
4386 			rval = QL_FUNCTION_PARAMETER_ERROR;
4387 		}
4388 
4389 		if (rval != QL_SUCCESS || comp_status != CS_COMPLETE ||
4390 		    scsi_status_l & STATUS_CHECK) {
4391 			EL(ha, "failed, issue_iocb=%xh, d_id=%xh, cs=%xh, "
4392 			    "ss_h=%xh, ss_l=%xh\n", rval, tq->d_id.b24,
4393 			    comp_status, scsi_status_h, scsi_status_l);
4394 
4395 			if (rval == QL_SUCCESS) {
4396 				if ((comp_status == CS_TIMEOUT) ||
4397 				    (comp_status == CS_PORT_UNAVAILABLE) ||
4398 				    (comp_status == CS_PORT_LOGGED_OUT)) {
4399 					rval = QL_FUNCTION_TIMEOUT;
4400 					break;
4401 				}
4402 				rval = QL_FUNCTION_FAILED;
4403 			}
4404 
4405 			if (scsi_status_l & STATUS_CHECK) {
4406 				EL(ha, "STATUS_CHECK Sense Data\n%2xh%3xh"
4407 				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh"
4408 				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh\n", reqs[0],
4409 				    reqs[1], reqs[2], reqs[3], reqs[4],
4410 				    reqs[5], reqs[6], reqs[7], reqs[8],
4411 				    reqs[9], reqs[10], reqs[11], reqs[12],
4412 				    reqs[13], reqs[14], reqs[15], reqs[16],
4413 				    reqs[17]);
4414 			}
4415 		} else {
4416 			break;
4417 		}
4418 	}
4419 	ql_free_dma_resource(ha, &dma_mem);
4420 
4421 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
4422 
4423 	return (rval);
4424 }
4425 
4426 /*
4427  * ql_get_buffer_data
4428  *	Copies data from user space to kernal buffer.
4429  *
4430  * Input:
4431  *	src:	User source buffer address.
4432  *	dst:	Kernal destination buffer address.
4433  *	size:	Amount of data.
4434  *	mode:	flags.
4435  *
4436  * Returns:
4437  *	Returns number of bytes transferred.
4438  *
4439  * Context:
4440  *	Kernel context.
4441  */
4442 static uint32_t
4443 ql_get_buffer_data(caddr_t src, caddr_t dst, uint32_t size, int mode)
4444 {
4445 	uint32_t	cnt;
4446 
4447 	for (cnt = 0; cnt < size; cnt++) {
4448 		if (ddi_copyin(src++, dst++, 1, mode) != 0) {
4449 			QL_PRINT_2(CE_CONT, "failed, ddi_copyin\n");
4450 			break;
4451 		}
4452 	}
4453 
4454 	return (cnt);
4455 }
4456 
4457 /*
4458  * ql_send_buffer_data
4459  *	Copies data from kernal buffer to user space.
4460  *
4461  * Input:
4462  *	src:	Kernal source buffer address.
4463  *	dst:	User destination buffer address.
4464  *	size:	Amount of data.
4465  *	mode:	flags.
4466  *
4467  * Returns:
4468  *	Returns number of bytes transferred.
4469  *
4470  * Context:
4471  *	Kernel context.
4472  */
4473 static uint32_t
4474 ql_send_buffer_data(caddr_t src, caddr_t dst, uint32_t size, int mode)
4475 {
4476 	uint32_t	cnt;
4477 
4478 	for (cnt = 0; cnt < size; cnt++) {
4479 		if (ddi_copyout(src++, dst++, 1, mode) != 0) {
4480 			QL_PRINT_2(CE_CONT, "failed, ddi_copyin\n");
4481 			break;
4482 		}
4483 	}
4484 
4485 	return (cnt);
4486 }
4487 
4488 /*
4489  * ql_find_port
4490  *	Locates device queue.
4491  *
4492  * Input:
4493  *	ha:	adapter state pointer.
4494  *	name:	device port name.
4495  *
4496  * Returns:
4497  *	Returns target queue pointer.
4498  *
4499  * Context:
4500  *	Kernel context.
4501  */
4502 static ql_tgt_t *
4503 ql_find_port(ql_adapter_state_t *ha, uint8_t *name, uint16_t type)
4504 {
4505 	ql_link_t	*link;
4506 	ql_tgt_t	*tq;
4507 	uint16_t	index;
4508 
4509 	/* Scan port list for requested target */
4510 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
4511 		for (link = ha->dev[index].first; link != NULL;
4512 		    link = link->next) {
4513 			tq = link->base_address;
4514 
4515 			switch (type) {
4516 			case QLNT_LOOP_ID:
4517 				if (bcmp(name, &tq->loop_id,
4518 				    sizeof (uint16_t)) == 0) {
4519 					return (tq);
4520 				}
4521 				break;
4522 			case QLNT_PORT:
4523 				if (bcmp(name, tq->port_name, 8) == 0) {
4524 					return (tq);
4525 				}
4526 				break;
4527 			case QLNT_NODE:
4528 				if (bcmp(name, tq->node_name, 8) == 0) {
4529 					return (tq);
4530 				}
4531 				break;
4532 			case QLNT_PID:
4533 				if (bcmp(name, tq->d_id.r.d_id,
4534 				    sizeof (tq->d_id.r.d_id)) == 0) {
4535 					return (tq);
4536 				}
4537 				break;
4538 			default:
4539 				EL(ha, "failed, invalid type=%d\n",  type);
4540 				return (NULL);
4541 			}
4542 		}
4543 	}
4544 
4545 	return (NULL);
4546 }
4547 
4548 /*
4549  * ql_24xx_flash_desc
4550  *	Get flash descriptor table.
4551  *
4552  * Input:
4553  *	ha:		adapter state pointer.
4554  *
4555  * Returns:
4556  *	ql local function return status code.
4557  *
4558  * Context:
4559  *	Kernel context.
4560  */
4561 static int
4562 ql_24xx_flash_desc(ql_adapter_state_t *ha)
4563 {
4564 	uint32_t	cnt;
4565 	uint16_t	chksum, *bp, data;
4566 	int		rval;
4567 	flash_desc_t	*fdesc;
4568 	ql_xioctl_t	*xp = ha->xioctl;
4569 
4570 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
4571 
4572 	if (ha->flash_desc_addr == 0) {
4573 		EL(ha, "desc ptr=0\n");
4574 		return (QL_FUNCTION_FAILED);
4575 	}
4576 
4577 	if ((fdesc = kmem_zalloc(sizeof (flash_desc_t), KM_SLEEP)) == NULL) {
4578 		EL(ha, "kmem_zalloc=null\n");
4579 		return (QL_MEMORY_ALLOC_FAILED);
4580 	}
4581 	rval = ql_dump_fcode(ha, (uint8_t *)fdesc, sizeof (flash_desc_t),
4582 	    ha->flash_desc_addr << 2);
4583 	if (rval != QL_SUCCESS) {
4584 		EL(ha, "read status=%xh\n", rval);
4585 		kmem_free(fdesc, sizeof (flash_desc_t));
4586 		return (rval);
4587 	}
4588 
4589 	chksum = 0;
4590 	bp = (uint16_t *)fdesc;
4591 	for (cnt = 0; cnt < (sizeof (flash_desc_t)) / 2; cnt++) {
4592 		data = *bp++;
4593 		LITTLE_ENDIAN_16(&data);
4594 		chksum += data;
4595 	}
4596 
4597 	LITTLE_ENDIAN_32(&fdesc->flash_valid);
4598 	LITTLE_ENDIAN_16(&fdesc->flash_version);
4599 	LITTLE_ENDIAN_16(&fdesc->flash_len);
4600 	LITTLE_ENDIAN_16(&fdesc->flash_checksum);
4601 	LITTLE_ENDIAN_16(&fdesc->flash_manuf);
4602 	LITTLE_ENDIAN_16(&fdesc->flash_id);
4603 	LITTLE_ENDIAN_32(&fdesc->block_size);
4604 	LITTLE_ENDIAN_32(&fdesc->alt_block_size);
4605 	LITTLE_ENDIAN_32(&fdesc->flash_size);
4606 	LITTLE_ENDIAN_32(&fdesc->write_enable_data);
4607 	LITTLE_ENDIAN_32(&fdesc->read_timeout);
4608 
4609 	/* flash size in desc table is in 1024 bytes */
4610 	fdesc->flash_size = fdesc->flash_size * 0x400;
4611 
4612 	if (chksum != 0 || fdesc->flash_valid != FLASH_DESC_VAILD ||
4613 	    fdesc->flash_version != FLASH_DESC_VERSION) {
4614 		EL(ha, "invalid descriptor table\n");
4615 		kmem_free(fdesc, sizeof (flash_desc_t));
4616 		return (QL_FUNCTION_FAILED);
4617 	}
4618 
4619 	bcopy(fdesc, &xp->fdesc, sizeof (flash_desc_t));
4620 	kmem_free(fdesc, sizeof (flash_desc_t));
4621 
4622 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
4623 
4624 	return (QL_SUCCESS);
4625 }
4626 
4627 /*
4628  * ql_setup_flash
4629  *	Gets the manufacturer and id number of the flash chip, and
4630  *	sets up the size parameter.
4631  *
4632  * Input:
4633  *	ha:	adapter state pointer.
4634  *
4635  * Returns:
4636  *	int:	ql local function return status code.
4637  *
4638  * Context:
4639  *	Kernel context.
4640  */
4641 static int
4642 ql_setup_flash(ql_adapter_state_t *ha)
4643 {
4644 	ql_xioctl_t	*xp = ha->xioctl;
4645 	int		rval = QL_SUCCESS;
4646 
4647 	if (xp->fdesc.flash_size != 0) {
4648 		return (rval);
4649 	}
4650 
4651 	if (CFG_IST(ha, CFG_CTRL_2200) && !ha->subven_id) {
4652 		return (QL_FUNCTION_FAILED);
4653 	}
4654 
4655 	if (CFG_IST(ha, CFG_CTRL_2581)) {
4656 		/*
4657 		 * Temporarily set the ha->xioctl->fdesc.flash_size to
4658 		 * 25xx flash size to avoid failing of ql_dump_focde.
4659 		 */
4660 		ha->xioctl->fdesc.flash_size = CFG_IST(ha, CFG_CTRL_25XX) ?
4661 		    0x200000 : 0x400000;
4662 		if (ql_24xx_flash_desc(ha) == QL_SUCCESS) {
4663 			EL(ha, "flash desc table ok, exit\n");
4664 			return (rval);
4665 		}
4666 		(void) ql_24xx_flash_id(ha);
4667 
4668 	} else if (CFG_IST(ha, CFG_CTRL_242581)) {
4669 		(void) ql_24xx_flash_id(ha);
4670 	} else {
4671 		ql_flash_enable(ha);
4672 
4673 		ql_write_flash_byte(ha, 0x5555, 0xaa);
4674 		ql_write_flash_byte(ha, 0x2aaa, 0x55);
4675 		ql_write_flash_byte(ha, 0x5555, 0x90);
4676 		xp->fdesc.flash_manuf = (uint8_t)ql_read_flash_byte(ha, 0x0000);
4677 
4678 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
4679 			ql_write_flash_byte(ha, 0xaaaa, 0xaa);
4680 			ql_write_flash_byte(ha, 0x5555, 0x55);
4681 			ql_write_flash_byte(ha, 0xaaaa, 0x90);
4682 			xp->fdesc.flash_id = (uint16_t)
4683 			    ql_read_flash_byte(ha, 0x0002);
4684 		} else {
4685 			ql_write_flash_byte(ha, 0x5555, 0xaa);
4686 			ql_write_flash_byte(ha, 0x2aaa, 0x55);
4687 			ql_write_flash_byte(ha, 0x5555, 0x90);
4688 			xp->fdesc.flash_id = (uint16_t)
4689 			    ql_read_flash_byte(ha, 0x0001);
4690 		}
4691 
4692 		ql_write_flash_byte(ha, 0x5555, 0xaa);
4693 		ql_write_flash_byte(ha, 0x2aaa, 0x55);
4694 		ql_write_flash_byte(ha, 0x5555, 0xf0);
4695 
4696 		ql_flash_disable(ha);
4697 	}
4698 
4699 	/* Default flash descriptor table. */
4700 	xp->fdesc.write_statusreg_cmd = 1;
4701 	xp->fdesc.write_enable_bits = 0;
4702 	xp->fdesc.unprotect_sector_cmd = 0;
4703 	xp->fdesc.protect_sector_cmd = 0;
4704 	xp->fdesc.write_disable_bits = 0x9c;
4705 	xp->fdesc.block_size = 0x10000;
4706 	xp->fdesc.erase_cmd = 0xd8;
4707 
4708 	switch (xp->fdesc.flash_manuf) {
4709 	case AMD_FLASH:
4710 		switch (xp->fdesc.flash_id) {
4711 		case SPAN_FLASHID_2048K:
4712 			xp->fdesc.flash_size = 0x200000;
4713 			break;
4714 		case AMD_FLASHID_1024K:
4715 			xp->fdesc.flash_size = 0x100000;
4716 			break;
4717 		case AMD_FLASHID_512K:
4718 		case AMD_FLASHID_512Kt:
4719 		case AMD_FLASHID_512Kb:
4720 			if (CFG_IST(ha, CFG_SBUS_CARD)) {
4721 				xp->fdesc.flash_size = QL_SBUS_FCODE_SIZE;
4722 			} else {
4723 				xp->fdesc.flash_size = 0x80000;
4724 			}
4725 			break;
4726 		case AMD_FLASHID_128K:
4727 			xp->fdesc.flash_size = 0x20000;
4728 			break;
4729 		default:
4730 			rval = QL_FUNCTION_FAILED;
4731 			break;
4732 		}
4733 		break;
4734 	case ST_FLASH:
4735 		switch (xp->fdesc.flash_id) {
4736 		case ST_FLASHID_128K:
4737 			xp->fdesc.flash_size = 0x20000;
4738 			break;
4739 		case ST_FLASHID_512K:
4740 			xp->fdesc.flash_size = 0x80000;
4741 			break;
4742 		case ST_FLASHID_M25PXX:
4743 			if (xp->fdesc.flash_len == 0x14) {
4744 				xp->fdesc.flash_size = 0x100000;
4745 			} else if (xp->fdesc.flash_len == 0x15) {
4746 				xp->fdesc.flash_size = 0x200000;
4747 			} else {
4748 				rval = QL_FUNCTION_FAILED;
4749 			}
4750 			break;
4751 		default:
4752 			rval = QL_FUNCTION_FAILED;
4753 			break;
4754 		}
4755 		break;
4756 	case SST_FLASH:
4757 		switch (xp->fdesc.flash_id) {
4758 		case SST_FLASHID_128K:
4759 			xp->fdesc.flash_size = 0x20000;
4760 			break;
4761 		case SST_FLASHID_1024K_A:
4762 			xp->fdesc.flash_size = 0x100000;
4763 			xp->fdesc.block_size = 0x8000;
4764 			xp->fdesc.erase_cmd = 0x52;
4765 			break;
4766 		case SST_FLASHID_1024K:
4767 		case SST_FLASHID_1024K_B:
4768 			xp->fdesc.flash_size = 0x100000;
4769 			break;
4770 		case SST_FLASHID_2048K:
4771 			xp->fdesc.flash_size = 0x200000;
4772 			break;
4773 		default:
4774 			rval = QL_FUNCTION_FAILED;
4775 			break;
4776 		}
4777 		break;
4778 	case MXIC_FLASH:
4779 		switch (xp->fdesc.flash_id) {
4780 		case MXIC_FLASHID_512K:
4781 			xp->fdesc.flash_size = 0x80000;
4782 			break;
4783 		case MXIC_FLASHID_1024K:
4784 			xp->fdesc.flash_size = 0x100000;
4785 			break;
4786 		case MXIC_FLASHID_25LXX:
4787 			if (xp->fdesc.flash_len == 0x14) {
4788 				xp->fdesc.flash_size = 0x100000;
4789 			} else if (xp->fdesc.flash_len == 0x15) {
4790 				xp->fdesc.flash_size = 0x200000;
4791 			} else {
4792 				rval = QL_FUNCTION_FAILED;
4793 			}
4794 			break;
4795 		default:
4796 			rval = QL_FUNCTION_FAILED;
4797 			break;
4798 		}
4799 		break;
4800 	case ATMEL_FLASH:
4801 		switch (xp->fdesc.flash_id) {
4802 		case ATMEL_FLASHID_1024K:
4803 			xp->fdesc.flash_size = 0x100000;
4804 			xp->fdesc.write_disable_bits = 0xbc;
4805 			xp->fdesc.unprotect_sector_cmd = 0x39;
4806 			xp->fdesc.protect_sector_cmd = 0x36;
4807 			break;
4808 		default:
4809 			rval = QL_FUNCTION_FAILED;
4810 			break;
4811 		}
4812 		break;
4813 	case WINBOND_FLASH:
4814 		switch (xp->fdesc.flash_id) {
4815 		case WINBOND_FLASHID:
4816 			if (xp->fdesc.flash_len == 0x15) {
4817 				xp->fdesc.flash_size = 0x200000;
4818 			} else if (xp->fdesc.flash_len == 0x16) {
4819 				xp->fdesc.flash_size = 0x400000;
4820 			} else if (xp->fdesc.flash_len == 0x17) {
4821 				xp->fdesc.flash_size = 0x800000;
4822 			} else {
4823 				rval = QL_FUNCTION_FAILED;
4824 			}
4825 			break;
4826 		default:
4827 			rval = QL_FUNCTION_FAILED;
4828 			break;
4829 		}
4830 		break;
4831 	case INTEL_FLASH:
4832 		switch (xp->fdesc.flash_id) {
4833 		case INTEL_FLASHID:
4834 			if (xp->fdesc.flash_len == 0x11) {
4835 				xp->fdesc.flash_size = 0x200000;
4836 			} else if (xp->fdesc.flash_len == 0x12) {
4837 				xp->fdesc.flash_size = 0x400000;
4838 			} else if (xp->fdesc.flash_len == 0x13) {
4839 				xp->fdesc.flash_size = 0x800000;
4840 			} else {
4841 				rval = QL_FUNCTION_FAILED;
4842 			}
4843 			break;
4844 		default:
4845 			rval = QL_FUNCTION_FAILED;
4846 			break;
4847 		}
4848 		break;
4849 	default:
4850 		rval = QL_FUNCTION_FAILED;
4851 		break;
4852 	}
4853 
4854 	/* Try flash table later. */
4855 	if (rval != QL_SUCCESS && CFG_IST(ha, CFG_CTRL_242581)) {
4856 		EL(ha, "no default id\n");
4857 		return (QL_SUCCESS);
4858 	}
4859 
4860 	/*
4861 	 * hack for non std 2312 and 6312 boards. hardware people need to
4862 	 * use either the 128k flash chip (original), or something larger.
4863 	 * For driver purposes, we'll treat it as a 128k flash chip.
4864 	 */
4865 	if ((ha->device_id == 0x2312 || ha->device_id == 0x6312 ||
4866 	    ha->device_id == 0x6322) && (xp->fdesc.flash_size > 0x20000) &&
4867 	    (CFG_IST(ha, CFG_SBUS_CARD) ==  0)) {
4868 		EL(ha, "chip exceeds max size: %xh, using 128k\n",
4869 		    xp->fdesc.flash_size);
4870 		xp->fdesc.flash_size = 0x20000;
4871 	}
4872 
4873 	if (rval == QL_SUCCESS) {
4874 		EL(ha, "man_id=%xh, flash_id=%xh, size=%xh\n",
4875 		    xp->fdesc.flash_manuf, xp->fdesc.flash_id,
4876 		    xp->fdesc.flash_size);
4877 	} else {
4878 		EL(ha, "unsupported mfr / type: man_id=%xh, flash_id=%xh\n",
4879 		    xp->fdesc.flash_manuf, xp->fdesc.flash_id);
4880 	}
4881 
4882 	return (rval);
4883 }
4884 
4885 /*
4886  * ql_flash_fcode_load
4887  *	Loads fcode data into flash from application.
4888  *
4889  * Input:
4890  *	ha:	adapter state pointer.
4891  *	bp:	user buffer address.
4892  *	size:	user buffer size.
4893  *	mode:	flags
4894  *
4895  * Returns:
4896  *
4897  * Context:
4898  *	Kernel context.
4899  */
4900 static int
4901 ql_flash_fcode_load(ql_adapter_state_t *ha, void *bp, uint32_t bsize,
4902     int mode)
4903 {
4904 	uint8_t		*bfp;
4905 	ql_xioctl_t	*xp = ha->xioctl;
4906 	int		rval = 0;
4907 
4908 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
4909 
4910 	if (bsize > xp->fdesc.flash_size) {
4911 		EL(ha, "failed, bufsize: %xh, flash size: %xh\n", bsize,
4912 		    xp->fdesc.flash_size);
4913 		return (ENOMEM);
4914 	}
4915 
4916 	if ((bfp = (uint8_t *)kmem_zalloc(bsize, KM_SLEEP)) == NULL) {
4917 		EL(ha, "failed, kmem_zalloc\n");
4918 		rval = ENOMEM;
4919 	} else  {
4920 		if (ddi_copyin(bp, bfp, bsize, mode) != 0) {
4921 			EL(ha, "failed, ddi_copyin\n");
4922 			rval = EFAULT;
4923 		} else if (ql_load_fcode(ha, bfp, bsize, 0) != QL_SUCCESS) {
4924 			EL(ha, "failed, load_fcode\n");
4925 			rval = EFAULT;
4926 		} else {
4927 			/* Reset caches on all adapter instances. */
4928 			ql_update_flash_caches(ha);
4929 			rval = 0;
4930 		}
4931 		kmem_free(bfp, bsize);
4932 	}
4933 
4934 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
4935 
4936 	return (rval);
4937 }
4938 
4939 /*
4940  * ql_load_fcode
4941  *	Loads fcode in to flash.
4942  *
4943  * Input:
4944  *	ha:	adapter state pointer.
4945  *	dp:	data pointer.
4946  *	size:	data length.
4947  *	addr:	flash byte address.
4948  *
4949  * Returns:
4950  *	ql local function return status code.
4951  *
4952  * Context:
4953  *	Kernel context.
4954  */
4955 int
4956 ql_load_fcode(ql_adapter_state_t *ha, uint8_t *dp, uint32_t size, uint32_t addr)
4957 {
4958 	uint32_t	cnt;
4959 	int		rval;
4960 
4961 	if (CFG_IST(ha, CFG_CTRL_242581)) {
4962 		return (ql_24xx_load_flash(ha, dp, size, addr));
4963 	}
4964 
4965 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
4966 
4967 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
4968 		/*
4969 		 * sbus has an additional check to make
4970 		 * sure they don't brick the HBA.
4971 		 */
4972 		if (dp[0] != 0xf1) {
4973 			EL(ha, "failed, incorrect fcode for sbus\n");
4974 			return (QL_FUNCTION_PARAMETER_ERROR);
4975 		}
4976 	}
4977 
4978 	GLOBAL_HW_LOCK();
4979 
4980 	/* Enable Flash Read/Write. */
4981 	ql_flash_enable(ha);
4982 
4983 	/* Erase flash prior to write. */
4984 	rval = ql_erase_flash(ha, 0);
4985 
4986 	if (rval == QL_SUCCESS) {
4987 		/* Write fcode data to flash. */
4988 		for (cnt = 0; cnt < (uint32_t)size; cnt++) {
4989 			/* Allow other system activity. */
4990 			if (cnt % 0x1000 == 0) {
4991 				drv_usecwait(1);
4992 			}
4993 			rval = ql_program_flash_address(ha, addr++, *dp++);
4994 			if (rval != QL_SUCCESS)
4995 				break;
4996 		}
4997 	}
4998 
4999 	ql_flash_disable(ha);
5000 
5001 	GLOBAL_HW_UNLOCK();
5002 
5003 	if (rval != QL_SUCCESS) {
5004 		EL(ha, "failed, rval=%xh\n", rval);
5005 	} else {
5006 		/*EMPTY*/
5007 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5008 	}
5009 	return (rval);
5010 }
5011 
5012 /*
5013  * ql_flash_fcode_dump
5014  *	Dumps FLASH to application.
5015  *
5016  * Input:
5017  *	ha:	adapter state pointer.
5018  *	bp:	user buffer address.
5019  *	bsize:	user buffer size
5020  *	faddr:	flash byte address
5021  *	mode:	flags
5022  *
5023  * Returns:
5024  *
5025  * Context:
5026  *	Kernel context.
5027  */
5028 static int
5029 ql_flash_fcode_dump(ql_adapter_state_t *ha, void *bp, uint32_t bsize,
5030     uint32_t faddr, int mode)
5031 {
5032 	uint8_t		*bfp;
5033 	int		rval;
5034 	ql_xioctl_t	*xp = ha->xioctl;
5035 
5036 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5037 
5038 	/* adjust max read size to flash size */
5039 	if (bsize > xp->fdesc.flash_size) {
5040 		EL(ha, "adjusting req=%xh, max=%xh\n", bsize,
5041 		    xp->fdesc.flash_size);
5042 		bsize = xp->fdesc.flash_size;
5043 	}
5044 
5045 	if ((bfp = (uint8_t *)kmem_zalloc(bsize, KM_SLEEP)) == NULL) {
5046 		EL(ha, "failed, kmem_zalloc\n");
5047 		rval = ENOMEM;
5048 	} else {
5049 		/* Dump Flash fcode. */
5050 		rval = ql_dump_fcode(ha, bfp, bsize, faddr);
5051 
5052 		if (rval != QL_SUCCESS) {
5053 			EL(ha, "failed, dump_fcode = %x\n", rval);
5054 			rval = EFAULT;
5055 		} else if (ddi_copyout(bfp, bp, bsize, mode) != 0) {
5056 			EL(ha, "failed, ddi_copyout\n");
5057 			rval = EFAULT;
5058 		} else {
5059 			rval = 0;
5060 		}
5061 		kmem_free(bfp, bsize);
5062 	}
5063 
5064 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5065 
5066 	return (rval);
5067 }
5068 
5069 /*
5070  * ql_dump_fcode
5071  *	Dumps fcode from flash.
5072  *
5073  * Input:
5074  *	ha:		adapter state pointer.
5075  *	dp:		data pointer.
5076  *	size:		data length in bytes.
5077  *	startpos:	starting position in flash (byte address).
5078  *
5079  * Returns:
5080  *	ql local function return status code.
5081  *
5082  * Context:
5083  *	Kernel context.
5084  *
5085  */
5086 int
5087 ql_dump_fcode(ql_adapter_state_t *ha, uint8_t *dp, uint32_t size,
5088     uint32_t startpos)
5089 {
5090 	uint32_t	cnt, data, addr;
5091 	uint8_t		bp[4];
5092 	int		rval = QL_SUCCESS;
5093 
5094 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5095 
5096 	/* make sure startpos+size doesn't exceed flash */
5097 	if (size + startpos > ha->xioctl->fdesc.flash_size) {
5098 		EL(ha, "exceeded flash range, sz=%xh, stp=%xh, flsz=%xh\n",
5099 		    size, startpos, ha->xioctl->fdesc.flash_size);
5100 		return (QL_FUNCTION_PARAMETER_ERROR);
5101 	}
5102 
5103 	if (CFG_IST(ha, CFG_CTRL_242581)) {
5104 		/* check start addr is 32 bit aligned for 24xx */
5105 		if ((startpos & 0x3) != 0) {
5106 			rval = ql_24xx_read_flash(ha,
5107 			    ha->flash_data_addr | startpos >> 2, &data);
5108 			if (rval != QL_SUCCESS) {
5109 				EL(ha, "failed2, rval = %xh\n", rval);
5110 				return (rval);
5111 			}
5112 			bp[0] = LSB(LSW(data));
5113 			bp[1] = MSB(LSW(data));
5114 			bp[2] = LSB(MSW(data));
5115 			bp[3] = MSB(MSW(data));
5116 			while (size && startpos & 0x3) {
5117 				*dp++ = bp[startpos & 0x3];
5118 				startpos++;
5119 				size--;
5120 			}
5121 			if (size == 0) {
5122 				QL_PRINT_9(CE_CONT, "(%d): done2\n",
5123 				    ha->instance);
5124 				return (rval);
5125 			}
5126 		}
5127 
5128 		/* adjust 24xx start addr for 32 bit words */
5129 		addr = startpos / 4 | ha->flash_data_addr;
5130 	}
5131 
5132 	GLOBAL_HW_LOCK();
5133 
5134 	/* Enable Flash Read/Write. */
5135 	if (CFG_IST(ha, CFG_CTRL_242581) == 0) {
5136 		ql_flash_enable(ha);
5137 	}
5138 
5139 	/* Read fcode data from flash. */
5140 	while (size) {
5141 		/* Allow other system activity. */
5142 		if (size % 0x1000 == 0) {
5143 			ql_delay(ha, 100000);
5144 		}
5145 		if (CFG_IST(ha, CFG_CTRL_242581)) {
5146 			rval = ql_24xx_read_flash(ha, addr++, &data);
5147 			if (rval != QL_SUCCESS) {
5148 				break;
5149 			}
5150 			bp[0] = LSB(LSW(data));
5151 			bp[1] = MSB(LSW(data));
5152 			bp[2] = LSB(MSW(data));
5153 			bp[3] = MSB(MSW(data));
5154 			for (cnt = 0; size && cnt < 4; size--) {
5155 				*dp++ = bp[cnt++];
5156 			}
5157 		} else {
5158 			*dp++ = (uint8_t)ql_read_flash_byte(ha, startpos++);
5159 			size--;
5160 		}
5161 	}
5162 
5163 	if (CFG_IST(ha, CFG_CTRL_242581) == 0) {
5164 		ql_flash_disable(ha);
5165 	}
5166 
5167 	GLOBAL_HW_UNLOCK();
5168 
5169 	if (rval != QL_SUCCESS) {
5170 		EL(ha, "failed, rval = %xh\n", rval);
5171 	} else {
5172 		/*EMPTY*/
5173 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5174 	}
5175 	return (rval);
5176 }
5177 
5178 /*
5179  * ql_program_flash_address
5180  *	Program flash address.
5181  *
5182  * Input:
5183  *	ha:	adapter state pointer.
5184  *	addr:	flash byte address.
5185  *	data:	data to be written to flash.
5186  *
5187  * Returns:
5188  *	ql local function return status code.
5189  *
5190  * Context:
5191  *	Kernel context.
5192  */
5193 static int
5194 ql_program_flash_address(ql_adapter_state_t *ha, uint32_t addr,
5195     uint8_t data)
5196 {
5197 	int	rval;
5198 
5199 	/* Write Program Command Sequence */
5200 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
5201 		ql_write_flash_byte(ha, 0x5555, 0xa0);
5202 		ql_write_flash_byte(ha, addr, data);
5203 	} else {
5204 		ql_write_flash_byte(ha, 0x5555, 0xaa);
5205 		ql_write_flash_byte(ha, 0x2aaa, 0x55);
5206 		ql_write_flash_byte(ha, 0x5555, 0xa0);
5207 		ql_write_flash_byte(ha, addr, data);
5208 	}
5209 
5210 	/* Wait for write to complete. */
5211 	rval = ql_poll_flash(ha, addr, data);
5212 
5213 	if (rval != QL_SUCCESS) {
5214 		EL(ha, "failed, rval=%xh\n", rval);
5215 	}
5216 	return (rval);
5217 }
5218 
5219 /*
5220  * ql_set_rnid_parameters
5221  *	Set RNID parameters.
5222  *
5223  * Input:
5224  *	ha:	adapter state pointer.
5225  *	cmd:	User space CT arguments pointer.
5226  *	mode:	flags.
5227  */
5228 static void
5229 ql_set_rnid_parameters(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5230 {
5231 	EXT_SET_RNID_REQ	tmp_set;
5232 	EXT_RNID_DATA		*tmp_buf;
5233 	int			rval = 0;
5234 
5235 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5236 
5237 	if (DRIVER_SUSPENDED(ha)) {
5238 		EL(ha, "failed, LOOP_NOT_READY\n");
5239 		cmd->Status = EXT_STATUS_BUSY;
5240 		cmd->ResponseLen = 0;
5241 		return;
5242 	}
5243 
5244 	cmd->ResponseLen = 0; /* NO response to caller. */
5245 	if (cmd->RequestLen != sizeof (EXT_SET_RNID_REQ)) {
5246 		/* parameter error */
5247 		EL(ha, "failed, RequestLen < EXT_SET_RNID_REQ, Len=%xh\n",
5248 		    cmd->RequestLen);
5249 		cmd->Status = EXT_STATUS_INVALID_PARAM;
5250 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
5251 		cmd->ResponseLen = 0;
5252 		return;
5253 	}
5254 
5255 	rval = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, &tmp_set,
5256 	    cmd->RequestLen, mode);
5257 	if (rval != 0) {
5258 		EL(ha, "failed, ddi_copyin\n");
5259 		cmd->Status = EXT_STATUS_COPY_ERR;
5260 		cmd->ResponseLen = 0;
5261 		return;
5262 	}
5263 
5264 	/* Allocate memory for command. */
5265 	tmp_buf = kmem_zalloc(sizeof (EXT_RNID_DATA), KM_SLEEP);
5266 	if (tmp_buf == NULL) {
5267 		EL(ha, "failed, kmem_zalloc\n");
5268 		cmd->Status = EXT_STATUS_NO_MEMORY;
5269 		cmd->ResponseLen = 0;
5270 		return;
5271 	}
5272 
5273 	rval = ql_get_rnid_params(ha, sizeof (EXT_RNID_DATA),
5274 	    (caddr_t)tmp_buf);
5275 	if (rval != QL_SUCCESS) {
5276 		/* error */
5277 		EL(ha, "failed, get_rnid_params_mbx=%xh\n", rval);
5278 		kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
5279 		cmd->Status = EXT_STATUS_ERR;
5280 		cmd->ResponseLen = 0;
5281 		return;
5282 	}
5283 
5284 	/* Now set the requested params. */
5285 	bcopy(tmp_set.IPVersion, tmp_buf->IPVersion, 2);
5286 	bcopy(tmp_set.UDPPortNumber, tmp_buf->UDPPortNumber, 2);
5287 	bcopy(tmp_set.IPAddress, tmp_buf->IPAddress, 16);
5288 
5289 	rval = ql_set_rnid_params(ha, sizeof (EXT_RNID_DATA),
5290 	    (caddr_t)tmp_buf);
5291 	if (rval != QL_SUCCESS) {
5292 		/* error */
5293 		EL(ha, "failed, set_rnid_params_mbx=%xh\n", rval);
5294 		cmd->Status = EXT_STATUS_ERR;
5295 		cmd->ResponseLen = 0;
5296 	}
5297 
5298 	kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
5299 
5300 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5301 }
5302 
5303 /*
5304  * ql_get_rnid_parameters
5305  *	Get RNID parameters.
5306  *
5307  * Input:
5308  *	ha:	adapter state pointer.
5309  *	cmd:	User space CT arguments pointer.
5310  *	mode:	flags.
5311  */
5312 static void
5313 ql_get_rnid_parameters(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5314 {
5315 	EXT_RNID_DATA	*tmp_buf;
5316 	uint32_t	rval;
5317 
5318 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5319 
5320 	if (DRIVER_SUSPENDED(ha)) {
5321 		EL(ha, "failed, LOOP_NOT_READY\n");
5322 		cmd->Status = EXT_STATUS_BUSY;
5323 		cmd->ResponseLen = 0;
5324 		return;
5325 	}
5326 
5327 	/* Allocate memory for command. */
5328 	tmp_buf = kmem_zalloc(sizeof (EXT_RNID_DATA), KM_SLEEP);
5329 	if (tmp_buf == NULL) {
5330 		EL(ha, "failed, kmem_zalloc\n");
5331 		cmd->Status = EXT_STATUS_NO_MEMORY;
5332 		cmd->ResponseLen = 0;
5333 		return;
5334 	}
5335 
5336 	/* Send command */
5337 	rval = ql_get_rnid_params(ha, sizeof (EXT_RNID_DATA),
5338 	    (caddr_t)tmp_buf);
5339 	if (rval != QL_SUCCESS) {
5340 		/* error */
5341 		EL(ha, "failed, get_rnid_params_mbx=%xh\n", rval);
5342 		kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
5343 		cmd->Status = EXT_STATUS_ERR;
5344 		cmd->ResponseLen = 0;
5345 		return;
5346 	}
5347 
5348 	/* Copy the response */
5349 	if (ql_send_buffer_data((caddr_t)tmp_buf,
5350 	    (caddr_t)(uintptr_t)cmd->ResponseAdr,
5351 	    sizeof (EXT_RNID_DATA), mode) != sizeof (EXT_RNID_DATA)) {
5352 		EL(ha, "failed, ddi_copyout\n");
5353 		cmd->Status = EXT_STATUS_COPY_ERR;
5354 		cmd->ResponseLen = 0;
5355 	} else {
5356 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5357 		cmd->ResponseLen = sizeof (EXT_RNID_DATA);
5358 	}
5359 
5360 	kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
5361 }
5362 
5363 /*
5364  * ql_reset_statistics
5365  *	Performs EXT_SC_RST_STATISTICS subcommand. of EXT_CC_SET_DATA.
5366  *
5367  * Input:
5368  *	ha:	adapter state pointer.
5369  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5370  *
5371  * Returns:
5372  *	None, request status indicated in cmd->Status.
5373  *
5374  * Context:
5375  *	Kernel context.
5376  */
5377 static int
5378 ql_reset_statistics(ql_adapter_state_t *ha, EXT_IOCTL *cmd)
5379 {
5380 	ql_xioctl_t		*xp = ha->xioctl;
5381 	int			rval = 0;
5382 
5383 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5384 
5385 	if (DRIVER_SUSPENDED(ha)) {
5386 		EL(ha, "failed, LOOP_NOT_READY\n");
5387 		cmd->Status = EXT_STATUS_BUSY;
5388 		cmd->ResponseLen = 0;
5389 		return (QL_FUNCTION_SUSPENDED);
5390 	}
5391 
5392 	rval = ql_reset_link_status(ha);
5393 	if (rval != QL_SUCCESS) {
5394 		EL(ha, "failed, reset_link_status_mbx=%xh\n", rval);
5395 		cmd->Status = EXT_STATUS_MAILBOX;
5396 		cmd->DetailStatus = rval;
5397 		cmd->ResponseLen = 0;
5398 	}
5399 
5400 	TASK_DAEMON_LOCK(ha);
5401 	xp->IosRequested = 0;
5402 	xp->BytesRequested = 0;
5403 	xp->IOInputRequests = 0;
5404 	xp->IOOutputRequests = 0;
5405 	xp->IOControlRequests = 0;
5406 	xp->IOInputMByteCnt = 0;
5407 	xp->IOOutputMByteCnt = 0;
5408 	xp->IOOutputByteCnt = 0;
5409 	xp->IOInputByteCnt = 0;
5410 	TASK_DAEMON_UNLOCK(ha);
5411 
5412 	INTR_LOCK(ha);
5413 	xp->ControllerErrorCount = 0;
5414 	xp->DeviceErrorCount = 0;
5415 	xp->TotalLipResets = 0;
5416 	xp->TotalInterrupts = 0;
5417 	INTR_UNLOCK(ha);
5418 
5419 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5420 
5421 	return (rval);
5422 }
5423 
5424 /*
5425  * ql_get_statistics
5426  *	Performs EXT_SC_GET_STATISTICS subcommand. of EXT_CC_GET_DATA.
5427  *
5428  * Input:
5429  *	ha:	adapter state pointer.
5430  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5431  *	mode:	flags.
5432  *
5433  * Returns:
5434  *	None, request status indicated in cmd->Status.
5435  *
5436  * Context:
5437  *	Kernel context.
5438  */
5439 static void
5440 ql_get_statistics(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5441 {
5442 	EXT_HBA_PORT_STAT	ps = {0};
5443 	ql_link_stats_t		*ls;
5444 	int			rval;
5445 	ql_xioctl_t		*xp = ha->xioctl;
5446 	int			retry = 10;
5447 
5448 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5449 
5450 	while (ha->task_daemon_flags &
5451 	    (ABORT_ISP_ACTIVE | LOOP_RESYNC_ACTIVE | DRIVER_STALL)) {
5452 		ql_delay(ha, 10000000);	/* 10 second delay */
5453 
5454 		retry--;
5455 
5456 		if (retry == 0) { /* effectively 100 seconds */
5457 			EL(ha, "failed, LOOP_NOT_READY\n");
5458 			cmd->Status = EXT_STATUS_BUSY;
5459 			cmd->ResponseLen = 0;
5460 			return;
5461 		}
5462 	}
5463 
5464 	/* Allocate memory for command. */
5465 	ls = kmem_zalloc(sizeof (ql_link_stats_t), KM_SLEEP);
5466 	if (ls == NULL) {
5467 		EL(ha, "failed, kmem_zalloc\n");
5468 		cmd->Status = EXT_STATUS_NO_MEMORY;
5469 		cmd->ResponseLen = 0;
5470 		return;
5471 	}
5472 
5473 	/*
5474 	 * I think these are supposed to be port statistics
5475 	 * the loop ID or port ID should be in cmd->Instance.
5476 	 */
5477 	rval = ql_get_status_counts(ha, (uint16_t)
5478 	    (ha->task_daemon_flags & LOOP_DOWN ? 0xFF : ha->loop_id),
5479 	    sizeof (ql_link_stats_t), (caddr_t)ls, 0);
5480 	if (rval != QL_SUCCESS) {
5481 		EL(ha, "failed, get_link_status=%xh, id=%xh\n", rval,
5482 		    ha->loop_id);
5483 		cmd->Status = EXT_STATUS_MAILBOX;
5484 		cmd->DetailStatus = rval;
5485 		cmd->ResponseLen = 0;
5486 	} else {
5487 		ps.ControllerErrorCount = xp->ControllerErrorCount;
5488 		ps.DeviceErrorCount = xp->DeviceErrorCount;
5489 		ps.IoCount = (uint32_t)(xp->IOInputRequests +
5490 		    xp->IOOutputRequests + xp->IOControlRequests);
5491 		ps.MBytesCount = (uint32_t)(xp->IOInputMByteCnt +
5492 		    xp->IOOutputMByteCnt);
5493 		ps.LipResetCount = xp->TotalLipResets;
5494 		ps.InterruptCount = xp->TotalInterrupts;
5495 		ps.LinkFailureCount = LE_32(ls->link_fail_cnt);
5496 		ps.LossOfSyncCount = LE_32(ls->sync_loss_cnt);
5497 		ps.LossOfSignalsCount = LE_32(ls->signal_loss_cnt);
5498 		ps.PrimitiveSeqProtocolErrorCount = LE_32(ls->prot_err_cnt);
5499 		ps.InvalidTransmissionWordCount = LE_32(ls->inv_xmit_cnt);
5500 		ps.InvalidCRCCount = LE_32(ls->inv_crc_cnt);
5501 
5502 		rval = ddi_copyout((void *)&ps,
5503 		    (void *)(uintptr_t)cmd->ResponseAdr,
5504 		    sizeof (EXT_HBA_PORT_STAT), mode);
5505 		if (rval != 0) {
5506 			EL(ha, "failed, ddi_copyout\n");
5507 			cmd->Status = EXT_STATUS_COPY_ERR;
5508 			cmd->ResponseLen = 0;
5509 		} else {
5510 			cmd->ResponseLen = sizeof (EXT_HBA_PORT_STAT);
5511 		}
5512 	}
5513 
5514 	kmem_free(ls, sizeof (ql_link_stats_t));
5515 
5516 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5517 }
5518 
5519 /*
5520  * ql_get_statistics_fc
5521  *	Performs EXT_SC_GET_FC_STATISTICS subcommand. of EXT_CC_GET_DATA.
5522  *
5523  * Input:
5524  *	ha:	adapter state pointer.
5525  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5526  *	mode:	flags.
5527  *
5528  * Returns:
5529  *	None, request status indicated in cmd->Status.
5530  *
5531  * Context:
5532  *	Kernel context.
5533  */
5534 static void
5535 ql_get_statistics_fc(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5536 {
5537 	EXT_HBA_PORT_STAT	ps = {0};
5538 	ql_link_stats_t		*ls;
5539 	int			rval;
5540 	uint16_t		qlnt;
5541 	EXT_DEST_ADDR		pextdestaddr;
5542 	uint8_t			*name;
5543 	ql_tgt_t		*tq = NULL;
5544 	int			retry = 10;
5545 
5546 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5547 
5548 	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
5549 	    (void *)&pextdestaddr, sizeof (EXT_DEST_ADDR), mode) != 0) {
5550 		EL(ha, "failed, ddi_copyin\n");
5551 		cmd->Status = EXT_STATUS_COPY_ERR;
5552 		cmd->ResponseLen = 0;
5553 		return;
5554 	}
5555 
5556 	qlnt = QLNT_PORT;
5557 	name = pextdestaddr.DestAddr.WWPN;
5558 
5559 	QL_PRINT_9(CE_CONT, "(%d): wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
5560 	    ha->instance, name[0], name[1], name[2], name[3], name[4],
5561 	    name[5], name[6], name[7]);
5562 
5563 	tq = ql_find_port(ha, name, qlnt);
5564 
5565 	if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
5566 		EL(ha, "failed, fc_port not found\n");
5567 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
5568 		cmd->ResponseLen = 0;
5569 		return;
5570 	}
5571 
5572 	while (ha->task_daemon_flags &
5573 	    (ABORT_ISP_ACTIVE | LOOP_RESYNC_ACTIVE  | DRIVER_STALL)) {
5574 		ql_delay(ha, 10000000);	/* 10 second delay */
5575 
5576 		retry--;
5577 
5578 		if (retry == 0) { /* effectively 100 seconds */
5579 			EL(ha, "failed, LOOP_NOT_READY\n");
5580 			cmd->Status = EXT_STATUS_BUSY;
5581 			cmd->ResponseLen = 0;
5582 			return;
5583 		}
5584 	}
5585 
5586 	/* Allocate memory for command. */
5587 	ls = kmem_zalloc(sizeof (ql_link_stats_t), KM_SLEEP);
5588 	if (ls == NULL) {
5589 		EL(ha, "failed, kmem_zalloc\n");
5590 		cmd->Status = EXT_STATUS_NO_MEMORY;
5591 		cmd->ResponseLen = 0;
5592 		return;
5593 	}
5594 
5595 	rval = ql_get_link_status(ha, tq->loop_id, sizeof (ql_link_stats_t),
5596 	    (caddr_t)ls, 0);
5597 	if (rval != QL_SUCCESS) {
5598 		EL(ha, "failed, get_link_status=%xh, d_id=%xh\n", rval,
5599 		    tq->d_id.b24);
5600 		cmd->Status = EXT_STATUS_MAILBOX;
5601 		cmd->DetailStatus = rval;
5602 		cmd->ResponseLen = 0;
5603 	} else {
5604 		ps.LinkFailureCount = LE_32(ls->link_fail_cnt);
5605 		ps.LossOfSyncCount = LE_32(ls->sync_loss_cnt);
5606 		ps.LossOfSignalsCount = LE_32(ls->signal_loss_cnt);
5607 		ps.PrimitiveSeqProtocolErrorCount = LE_32(ls->prot_err_cnt);
5608 		ps.InvalidTransmissionWordCount = LE_32(ls->inv_xmit_cnt);
5609 		ps.InvalidCRCCount = LE_32(ls->inv_crc_cnt);
5610 
5611 		rval = ddi_copyout((void *)&ps,
5612 		    (void *)(uintptr_t)cmd->ResponseAdr,
5613 		    sizeof (EXT_HBA_PORT_STAT), mode);
5614 
5615 		if (rval != 0) {
5616 			EL(ha, "failed, ddi_copyout\n");
5617 			cmd->Status = EXT_STATUS_COPY_ERR;
5618 			cmd->ResponseLen = 0;
5619 		} else {
5620 			cmd->ResponseLen = sizeof (EXT_HBA_PORT_STAT);
5621 		}
5622 	}
5623 
5624 	kmem_free(ls, sizeof (ql_link_stats_t));
5625 
5626 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5627 }
5628 
5629 /*
5630  * ql_get_statistics_fc4
5631  *	Performs EXT_SC_GET_FC_STATISTICS subcommand. of EXT_CC_GET_DATA.
5632  *
5633  * Input:
5634  *	ha:	adapter state pointer.
5635  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5636  *	mode:	flags.
5637  *
5638  * Returns:
5639  *	None, request status indicated in cmd->Status.
5640  *
5641  * Context:
5642  *	Kernel context.
5643  */
5644 static void
5645 ql_get_statistics_fc4(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5646 {
5647 	uint32_t		rval;
5648 	EXT_HBA_FC4STATISTICS	fc4stats = {0};
5649 	ql_xioctl_t		*xp = ha->xioctl;
5650 
5651 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5652 
5653 	fc4stats.InputRequests = xp->IOInputRequests;
5654 	fc4stats.OutputRequests = xp->IOOutputRequests;
5655 	fc4stats.ControlRequests = xp->IOControlRequests;
5656 	fc4stats.InputMegabytes = xp->IOInputMByteCnt;
5657 	fc4stats.OutputMegabytes = xp->IOOutputMByteCnt;
5658 
5659 	rval = ddi_copyout((void *)&fc4stats,
5660 	    (void *)(uintptr_t)cmd->ResponseAdr,
5661 	    sizeof (EXT_HBA_FC4STATISTICS), mode);
5662 
5663 	if (rval != 0) {
5664 		EL(ha, "failed, ddi_copyout\n");
5665 		cmd->Status = EXT_STATUS_COPY_ERR;
5666 		cmd->ResponseLen = 0;
5667 	} else {
5668 		cmd->ResponseLen = sizeof (EXT_HBA_FC4STATISTICS);
5669 	}
5670 
5671 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5672 }
5673 
5674 /*
5675  * ql_set_led_state
5676  *	Performs EXT_SET_BEACON_STATE subcommand of EXT_CC_SET_DATA.
5677  *
5678  * Input:
5679  *	ha:	adapter state pointer.
5680  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5681  *	mode:	flags.
5682  *
5683  * Returns:
5684  *	None, request status indicated in cmd->Status.
5685  *
5686  * Context:
5687  *	Kernel context.
5688  */
5689 static void
5690 ql_set_led_state(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5691 {
5692 	EXT_BEACON_CONTROL	bstate;
5693 	uint32_t		rval;
5694 	ql_xioctl_t		*xp = ha->xioctl;
5695 
5696 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5697 
5698 	if (cmd->RequestLen < sizeof (EXT_BEACON_CONTROL)) {
5699 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
5700 		cmd->DetailStatus = sizeof (EXT_BEACON_CONTROL);
5701 		EL(ha, "done - failed, RequestLen < EXT_BEACON_CONTROL,"
5702 		    " Len=%xh\n", cmd->RequestLen);
5703 		cmd->ResponseLen = 0;
5704 		return;
5705 	}
5706 
5707 	if (ha->device_id < 0x2300) {
5708 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
5709 		cmd->DetailStatus = 0;
5710 		EL(ha, "done - failed, Invalid function for HBA model\n");
5711 		cmd->ResponseLen = 0;
5712 		return;
5713 	}
5714 
5715 	rval = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, &bstate,
5716 	    cmd->RequestLen, mode);
5717 
5718 	if (rval != 0) {
5719 		cmd->Status = EXT_STATUS_COPY_ERR;
5720 		EL(ha, "done -  failed, ddi_copyin\n");
5721 		return;
5722 	}
5723 
5724 	switch (bstate.State) {
5725 	case EXT_DEF_GRN_BLINK_OFF:	/* turn beacon off */
5726 		if (xp->ledstate.BeaconState == BEACON_OFF) {
5727 			/* not quite an error -- LED state is already off */
5728 			cmd->Status = EXT_STATUS_OK;
5729 			EL(ha, "LED off request -- LED is already off\n");
5730 			break;
5731 		}
5732 
5733 		xp->ledstate.BeaconState = BEACON_OFF;
5734 		xp->ledstate.LEDflags = LED_ALL_OFF;
5735 
5736 		if ((rval = ql_wrapup_led(ha)) != QL_SUCCESS) {
5737 			cmd->Status = EXT_STATUS_MAILBOX;
5738 		} else {
5739 			cmd->Status = EXT_STATUS_OK;
5740 		}
5741 		break;
5742 
5743 	case EXT_DEF_GRN_BLINK_ON:	/* turn beacon on */
5744 		if (xp->ledstate.BeaconState == BEACON_ON) {
5745 			/* not quite an error -- LED state is already on */
5746 			cmd->Status = EXT_STATUS_OK;
5747 			EL(ha, "LED on request  - LED is already on\n");
5748 			break;
5749 		}
5750 
5751 		if ((rval = ql_setup_led(ha)) != QL_SUCCESS) {
5752 			cmd->Status = EXT_STATUS_MAILBOX;
5753 			break;
5754 		}
5755 
5756 		if (CFG_IST(ha, CFG_CTRL_242581)) {
5757 			xp->ledstate.LEDflags = LED_YELLOW_24 | LED_AMBER_24;
5758 		} else {
5759 			xp->ledstate.LEDflags = LED_GREEN;
5760 		}
5761 		xp->ledstate.BeaconState = BEACON_ON;
5762 
5763 		cmd->Status = EXT_STATUS_OK;
5764 		break;
5765 	default:
5766 		cmd->Status = EXT_STATUS_ERR;
5767 		EL(ha, "failed, unknown state request %xh\n", bstate.State);
5768 		break;
5769 	}
5770 
5771 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5772 }
5773 
5774 /*
5775  * ql_get_led_state
5776  *	Performs EXT_GET_BEACON_STATE subcommand of EXT_CC_GET_DATA.
5777  *
5778  * Input:
5779  *	ha:	adapter state pointer.
5780  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5781  *	mode:	flags.
5782  *
5783  * Returns:
5784  *	None, request status indicated in cmd->Status.
5785  *
5786  * Context:
5787  *	Kernel context.
5788  */
5789 static void
5790 ql_get_led_state(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5791 {
5792 	EXT_BEACON_CONTROL	bstate = {0};
5793 	uint32_t		rval;
5794 	ql_xioctl_t		*xp = ha->xioctl;
5795 
5796 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5797 
5798 	if (cmd->ResponseLen < sizeof (EXT_BEACON_CONTROL)) {
5799 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
5800 		cmd->DetailStatus = sizeof (EXT_BEACON_CONTROL);
5801 		EL(ha, "done - failed, ResponseLen < EXT_BEACON_CONTROL,"
5802 		    "Len=%xh\n", cmd->ResponseLen);
5803 		cmd->ResponseLen = 0;
5804 		return;
5805 	}
5806 
5807 	if (ha->device_id < 0x2300) {
5808 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
5809 		cmd->DetailStatus = 0;
5810 		EL(ha, "done - failed, Invalid function for HBA model\n");
5811 		cmd->ResponseLen = 0;
5812 		return;
5813 	}
5814 
5815 	if (ha->task_daemon_flags & ABORT_ISP_ACTIVE) {
5816 		cmd->Status = EXT_STATUS_BUSY;
5817 		EL(ha, "done -  failed, isp abort active\n");
5818 		cmd->ResponseLen = 0;
5819 		return;
5820 	}
5821 
5822 	/* inform the user of the current beacon state (off or on) */
5823 	bstate.State = xp->ledstate.BeaconState;
5824 
5825 	rval = ddi_copyout((void *)&bstate,
5826 	    (void *)(uintptr_t)cmd->ResponseAdr,
5827 	    sizeof (EXT_BEACON_CONTROL), mode);
5828 
5829 	if (rval != 0) {
5830 		EL(ha, "failed, ddi_copyout\n");
5831 		cmd->Status = EXT_STATUS_COPY_ERR;
5832 		cmd->ResponseLen = 0;
5833 	} else {
5834 		cmd->Status = EXT_STATUS_OK;
5835 		cmd->ResponseLen = sizeof (EXT_BEACON_CONTROL);
5836 	}
5837 
5838 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5839 }
5840 
5841 /*
5842  * ql_blink_led
5843  *	Determine the next state of the LED and drive it
5844  *
5845  * Input:
5846  *	ha:	adapter state pointer.
5847  *
5848  * Context:
5849  *	Interrupt context.
5850  */
5851 void
5852 ql_blink_led(ql_adapter_state_t *ha)
5853 {
5854 	uint32_t		nextstate;
5855 	ql_xioctl_t		*xp = ha->xioctl;
5856 
5857 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5858 
5859 	if (xp->ledstate.BeaconState == BEACON_ON) {
5860 		/* determine the next led state */
5861 		if (CFG_IST(ha, CFG_CTRL_242581)) {
5862 			nextstate = (xp->ledstate.LEDflags) &
5863 			    (~(RD32_IO_REG(ha, gpiod)));
5864 		} else {
5865 			nextstate = (xp->ledstate.LEDflags) &
5866 			    (~(RD16_IO_REG(ha, gpiod)));
5867 		}
5868 
5869 		/* turn the led on or off */
5870 		ql_drive_led(ha, nextstate);
5871 	}
5872 
5873 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5874 }
5875 
5876 /*
5877  * ql_drive_led
5878  *	drive the led's as determined by LEDflags
5879  *
5880  * Input:
5881  *	ha:		adapter state pointer.
5882  *	LEDflags:	LED flags
5883  *
5884  * Context:
5885  *	Kernel/Interrupt context.
5886  */
5887 static void
5888 ql_drive_led(ql_adapter_state_t *ha, uint32_t LEDflags)
5889 {
5890 
5891 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5892 
5893 	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
5894 
5895 		uint16_t	gpio_enable, gpio_data;
5896 
5897 		/* setup to send new data */
5898 		gpio_enable = (uint16_t)RD16_IO_REG(ha, gpioe);
5899 		gpio_enable = (uint16_t)(gpio_enable | LED_MASK);
5900 		WRT16_IO_REG(ha, gpioe, gpio_enable);
5901 
5902 		/* read current data and clear out old led data */
5903 		gpio_data = (uint16_t)RD16_IO_REG(ha, gpiod);
5904 		gpio_data = (uint16_t)(gpio_data & ~LED_MASK);
5905 
5906 		/* set in the new led data. */
5907 		gpio_data = (uint16_t)(gpio_data | LEDflags);
5908 
5909 		/* write out the new led data */
5910 		WRT16_IO_REG(ha, gpiod, gpio_data);
5911 
5912 	} else if (CFG_IST(ha, CFG_CTRL_242581)) {
5913 
5914 		uint32_t	gpio_data;
5915 
5916 		/* setup to send new data */
5917 		gpio_data = RD32_IO_REG(ha, gpiod);
5918 		gpio_data |= LED_MASK_UPDATE_24;
5919 		WRT32_IO_REG(ha, gpiod, gpio_data);
5920 
5921 		/* read current data and clear out old led data */
5922 		gpio_data = RD32_IO_REG(ha, gpiod);
5923 		gpio_data &= ~LED_MASK_COLORS_24;
5924 
5925 		/* set in the new led data */
5926 		gpio_data |= LEDflags;
5927 
5928 		/* write out the new led data */
5929 		WRT32_IO_REG(ha, gpiod, gpio_data);
5930 
5931 	} else {
5932 		EL(ha, "unsupported HBA: %xh", ha->device_id);
5933 	}
5934 
5935 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5936 }
5937 
5938 /*
5939  * ql_setup_led
5940  *	Setup LED for driver control
5941  *
5942  * Input:
5943  *	ha:	adapter state pointer.
5944  *
5945  * Context:
5946  *	Kernel/Interrupt context.
5947  */
5948 static uint32_t
5949 ql_setup_led(ql_adapter_state_t *ha)
5950 {
5951 	uint32_t	rval;
5952 	ql_mbx_data_t	mr;
5953 
5954 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5955 
5956 	/* decouple the LED control from the fw */
5957 	rval = ql_get_firmware_option(ha, &mr);
5958 	if (rval != QL_SUCCESS) {
5959 		EL(ha, "failed, get_firmware_option=%xh\n", rval);
5960 		return (rval);
5961 	}
5962 
5963 	/* set the appropriate options */
5964 	mr.mb[1] = (uint16_t)(mr.mb[1] | FO1_DISABLE_GPIO);
5965 
5966 	/* send it back to the firmware */
5967 	rval = ql_set_firmware_option(ha, &mr);
5968 	if (rval != QL_SUCCESS) {
5969 		EL(ha, "failed, set_firmware_option=%xh\n", rval);
5970 		return (rval);
5971 	}
5972 
5973 	/* initally, turn the LED's off */
5974 	ql_drive_led(ha, LED_ALL_OFF);
5975 
5976 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5977 
5978 	return (rval);
5979 }
5980 
5981 /*
5982  * ql_wrapup_led
5983  *	Return LED control to the firmware
5984  *
5985  * Input:
5986  *	ha:	adapter state pointer.
5987  *
5988  * Context:
5989  *	Kernel/Interrupt context.
5990  */
5991 static uint32_t
5992 ql_wrapup_led(ql_adapter_state_t *ha)
5993 {
5994 	uint32_t	rval;
5995 	ql_mbx_data_t	mr;
5996 
5997 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5998 
5999 	/* Turn all LED's off */
6000 	ql_drive_led(ha, LED_ALL_OFF);
6001 
6002 	if (CFG_IST(ha, CFG_CTRL_242581)) {
6003 
6004 		uint32_t	gpio_data;
6005 
6006 		/* disable the LED update mask */
6007 		gpio_data = RD32_IO_REG(ha, gpiod);
6008 		gpio_data &= ~LED_MASK_UPDATE_24;
6009 
6010 		/* write out the data */
6011 		WRT32_IO_REG(ha, gpiod, gpio_data);
6012 	}
6013 
6014 	/* give LED control back to the f/w */
6015 	rval = ql_get_firmware_option(ha, &mr);
6016 	if (rval != QL_SUCCESS) {
6017 		EL(ha, "failed, get_firmware_option=%xh\n", rval);
6018 		return (rval);
6019 	}
6020 
6021 	mr.mb[1] = (uint16_t)(mr.mb[1] & ~FO1_DISABLE_GPIO);
6022 
6023 	rval = ql_set_firmware_option(ha, &mr);
6024 	if (rval != QL_SUCCESS) {
6025 		EL(ha, "failed, set_firmware_option=%xh\n", rval);
6026 		return (rval);
6027 	}
6028 
6029 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
6030 
6031 	return (rval);
6032 }
6033 
6034 /*
6035  * ql_get_port_summary
6036  *	Performs EXT_SC_GET_PORT_SUMMARY subcommand. of EXT_CC_GET_DATA.
6037  *
6038  *	The EXT_IOCTL->RequestAdr points to a single
6039  *	UINT32 which identifies the device type.
6040  *
6041  * Input:
6042  *	ha:	adapter state pointer.
6043  *	cmd:	Local EXT_IOCTL cmd struct pointer.
6044  *	mode:	flags.
6045  *
6046  * Returns:
6047  *	None, request status indicated in cmd->Status.
6048  *
6049  * Context:
6050  *	Kernel context.
6051  */
6052 static void
6053 ql_get_port_summary(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
6054 {
6055 	EXT_DEVICEDATA		dd = {0};
6056 	EXT_DEVICEDATA		*uddp;
6057 	ql_link_t		*link;
6058 	ql_tgt_t		*tq;
6059 	uint32_t		rlen, dev_type, index;
6060 	int			rval = 0;
6061 	EXT_DEVICEDATAENTRY	*uddep, *ddep;
6062 
6063 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
6064 
6065 	ddep = &dd.EntryList[0];
6066 
6067 	/*
6068 	 * Get the type of device the requestor is looking for.
6069 	 *
6070 	 * We ignore this for now.
6071 	 */
6072 	rval = ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
6073 	    (void *)&dev_type, sizeof (dev_type), mode);
6074 	if (rval != 0) {
6075 		cmd->Status = EXT_STATUS_COPY_ERR;
6076 		cmd->ResponseLen = 0;
6077 		EL(ha, "failed, ddi_copyin\n");
6078 		return;
6079 	}
6080 	/*
6081 	 * Count the number of entries to be returned. Count devices
6082 	 * that are offlline, but have been persistently bound.
6083 	 */
6084 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
6085 		for (link = ha->dev[index].first; link != NULL;
6086 		    link = link->next) {
6087 			tq = link->base_address;
6088 			if (tq->flags & TQF_INITIATOR_DEVICE ||
6089 			    !VALID_TARGET_ID(ha, tq->loop_id)) {
6090 				continue;	/* Skip this one */
6091 			}
6092 			dd.TotalDevices++;
6093 		}
6094 	}
6095 	/*
6096 	 * Compute the number of entries that can be returned
6097 	 * based upon the size of caller's response buffer.
6098 	 */
6099 	dd.ReturnListEntryCount = 0;
6100 	if (dd.TotalDevices == 0) {
6101 		rlen = sizeof (EXT_DEVICEDATA) - sizeof (EXT_DEVICEDATAENTRY);
6102 	} else {
6103 		rlen = (uint32_t)(sizeof (EXT_DEVICEDATA) +
6104 		    (sizeof (EXT_DEVICEDATAENTRY) * (dd.TotalDevices - 1)));
6105 	}
6106 	if (rlen > cmd->ResponseLen) {
6107 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
6108 		cmd->DetailStatus = rlen;
6109 		EL(ha, "failed, rlen > ResponseLen, rlen=%d, Len=%d\n",
6110 		    rlen, cmd->ResponseLen);
6111 		cmd->ResponseLen = 0;
6112 		return;
6113 	}
6114 	cmd->ResponseLen = 0;
6115 	uddp = (EXT_DEVICEDATA *)(uintptr_t)cmd->ResponseAdr;
6116 	uddep = &uddp->EntryList[0];
6117 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
6118 		for (link = ha->dev[index].first; link != NULL;
6119 		    link = link->next) {
6120 			tq = link->base_address;
6121 			if (tq->flags & TQF_INITIATOR_DEVICE ||
6122 			    !VALID_TARGET_ID(ha, tq->loop_id)) {
6123 				continue;	/* Skip this one */
6124 			}
6125 
6126 			bzero((void *)ddep, sizeof (EXT_DEVICEDATAENTRY));
6127 
6128 			bcopy(tq->node_name, ddep->NodeWWN, 8);
6129 			bcopy(tq->port_name, ddep->PortWWN, 8);
6130 
6131 			ddep->PortID[0] = tq->d_id.b.domain;
6132 			ddep->PortID[1] = tq->d_id.b.area;
6133 			ddep->PortID[2] = tq->d_id.b.al_pa;
6134 
6135 			bcopy(tq->port_name,
6136 			    (caddr_t)&ddep->TargetAddress.Target, 8);
6137 
6138 			ddep->DeviceFlags = tq->flags;
6139 			ddep->LoopID = tq->loop_id;
6140 			QL_PRINT_9(CE_CONT, "(%d): Tgt=%lld, loop=%xh, "
6141 			    "wwnn=%02x%02x%02x%02x%02x%02x%02x%02x, "
6142 			    "wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
6143 			    ha->instance, ddep->TargetAddress.Target,
6144 			    ddep->LoopID, ddep->NodeWWN[0], ddep->NodeWWN[1],
6145 			    ddep->NodeWWN[2], ddep->NodeWWN[3],
6146 			    ddep->NodeWWN[4], ddep->NodeWWN[5],
6147 			    ddep->NodeWWN[6], ddep->NodeWWN[7],
6148 			    ddep->PortWWN[0], ddep->PortWWN[1],
6149 			    ddep->PortWWN[2], ddep->PortWWN[3],
6150 			    ddep->PortWWN[4], ddep->PortWWN[5],
6151 			    ddep->PortWWN[6], ddep->PortWWN[7]);
6152 			rval = ddi_copyout((void *)ddep, (void *)uddep,
6153 			    sizeof (EXT_DEVICEDATAENTRY), mode);
6154 
6155 			if (rval != 0) {
6156 				cmd->Status = EXT_STATUS_COPY_ERR;
6157 				cmd->ResponseLen = 0;
6158 				EL(ha, "failed, ddi_copyout\n");
6159 				break;
6160 			}
6161 			dd.ReturnListEntryCount++;
6162 			uddep++;
6163 			cmd->ResponseLen += (uint32_t)
6164 			    sizeof (EXT_DEVICEDATAENTRY);
6165 		}
6166 	}
6167 	rval = ddi_copyout((void *)&dd, (void *)uddp,
6168 	    sizeof (EXT_DEVICEDATA) - sizeof (EXT_DEVICEDATAENTRY), mode);
6169 
6170 	if (rval != 0) {
6171 		cmd->Status = EXT_STATUS_COPY_ERR;
6172 		cmd->ResponseLen = 0;
6173 		EL(ha, "failed, ddi_copyout-2\n");
6174 	} else {
6175 		cmd->ResponseLen += (uint32_t)sizeof (EXT_DEVICEDATAENTRY);
6176 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
6177 	}
6178 }
6179 
6180 /*
6181  * ql_get_target_id
6182  *	Performs EXT_SC_GET_TARGET_ID subcommand. of EXT_CC_GET_DATA.
6183  *
6184  * Input:
6185  *	ha:	adapter state pointer.
6186  *	cmd:	Local EXT_IOCTL cmd struct pointer.
6187  *	mode:	flags.
6188  *
6189  * Returns:
6190  *	None, request status indicated in cmd->Status.
6191  *
6192  * Context:
6193  *	Kernel context.
6194  */
6195 static void
6196 ql_get_target_id(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
6197 {
6198 	uint32_t		rval;
6199 	uint16_t		qlnt;
6200 	EXT_DEST_ADDR		extdestaddr = {0};
6201 	uint8_t			*name;
6202 	uint8_t			wwpn[EXT_DEF_WWN_NAME_SIZE];
6203 	ql_tgt_t		*tq;
6204 
6205 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
6206 
6207 	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
6208 	    (void*)wwpn, sizeof (EXT_DEST_ADDR), mode) != 0) {
6209 		EL(ha, "failed, ddi_copyin\n");
6210 		cmd->Status = EXT_STATUS_COPY_ERR;
6211 		cmd->ResponseLen = 0;
6212 		return;
6213 	}
6214 
6215 	qlnt = QLNT_PORT;
6216 	name = wwpn;
6217 	QL_PRINT_9(CE_CONT, "(%d): wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
6218 	    ha->instance, name[0], name[1], name[2], name[3], name[4],
6219 	    name[5], name[6], name[7]);
6220 
6221 	tq = ql_find_port(ha, name, qlnt);
6222 	if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
6223 		EL(ha, "failed, fc_port not found\n");
6224 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
6225 		cmd->ResponseLen = 0;
6226 		return;
6227 	}
6228 
6229 	bcopy(tq->port_name, (caddr_t)&extdestaddr.DestAddr.ScsiAddr.Target, 8);
6230 
6231 	rval = ddi_copyout((void *)&extdestaddr,
6232 	    (void *)(uintptr_t)cmd->ResponseAdr, sizeof (EXT_DEST_ADDR), mode);
6233 	if (rval != 0) {
6234 		EL(ha, "failed, ddi_copyout\n");
6235 		cmd->Status = EXT_STATUS_COPY_ERR;
6236 		cmd->ResponseLen = 0;
6237 	}
6238 
6239 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
6240 }
6241 
6242 /*
6243  * ql_setup_fcache
6244  *	Populates selected flash sections into the cache
6245  *
6246  * Input:
6247  *	ha = adapter state pointer.
6248  *
6249  * Returns:
6250  *	ql local function return status code.
6251  *
6252  * Context:
6253  *	Kernel context.
6254  *
6255  * Note:
6256  *	Driver must be in stalled state prior to entering or
6257  *	add code to this function prior to calling ql_setup_flash()
6258  */
6259 int
6260 ql_setup_fcache(ql_adapter_state_t *ha)
6261 {
6262 	int		rval;
6263 	uint32_t	freadpos = 0;
6264 	uint32_t	fw_done = 0;
6265 	ql_fcache_t	*head = NULL;
6266 	ql_fcache_t	*tail = NULL;
6267 	ql_fcache_t	*ftmp;
6268 
6269 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6270 
6271 	CACHE_LOCK(ha);
6272 
6273 	/* If we already have populated it, rtn */
6274 	if (ha->fcache != NULL) {
6275 		CACHE_UNLOCK(ha);
6276 		EL(ha, "buffer already populated\n");
6277 		return (QL_SUCCESS);
6278 	}
6279 
6280 	ql_flash_nvram_defaults(ha);
6281 
6282 	if ((rval = ql_setup_flash(ha)) != QL_SUCCESS) {
6283 		CACHE_UNLOCK(ha);
6284 		EL(ha, "unable to setup flash; rval=%xh\n", rval);
6285 		return (rval);
6286 	}
6287 
6288 	while (freadpos != 0xffffffff) {
6289 
6290 		/* Allocate & populate this node */
6291 
6292 		if ((ftmp = ql_setup_fnode(ha)) == NULL) {
6293 			EL(ha, "node alloc failed\n");
6294 			rval = QL_FUNCTION_FAILED;
6295 			break;
6296 		}
6297 
6298 		/* link in the new node */
6299 		if (head == NULL) {
6300 			head = tail = ftmp;
6301 		} else {
6302 			tail->next = ftmp;
6303 			tail = ftmp;
6304 		}
6305 
6306 		/* Do the firmware node first for 24xx/25xx's */
6307 		if (fw_done == 0) {
6308 			if (CFG_IST(ha, CFG_CTRL_242581)) {
6309 				freadpos = ha->flash_fw_addr << 2;
6310 			}
6311 			fw_done = 1;
6312 		}
6313 
6314 		if ((rval = ql_dump_fcode(ha, ftmp->buf, FBUFSIZE,
6315 		    freadpos)) != QL_SUCCESS) {
6316 			EL(ha, "failed, 24xx dump_fcode"
6317 			    " pos=%xh rval=%xh\n", freadpos, rval);
6318 			rval = QL_FUNCTION_FAILED;
6319 			break;
6320 		}
6321 
6322 		/* checkout the pci data / format */
6323 		if (ql_check_pci(ha, ftmp, &freadpos)) {
6324 			EL(ha, "flash header incorrect\n");
6325 			rval = QL_FUNCTION_FAILED;
6326 			break;
6327 		}
6328 	}
6329 
6330 	if (rval != QL_SUCCESS) {
6331 		/* release all resources we have */
6332 		ftmp = head;
6333 		while (ftmp != NULL) {
6334 			tail = ftmp->next;
6335 			kmem_free(ftmp->buf, FBUFSIZE);
6336 			kmem_free(ftmp, sizeof (ql_fcache_t));
6337 			ftmp = tail;
6338 		}
6339 
6340 		EL(ha, "failed, done\n");
6341 	} else {
6342 		ha->fcache = head;
6343 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6344 	}
6345 	CACHE_UNLOCK(ha);
6346 
6347 	return (rval);
6348 }
6349 
6350 /*
6351  * ql_update_fcache
6352  *	re-populates updated flash into the fcache. If
6353  *	fcache does not exist (e.g., flash was empty/invalid on
6354  *	boot), this routine will create and the populate it.
6355  *
6356  * Input:
6357  *	ha	= adapter state pointer.
6358  *	*bpf 	= Pointer to flash buffer.
6359  *	bsize	= Size of flash buffer.
6360  *
6361  * Returns:
6362  *
6363  * Context:
6364  *	Kernel context.
6365  */
6366 void
6367 ql_update_fcache(ql_adapter_state_t *ha, uint8_t *bfp, uint32_t bsize)
6368 {
6369 	int		rval = QL_SUCCESS;
6370 	uint32_t	freadpos = 0;
6371 	uint32_t	fw_done = 0;
6372 	ql_fcache_t	*head = NULL;
6373 	ql_fcache_t	*tail = NULL;
6374 	ql_fcache_t	*ftmp;
6375 
6376 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6377 
6378 	while (freadpos != 0xffffffff) {
6379 
6380 		/* Allocate & populate this node */
6381 
6382 		if ((ftmp = ql_setup_fnode(ha)) == NULL) {
6383 			EL(ha, "node alloc failed\n");
6384 			rval = QL_FUNCTION_FAILED;
6385 			break;
6386 		}
6387 
6388 		/* link in the new node */
6389 		if (head == NULL) {
6390 			head = tail = ftmp;
6391 		} else {
6392 			tail->next = ftmp;
6393 			tail = ftmp;
6394 		}
6395 
6396 		/* Do the firmware node first for 24xx's */
6397 		if (fw_done == 0) {
6398 			if (CFG_IST(ha, CFG_CTRL_242581)) {
6399 				freadpos = ha->flash_fw_addr << 2;
6400 			}
6401 			fw_done = 1;
6402 		}
6403 
6404 		/* read in first FBUFSIZE bytes of this flash section */
6405 		if (freadpos+FBUFSIZE > bsize) {
6406 			EL(ha, "passed buffer too small; fr=%xh, bsize=%xh\n",
6407 			    freadpos, bsize);
6408 			rval = QL_FUNCTION_FAILED;
6409 			break;
6410 		}
6411 		bcopy(bfp+freadpos, ftmp->buf, FBUFSIZE);
6412 
6413 		/* checkout the pci data / format */
6414 		if (ql_check_pci(ha, ftmp, &freadpos)) {
6415 			EL(ha, "flash header incorrect\n");
6416 			rval = QL_FUNCTION_FAILED;
6417 			break;
6418 		}
6419 	}
6420 
6421 	if (rval != QL_SUCCESS) {
6422 		/*
6423 		 * release all resources we have
6424 		 */
6425 		ql_fcache_rel(head);
6426 		EL(ha, "failed, done\n");
6427 	} else {
6428 		/*
6429 		 * Release previous fcache resources and update with new
6430 		 */
6431 		CACHE_LOCK(ha);
6432 		ql_fcache_rel(ha->fcache);
6433 		ha->fcache = head;
6434 		CACHE_UNLOCK(ha);
6435 
6436 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6437 	}
6438 }
6439 
6440 /*
6441  * ql_setup_fnode
6442  *	Allocates fcache node
6443  *
6444  * Input:
6445  *	ha = adapter state pointer.
6446  *	node = point to allocated fcache node (NULL = failed)
6447  *
6448  * Returns:
6449  *
6450  * Context:
6451  *	Kernel context.
6452  *
6453  * Note:
6454  *	Driver must be in stalled state prior to entering or
6455  *	add code to this function prior to calling ql_setup_flash()
6456  */
6457 static ql_fcache_t *
6458 ql_setup_fnode(ql_adapter_state_t *ha)
6459 {
6460 	ql_fcache_t	*fnode = NULL;
6461 
6462 	if ((fnode = (ql_fcache_t *)(kmem_zalloc(sizeof (ql_fcache_t),
6463 	    KM_SLEEP))) == NULL) {
6464 		EL(ha, "fnode alloc failed\n");
6465 		fnode = NULL;
6466 	} else if ((fnode->buf = (uint8_t *)(kmem_zalloc(FBUFSIZE,
6467 	    KM_SLEEP))) == NULL) {
6468 		EL(ha, "buf alloc failed\n");
6469 		kmem_free(fnode, sizeof (ql_fcache_t));
6470 		fnode = NULL;
6471 	} else {
6472 		fnode->buflen = FBUFSIZE;
6473 	}
6474 
6475 	return (fnode);
6476 }
6477 
6478 /*
6479  * ql_fcache_rel
6480  *	Releases the fcache resources
6481  *
6482  * Input:
6483  *	ha	= adapter state pointer.
6484  *	head	= Pointer to fcache linked list
6485  *
6486  * Returns:
6487  *
6488  * Context:
6489  *	Kernel context.
6490  *
6491  */
6492 void
6493 ql_fcache_rel(ql_fcache_t *head)
6494 {
6495 	ql_fcache_t	*ftmp = head;
6496 	ql_fcache_t	*tail;
6497 
6498 	/* release all resources we have */
6499 	while (ftmp != NULL) {
6500 		tail = ftmp->next;
6501 		kmem_free(ftmp->buf, FBUFSIZE);
6502 		kmem_free(ftmp, sizeof (ql_fcache_t));
6503 		ftmp = tail;
6504 	}
6505 }
6506 
6507 /*
6508  * ql_update_flash_caches
6509  *	Updates driver flash caches
6510  *
6511  * Input:
6512  *	ha:	adapter state pointer.
6513  *
6514  * Context:
6515  *	Kernel context.
6516  */
6517 static void
6518 ql_update_flash_caches(ql_adapter_state_t *ha)
6519 {
6520 	uint32_t		len;
6521 	ql_link_t		*link;
6522 	ql_adapter_state_t	*ha2;
6523 
6524 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6525 
6526 	/* Get base path length. */
6527 	for (len = (uint32_t)strlen(ha->devpath); len; len--) {
6528 		if (ha->devpath[len] == ',' ||
6529 		    ha->devpath[len] == '@') {
6530 			break;
6531 		}
6532 	}
6533 
6534 	/* Reset fcache on all adapter instances. */
6535 	for (link = ql_hba.first; link != NULL; link = link->next) {
6536 		ha2 = link->base_address;
6537 
6538 		if (strncmp(ha->devpath, ha2->devpath, len) != 0) {
6539 			continue;
6540 		}
6541 
6542 		ql_fcache_rel(ha2->fcache);
6543 
6544 		if (CFG_IST(ha, CFG_CTRL_242581)) {
6545 			CACHE_LOCK(ha2);
6546 			if (ha2->vcache != NULL) {
6547 				kmem_free(ha2->vcache, QL_24XX_VPD_SIZE);
6548 				ha2->vcache = NULL;
6549 			}
6550 			CACHE_UNLOCK(ha2);
6551 		}
6552 
6553 		(void) ql_setup_fcache(ha2);
6554 	}
6555 
6556 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6557 }
6558 
6559 /*
6560  * ql_get_fbuf
6561  *	Search the fcache list for the type specified
6562  *
6563  * Input:
6564  *	fptr	= Pointer to fcache linked list
6565  *	ftype	= Type of image to be returned.
6566  *
6567  * Returns:
6568  *	Pointer to ql_fcache_t.
6569  *	NULL means not found.
6570  *
6571  * Context:
6572  *	Kernel context.
6573  *
6574  *
6575  */
6576 ql_fcache_t *
6577 ql_get_fbuf(ql_fcache_t *fptr, uint32_t ftype)
6578 {
6579 	while (fptr != NULL) {
6580 		/* does this image meet criteria? */
6581 		if (ftype & fptr->type) {
6582 			break;
6583 		}
6584 		fptr = fptr->next;
6585 	}
6586 	return (fptr);
6587 }
6588 
6589 /*
6590  * ql_check_pci
6591  *
6592  *	checks the passed buffer for a valid pci signature and
6593  *	expected (and in range) pci length values.
6594  *
6595  *	For firmware type, a pci header is added since the image in
6596  *	the flash does not have one (!!!).
6597  *
6598  *	On successful pci check, nextpos adjusted to next pci header.
6599  *
6600  * Returns:
6601  *	-1 --> last pci image
6602  *	0 --> pci header valid
6603  *	1 --> pci header invalid.
6604  *
6605  * Context:
6606  *	Kernel context.
6607  */
6608 static int
6609 ql_check_pci(ql_adapter_state_t *ha, ql_fcache_t *fcache, uint32_t *nextpos)
6610 {
6611 	pci_header_t	*pcih;
6612 	pci_data_t	*pcid;
6613 	uint32_t	doff;
6614 	uint8_t		*pciinfo;
6615 
6616 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
6617 
6618 	if (fcache != NULL) {
6619 		pciinfo = fcache->buf;
6620 	} else {
6621 		EL(ha, "failed, null fcache ptr passed\n");
6622 		return (1);
6623 	}
6624 
6625 	if (pciinfo == NULL) {
6626 		EL(ha, "failed, null pciinfo ptr passed\n");
6627 		return (1);
6628 	}
6629 
6630 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
6631 		caddr_t	bufp;
6632 		uint_t	len;
6633 
6634 		if (pciinfo[0] != SBUS_CODE_FCODE) {
6635 			EL(ha, "failed, unable to detect sbus fcode\n");
6636 			return (1);
6637 		}
6638 		fcache->type = FTYPE_FCODE;
6639 
6640 		/*LINTED [Solaris DDI_DEV_T_ANY Lint error]*/
6641 		if (ddi_getlongprop(DDI_DEV_T_ANY, ha->dip,
6642 		    PROP_LEN_AND_VAL_ALLOC | DDI_PROP_DONTPASS |
6643 		    DDI_PROP_CANSLEEP, "version", (caddr_t)&bufp,
6644 		    (int *)&len) == DDI_PROP_SUCCESS) {
6645 
6646 			(void) snprintf(fcache->verstr,
6647 			    FCHBA_OPTION_ROM_VERSION_LEN, "%s", bufp);
6648 			kmem_free(bufp, len);
6649 		}
6650 
6651 		*nextpos = 0xffffffff;
6652 
6653 		QL_PRINT_9(CE_CONT, "(%d): CFG_SBUS_CARD, done\n",
6654 		    ha->instance);
6655 
6656 		return (0);
6657 	}
6658 
6659 	if (*nextpos == ha->flash_fw_addr << 2) {
6660 
6661 		pci_header_t	fwh = {0};
6662 		pci_data_t	fwd = {0};
6663 		uint8_t		*buf, *bufp;
6664 
6665 		/*
6666 		 * Build a pci header for the firmware module
6667 		 */
6668 		if ((buf = (uint8_t *)(kmem_zalloc(FBUFSIZE, KM_SLEEP))) ==
6669 		    NULL) {
6670 			EL(ha, "failed, unable to allocate buffer\n");
6671 			return (1);
6672 		}
6673 
6674 		fwh.signature[0] = PCI_HEADER0;
6675 		fwh.signature[1] = PCI_HEADER1;
6676 		fwh.dataoffset[0] = LSB(sizeof (pci_header_t));
6677 		fwh.dataoffset[1] = MSB(sizeof (pci_header_t));
6678 
6679 		fwd.signature[0] = 'P';
6680 		fwd.signature[1] = 'C';
6681 		fwd.signature[2] = 'I';
6682 		fwd.signature[3] = 'R';
6683 		fwd.codetype = PCI_CODE_FW;
6684 		fwd.pcidatalen[0] = LSB(sizeof (pci_data_t));
6685 		fwd.pcidatalen[1] = MSB(sizeof (pci_data_t));
6686 
6687 		bufp = buf;
6688 		bcopy(&fwh, bufp, sizeof (pci_header_t));
6689 		bufp += sizeof (pci_header_t);
6690 		bcopy(&fwd, bufp, sizeof (pci_data_t));
6691 		bufp += sizeof (pci_data_t);
6692 
6693 		bcopy(fcache->buf, bufp, (FBUFSIZE - sizeof (pci_header_t) -
6694 		    sizeof (pci_data_t)));
6695 		bcopy(buf, fcache->buf, FBUFSIZE);
6696 
6697 		fcache->type = FTYPE_FW;
6698 
6699 		(void) snprintf(fcache->verstr, FCHBA_OPTION_ROM_VERSION_LEN,
6700 		    "%d.%02d.%02d", fcache->buf[19], fcache->buf[23],
6701 		    fcache->buf[27]);
6702 
6703 		*nextpos = CFG_IST(ha, CFG_CTRL_81XX) ? 0x200000 : 0;
6704 		kmem_free(buf, FBUFSIZE);
6705 
6706 		QL_PRINT_9(CE_CONT, "(%d): FTYPE_FW, done\n", ha->instance);
6707 
6708 		return (0);
6709 	}
6710 
6711 	/* get to the pci header image length */
6712 	pcih = (pci_header_t *)pciinfo;
6713 
6714 	doff = pcih->dataoffset[0] | (pcih->dataoffset[1] << 8);
6715 
6716 	/* some header section sanity check */
6717 	if (pcih->signature[0] != PCI_HEADER0 ||
6718 	    pcih->signature[1] != PCI_HEADER1 || doff > 50) {
6719 		EL(ha, "buffer format error: s0=%xh, s1=%xh, off=%xh\n",
6720 		    pcih->signature[0], pcih->signature[1], doff);
6721 		return (1);
6722 	}
6723 
6724 	pcid = (pci_data_t *)(pciinfo + doff);
6725 
6726 	/* a slight sanity data section check */
6727 	if (pcid->signature[0] != 'P' || pcid->signature[1] != 'C' ||
6728 	    pcid->signature[2] != 'I' || pcid->signature[3] != 'R') {
6729 		EL(ha, "failed, data sig mismatch!\n");
6730 		return (1);
6731 	}
6732 
6733 	if (pcid->indicator == PCI_IND_LAST_IMAGE) {
6734 		EL(ha, "last image\n");
6735 		if (CFG_IST(ha, CFG_CTRL_242581)) {
6736 			ql_flash_layout_table(ha, *nextpos +
6737 			    (pcid->imagelength[0] | (pcid->imagelength[1] <<
6738 			    8)) * PCI_SECTOR_SIZE);
6739 			ql_24xx_flash_desc(ha);
6740 		}
6741 		*nextpos = 0xffffffff;
6742 	} else {
6743 		/* adjust the next flash read start position */
6744 		*nextpos += (pcid->imagelength[0] |
6745 		    (pcid->imagelength[1] << 8)) * PCI_SECTOR_SIZE;
6746 	}
6747 
6748 	switch (pcid->codetype) {
6749 	case PCI_CODE_X86PC:
6750 		fcache->type = FTYPE_BIOS;
6751 		break;
6752 	case PCI_CODE_FCODE:
6753 		fcache->type = FTYPE_FCODE;
6754 		break;
6755 	case PCI_CODE_EFI:
6756 		fcache->type = FTYPE_EFI;
6757 		break;
6758 	case PCI_CODE_HPPA:
6759 		fcache->type = FTYPE_HPPA;
6760 		break;
6761 	default:
6762 		fcache->type = FTYPE_UNKNOWN;
6763 		break;
6764 	}
6765 
6766 	(void) snprintf(fcache->verstr, FCHBA_OPTION_ROM_VERSION_LEN,
6767 	    "%d.%d", pcid->revisionlevel[1], pcid->revisionlevel[0]);
6768 
6769 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
6770 
6771 	return (0);
6772 }
6773 
6774 /*
6775  * ql_flash_layout_table
6776  *	Obtains flash addresses from table
6777  *
6778  * Input:
6779  *	ha:		adapter state pointer.
6780  *	flt_paddr:	flash layout pointer address.
6781  *
6782  * Context:
6783  *	Kernel context.
6784  */
6785 static void
6786 ql_flash_layout_table(ql_adapter_state_t *ha, uint32_t flt_paddr)
6787 {
6788 	ql_flt_ptr_t	*fptr;
6789 	ql_flt_hdr_t	*fhdr;
6790 	ql_flt_region_t	*frgn;
6791 	uint8_t		*bp;
6792 	int		rval;
6793 	uint32_t	len, faddr, cnt;
6794 	uint16_t	chksum, w16;
6795 
6796 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
6797 
6798 	/* Process flash layout table header */
6799 	if ((bp = kmem_zalloc(FLASH_LAYOUT_TABLE_SIZE, KM_SLEEP)) == NULL) {
6800 		EL(ha, "kmem_zalloc=null\n");
6801 		return;
6802 	}
6803 
6804 	/* Process pointer to flash layout table */
6805 	if ((rval = ql_dump_fcode(ha, bp, sizeof (ql_flt_ptr_t), flt_paddr)) !=
6806 	    QL_SUCCESS) {
6807 		EL(ha, "fptr dump_flash pos=%xh, status=%xh\n", flt_paddr,
6808 		    rval);
6809 		kmem_free(bp, FLASH_LAYOUT_TABLE_SIZE);
6810 		return;
6811 	}
6812 	fptr = (ql_flt_ptr_t *)bp;
6813 
6814 	/* Verify pointer to flash layout table. */
6815 	for (chksum = 0, cnt = 0; cnt < sizeof (ql_flt_ptr_t); cnt += 2) {
6816 		w16 = (uint16_t)CHAR_TO_SHORT(bp[cnt], bp[cnt + 1]);
6817 		chksum += w16;
6818 	}
6819 	if (chksum != 0 || fptr->sig[0] != 'Q' || fptr->sig[1] != 'F' ||
6820 	    fptr->sig[2] != 'L' || fptr->sig[3] != 'T') {
6821 		EL(ha, "ptr chksum=%xh, sig=%c%c%c%c\n", chksum, fptr->sig[0],
6822 		    fptr->sig[1], fptr->sig[2], fptr->sig[3]);
6823 		kmem_free(bp, FLASH_LAYOUT_TABLE_SIZE);
6824 		return;
6825 	}
6826 	faddr = CHAR_TO_LONG(fptr->addr[0], fptr->addr[1], fptr->addr[2],
6827 	    fptr->addr[3]);
6828 
6829 	/* Process flash layout table. */
6830 	if ((rval = ql_dump_fcode(ha, bp, FLASH_LAYOUT_TABLE_SIZE, faddr)) !=
6831 	    QL_SUCCESS) {
6832 		EL(ha, "fhdr dump_flash pos=%xh, status=%xh\n", faddr, rval);
6833 		kmem_free(bp, FLASH_LAYOUT_TABLE_SIZE);
6834 		return;
6835 	}
6836 	fhdr = (ql_flt_hdr_t *)bp;
6837 
6838 	/* Verify flash layout table. */
6839 	len = (uint16_t)(CHAR_TO_SHORT(fhdr->len[0], fhdr->len[1]) +
6840 	    sizeof (ql_flt_hdr_t));
6841 	if (len > FLASH_LAYOUT_TABLE_SIZE) {
6842 		chksum = 0xffff;
6843 	} else {
6844 		for (chksum = 0, cnt = 0; cnt < len; cnt += 2) {
6845 			w16 = (uint16_t)CHAR_TO_SHORT(bp[cnt], bp[cnt + 1]);
6846 			chksum += w16;
6847 		}
6848 	}
6849 	w16 = CHAR_TO_SHORT(fhdr->version[0], fhdr->version[1]);
6850 	if (chksum != 0 || w16 != 1) {
6851 		EL(ha, "table chksum=%xh, version=%d\n", chksum, w16);
6852 		kmem_free(bp, FLASH_LAYOUT_TABLE_SIZE);
6853 		return;
6854 	}
6855 
6856 	/* Process flash layout table regions */
6857 	for (frgn = (ql_flt_region_t *)(bp + sizeof (ql_flt_hdr_t));
6858 	    (caddr_t)frgn < (caddr_t)(bp + FLASH_LAYOUT_TABLE_SIZE); frgn++) {
6859 		faddr = CHAR_TO_LONG(frgn->beg_addr[0], frgn->beg_addr[1],
6860 		    frgn->beg_addr[2], frgn->beg_addr[3]);
6861 		faddr >>= 2;
6862 
6863 		switch (frgn->region) {
6864 		case FLASH_FW_REGION:
6865 			ha->flash_fw_addr = faddr;
6866 			QL_PRINT_9(CE_CONT, "(%d): flash_fw_addr=%xh\n",
6867 			    ha->instance, faddr);
6868 			break;
6869 		case FLASH_GOLDEN_FW_REGION:
6870 			ha->flash_golden_fw_addr = faddr;
6871 			QL_PRINT_9(CE_CONT, "(%d): flash_golden_fw_addr=%xh\n",
6872 			    ha->instance, faddr);
6873 			break;
6874 		case FLASH_VPD_0_REGION:
6875 			if (!(ha->flags & FUNCTION_1)) {
6876 				ha->flash_vpd_addr = faddr;
6877 				QL_PRINT_9(CE_CONT, "(%d): flash_vpd_addr=%xh"
6878 				    "\n", ha->instance, faddr);
6879 			}
6880 			break;
6881 		case FLASH_NVRAM_0_REGION:
6882 			if (!(ha->flags & FUNCTION_1)) {
6883 				ha->flash_nvram_addr = faddr;
6884 				QL_PRINT_9(CE_CONT, "(%d): flash_nvram_addr="
6885 				    "%xh\n", ha->instance, faddr);
6886 			}
6887 			break;
6888 		case FLASH_VPD_1_REGION:
6889 			if (ha->flags & FUNCTION_1) {
6890 				ha->flash_vpd_addr = faddr;
6891 				QL_PRINT_9(CE_CONT, "(%d): flash_vpd_addr=%xh"
6892 				    "\n", ha->instance, faddr);
6893 			}
6894 			break;
6895 		case FLASH_NVRAM_1_REGION:
6896 			if (ha->flags & FUNCTION_1) {
6897 				ha->flash_nvram_addr = faddr;
6898 				QL_PRINT_9(CE_CONT, "(%d): flash_nvram_addr="
6899 				    "%xh\n", ha->instance, faddr);
6900 			}
6901 			break;
6902 		case FLASH_DESC_TABLE_REGION:
6903 			ha->flash_desc_addr = faddr;
6904 			QL_PRINT_9(CE_CONT, "(%d): flash_desc_addr=%xh\n",
6905 			    ha->instance, faddr);
6906 			break;
6907 		case FLASH_ERROR_LOG_0_REGION:
6908 			if (!(ha->flags & FUNCTION_1)) {
6909 				ha->flash_errlog_start = faddr;
6910 				QL_PRINT_9(CE_CONT, "(%d): flash_errlog_addr="
6911 				    "%xh\n", ha->instance, faddr);
6912 			}
6913 			break;
6914 		case FLASH_ERROR_LOG_1_REGION:
6915 			if (ha->flags & FUNCTION_1) {
6916 				ha->flash_errlog_start = faddr;
6917 				QL_PRINT_9(CE_CONT, "(%d): flash_errlog_addr="
6918 				    "%xh\n", ha->instance, faddr);
6919 			}
6920 			break;
6921 		default:
6922 			break;
6923 		}
6924 	}
6925 	kmem_free(bp, FLASH_LAYOUT_TABLE_SIZE);
6926 
6927 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
6928 }
6929 
6930 /*
6931  * ql_flash_nvram_defaults
6932  *	Flash default addresses.
6933  *
6934  * Input:
6935  *	ha:		adapter state pointer.
6936  *
6937  * Returns:
6938  *	ql local function return status code.
6939  *
6940  * Context:
6941  *	Kernel context.
6942  */
6943 static void
6944 ql_flash_nvram_defaults(ql_adapter_state_t *ha)
6945 {
6946 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
6947 
6948 	if (ha->flags & FUNCTION_1) {
6949 		if (CFG_IST(ha, CFG_CTRL_2300)) {
6950 			ha->flash_nvram_addr = NVRAM_2300_FUNC1_ADDR;
6951 			ha->flash_fw_addr = FLASH_2300_FIRMWARE_ADDR;
6952 		} else if (CFG_IST(ha, CFG_CTRL_2422)) {
6953 			ha->flash_data_addr = FLASH_24_25_DATA_ADDR;
6954 			ha->flash_nvram_addr = NVRAM_2400_FUNC1_ADDR;
6955 			ha->flash_vpd_addr = VPD_2400_FUNC1_ADDR;
6956 			ha->flash_errlog_start = FLASH_2400_ERRLOG_START_ADDR_1;
6957 			ha->flash_desc_addr = FLASH_2400_DESCRIPTOR_TABLE;
6958 			ha->flash_fw_addr = FLASH_2400_FIRMWARE_ADDR;
6959 		} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
6960 			ha->flash_data_addr = FLASH_24_25_DATA_ADDR;
6961 			ha->flash_nvram_addr = NVRAM_2500_FUNC1_ADDR;
6962 			ha->flash_vpd_addr = VPD_2500_FUNC1_ADDR;
6963 			ha->flash_errlog_start = FLASH_2500_ERRLOG_START_ADDR_1;
6964 			ha->flash_desc_addr = FLASH_2500_DESCRIPTOR_TABLE;
6965 			ha->flash_fw_addr = FLASH_2500_FIRMWARE_ADDR;
6966 		} else if (CFG_IST(ha, CFG_CTRL_81XX)) {
6967 			ha->flash_data_addr = FLASH_8100_DATA_ADDR;
6968 			ha->flash_nvram_addr = NVRAM_8100_FUNC1_ADDR;
6969 			ha->flash_vpd_addr = VPD_8100_FUNC1_ADDR;
6970 			ha->flash_errlog_start = FLASH_8100_ERRLOG_START_ADDR_1;
6971 			ha->flash_desc_addr = FLASH_8100_DESCRIPTOR_TABLE;
6972 			ha->flash_fw_addr = FLASH_8100_FIRMWARE_ADDR;
6973 		}
6974 	} else {
6975 		if (CFG_IST(ha, CFG_CTRL_2200)) {
6976 			ha->flash_nvram_addr = NVRAM_2200_FUNC0_ADDR;
6977 			ha->flash_fw_addr = FLASH_2200_FIRMWARE_ADDR;
6978 		} else if (CFG_IST(ha, CFG_CTRL_2300)) {
6979 			ha->flash_nvram_addr = NVRAM_2300_FUNC0_ADDR;
6980 			ha->flash_fw_addr = FLASH_2300_FIRMWARE_ADDR;
6981 		} else if (CFG_IST(ha, CFG_CTRL_2422)) {
6982 			ha->flash_data_addr = FLASH_24_25_DATA_ADDR;
6983 			ha->flash_nvram_addr = NVRAM_2400_FUNC0_ADDR;
6984 			ha->flash_vpd_addr = VPD_2400_FUNC0_ADDR;
6985 			ha->flash_errlog_start = FLASH_2400_ERRLOG_START_ADDR_0;
6986 			ha->flash_desc_addr = FLASH_2400_DESCRIPTOR_TABLE;
6987 			ha->flash_fw_addr = FLASH_2400_FIRMWARE_ADDR;
6988 		} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
6989 			ha->flash_data_addr = FLASH_24_25_DATA_ADDR;
6990 			ha->flash_nvram_addr = NVRAM_2500_FUNC0_ADDR;
6991 			ha->flash_vpd_addr = VPD_2500_FUNC0_ADDR;
6992 			ha->flash_errlog_start = FLASH_2500_ERRLOG_START_ADDR_0;
6993 			ha->flash_desc_addr = FLASH_2500_DESCRIPTOR_TABLE;
6994 			ha->flash_fw_addr = FLASH_2500_FIRMWARE_ADDR;
6995 		} else if (CFG_IST(ha, CFG_CTRL_81XX)) {
6996 			ha->flash_data_addr = FLASH_8100_DATA_ADDR;
6997 			ha->flash_nvram_addr = NVRAM_8100_FUNC0_ADDR;
6998 			ha->flash_vpd_addr = VPD_8100_FUNC0_ADDR;
6999 			ha->flash_errlog_start = FLASH_8100_ERRLOG_START_ADDR_0;
7000 			ha->flash_desc_addr = FLASH_8100_DESCRIPTOR_TABLE;
7001 			ha->flash_fw_addr = FLASH_8100_FIRMWARE_ADDR;
7002 		}
7003 	}
7004 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7005 }
7006 
7007 /*
7008  * ql_get_sfp
7009  *	Returns sfp data to sdmapi caller
7010  *
7011  * Input:
7012  *	ha:	adapter state pointer.
7013  *	cmd:	Local EXT_IOCTL cmd struct pointer.
7014  *	mode:	flags.
7015  *
7016  * Returns:
7017  *	None, request status indicated in cmd->Status.
7018  *
7019  * Context:
7020  *	Kernel context.
7021  */
7022 static void
7023 ql_get_sfp(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7024 {
7025 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7026 
7027 	if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
7028 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7029 		EL(ha, "failed, invalid request for HBA\n");
7030 		return;
7031 	}
7032 
7033 	if (cmd->ResponseLen < QL_24XX_SFP_SIZE) {
7034 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
7035 		cmd->DetailStatus = QL_24XX_SFP_SIZE;
7036 		EL(ha, "failed, ResponseLen < SFP len, len passed=%xh\n",
7037 		    cmd->ResponseLen);
7038 		return;
7039 	}
7040 
7041 	/* Dump SFP data in user buffer */
7042 	if ((ql_dump_sfp(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
7043 	    mode)) != 0) {
7044 		cmd->Status = EXT_STATUS_COPY_ERR;
7045 		EL(ha, "failed, copy error\n");
7046 	} else {
7047 		cmd->Status = EXT_STATUS_OK;
7048 	}
7049 
7050 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7051 }
7052 
7053 /*
7054  * ql_dump_sfp
7055  *	Dumps SFP.
7056  *
7057  * Input:
7058  *	ha:	adapter state pointer.
7059  *	bp:	buffer address.
7060  *	mode:	flags
7061  *
7062  * Returns:
7063  *
7064  * Context:
7065  *	Kernel context.
7066  */
7067 static int
7068 ql_dump_sfp(ql_adapter_state_t *ha, void *bp, int mode)
7069 {
7070 	dma_mem_t	mem;
7071 	uint32_t	cnt;
7072 	int		rval2, rval = 0;
7073 	uint32_t	dxfer;
7074 
7075 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7076 
7077 	/* Get memory for SFP. */
7078 
7079 	if ((rval2 = ql_get_dma_mem(ha, &mem, 64, LITTLE_ENDIAN_DMA,
7080 	    QL_DMA_DATA_ALIGN)) != QL_SUCCESS) {
7081 		EL(ha, "failed, ql_get_dma_mem=%xh\n", rval2);
7082 		return (ENOMEM);
7083 	}
7084 
7085 	for (cnt = 0; cnt < QL_24XX_SFP_SIZE; cnt += mem.size) {
7086 		rval2 = ql_read_sfp(ha, &mem,
7087 		    (uint16_t)(cnt < 256 ? 0xA0 : 0xA2),
7088 		    (uint16_t)(cnt & 0xff));
7089 		if (rval2 != QL_SUCCESS) {
7090 			EL(ha, "failed, read_sfp=%xh\n", rval2);
7091 			rval = EFAULT;
7092 			break;
7093 		}
7094 
7095 		/* copy the data back */
7096 		if ((dxfer = ql_send_buffer_data(mem.bp, bp, mem.size,
7097 		    mode)) != mem.size) {
7098 			/* ddi copy error */
7099 			EL(ha, "failed, ddi copy; byte cnt = %xh", dxfer);
7100 			rval = EFAULT;
7101 			break;
7102 		}
7103 
7104 		/* adjust the buffer pointer */
7105 		bp = (caddr_t)bp + mem.size;
7106 	}
7107 
7108 	ql_free_phys(ha, &mem);
7109 
7110 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7111 
7112 	return (rval);
7113 }
7114 
7115 /*
7116  * ql_port_param
7117  *	Retrieves or sets the firmware port speed settings
7118  *
7119  * Input:
7120  *	ha:	adapter state pointer.
7121  *	cmd:	Local EXT_IOCTL cmd struct pointer.
7122  *	mode:	flags.
7123  *
7124  * Returns:
7125  *	None, request status indicated in cmd->Status.
7126  *
7127  * Context:
7128  *	Kernel context.
7129  *
7130  */
7131 static void
7132 ql_port_param(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7133 {
7134 	uint8_t			*name;
7135 	ql_tgt_t		*tq;
7136 	EXT_PORT_PARAM		port_param = {0};
7137 	uint32_t		rval = QL_SUCCESS;
7138 	uint32_t		idma_rate;
7139 
7140 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7141 
7142 	if (CFG_IST(ha, CFG_CTRL_242581) == 0) {
7143 		EL(ha, "invalid request for this HBA\n");
7144 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7145 		cmd->ResponseLen = 0;
7146 		return;
7147 	}
7148 
7149 	if (LOOP_NOT_READY(ha)) {
7150 		EL(ha, "failed, loop not ready\n");
7151 		cmd->Status = EXT_STATUS_DEVICE_OFFLINE;
7152 		cmd->ResponseLen = 0;
7153 		return;
7154 	}
7155 
7156 	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
7157 	    (void*)&port_param, sizeof (EXT_PORT_PARAM), mode) != 0) {
7158 		EL(ha, "failed, ddi_copyin\n");
7159 		cmd->Status = EXT_STATUS_COPY_ERR;
7160 		cmd->ResponseLen = 0;
7161 		return;
7162 	}
7163 
7164 	if (port_param.FCScsiAddr.DestType != EXT_DEF_DESTTYPE_WWPN) {
7165 		EL(ha, "Unsupported dest lookup type: %xh\n",
7166 		    port_param.FCScsiAddr.DestType);
7167 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
7168 		cmd->ResponseLen = 0;
7169 		return;
7170 	}
7171 
7172 	name = port_param.FCScsiAddr.DestAddr.WWPN;
7173 
7174 	QL_PRINT_9(CE_CONT, "(%d): wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
7175 	    ha->instance, name[0], name[1], name[2], name[3], name[4],
7176 	    name[5], name[6], name[7]);
7177 
7178 	tq = ql_find_port(ha, name, (uint16_t)QLNT_PORT);
7179 	if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
7180 		EL(ha, "failed, fc_port not found\n");
7181 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
7182 		cmd->ResponseLen = 0;
7183 		return;
7184 	}
7185 
7186 	cmd->Status = EXT_STATUS_OK;
7187 	cmd->DetailStatus = EXT_STATUS_OK;
7188 
7189 	switch (port_param.Mode) {
7190 	case EXT_IIDMA_MODE_GET:
7191 		/*
7192 		 * Report the firmware's port rate for the wwpn
7193 		 */
7194 		rval = ql_iidma_rate(ha, tq->loop_id, &idma_rate,
7195 		    port_param.Mode);
7196 
7197 		if (rval != QL_SUCCESS) {
7198 			EL(ha, "iidma get failed: %xh\n", rval);
7199 			cmd->Status = EXT_STATUS_MAILBOX;
7200 			cmd->DetailStatus = rval;
7201 			cmd->ResponseLen = 0;
7202 		} else {
7203 			switch (idma_rate) {
7204 			case IIDMA_RATE_1GB:
7205 				port_param.Speed =
7206 				    EXT_DEF_PORTSPEED_1GBIT;
7207 				break;
7208 			case IIDMA_RATE_2GB:
7209 				port_param.Speed =
7210 				    EXT_DEF_PORTSPEED_2GBIT;
7211 				break;
7212 			case IIDMA_RATE_4GB:
7213 				port_param.Speed =
7214 				    EXT_DEF_PORTSPEED_4GBIT;
7215 				break;
7216 			case IIDMA_RATE_8GB:
7217 				port_param.Speed =
7218 				    EXT_DEF_PORTSPEED_8GBIT;
7219 				break;
7220 			case IIDMA_RATE_10GB:
7221 				port_param.Speed =
7222 				    EXT_DEF_PORTSPEED_10GBIT;
7223 				break;
7224 			default:
7225 				port_param.Speed =
7226 				    EXT_DEF_PORTSPEED_UNKNOWN;
7227 				EL(ha, "failed, Port speed rate=%xh\n",
7228 				    idma_rate);
7229 				break;
7230 			}
7231 
7232 			/* Copy back the data */
7233 			rval = ddi_copyout((void *)&port_param,
7234 			    (void *)(uintptr_t)cmd->ResponseAdr,
7235 			    sizeof (EXT_PORT_PARAM), mode);
7236 
7237 			if (rval != 0) {
7238 				cmd->Status = EXT_STATUS_COPY_ERR;
7239 				cmd->ResponseLen = 0;
7240 				EL(ha, "failed, ddi_copyout\n");
7241 			} else {
7242 				cmd->ResponseLen = (uint32_t)
7243 				    sizeof (EXT_PORT_PARAM);
7244 			}
7245 		}
7246 		break;
7247 
7248 	case EXT_IIDMA_MODE_SET:
7249 		/*
7250 		 * Set the firmware's port rate for the wwpn
7251 		 */
7252 		switch (port_param.Speed) {
7253 		case EXT_DEF_PORTSPEED_1GBIT:
7254 			idma_rate = IIDMA_RATE_1GB;
7255 			break;
7256 		case EXT_DEF_PORTSPEED_2GBIT:
7257 			idma_rate = IIDMA_RATE_2GB;
7258 			break;
7259 		case EXT_DEF_PORTSPEED_4GBIT:
7260 			idma_rate = IIDMA_RATE_4GB;
7261 			break;
7262 		case EXT_DEF_PORTSPEED_8GBIT:
7263 			idma_rate = IIDMA_RATE_8GB;
7264 			break;
7265 		case EXT_DEF_PORTSPEED_10GBIT:
7266 			port_param.Speed = IIDMA_RATE_10GB;
7267 			break;
7268 		default:
7269 			EL(ha, "invalid set iidma rate: %x\n",
7270 			    port_param.Speed);
7271 			cmd->Status = EXT_STATUS_INVALID_PARAM;
7272 			cmd->ResponseLen = 0;
7273 			rval = QL_PARAMETER_ERROR;
7274 			break;
7275 		}
7276 
7277 		if (rval == QL_SUCCESS) {
7278 			rval = ql_iidma_rate(ha, tq->loop_id, &idma_rate,
7279 			    port_param.Mode);
7280 			if (rval != QL_SUCCESS) {
7281 				EL(ha, "iidma set failed: %xh\n", rval);
7282 				cmd->Status = EXT_STATUS_MAILBOX;
7283 				cmd->DetailStatus = rval;
7284 				cmd->ResponseLen = 0;
7285 			}
7286 		}
7287 		break;
7288 	default:
7289 		EL(ha, "invalid mode specified: %x\n", port_param.Mode);
7290 		cmd->Status = EXT_STATUS_INVALID_PARAM;
7291 		cmd->ResponseLen = 0;
7292 		cmd->DetailStatus = 0;
7293 		break;
7294 	}
7295 
7296 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7297 }
7298 
7299 /*
7300  * ql_get_fwexttrace
7301  *	Dumps f/w extended trace buffer
7302  *
7303  * Input:
7304  *	ha:	adapter state pointer.
7305  *	bp:	buffer address.
7306  *	mode:	flags
7307  *
7308  * Returns:
7309  *
7310  * Context:
7311  *	Kernel context.
7312  */
7313 /* ARGSUSED */
7314 static void
7315 ql_get_fwexttrace(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7316 {
7317 	int	rval;
7318 	caddr_t	payload;
7319 
7320 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7321 
7322 	if (CFG_IST(ha, CFG_CTRL_242581) == 0) {
7323 		EL(ha, "invalid request for this HBA\n");
7324 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7325 		cmd->ResponseLen = 0;
7326 		return;
7327 	}
7328 
7329 	if ((CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) == 0) ||
7330 	    (ha->fwexttracebuf.bp == NULL)) {
7331 		EL(ha, "f/w extended trace is not enabled\n");
7332 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7333 		cmd->ResponseLen = 0;
7334 		return;
7335 	}
7336 
7337 	if (cmd->ResponseLen < FWEXTSIZE) {
7338 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
7339 		cmd->DetailStatus = FWEXTSIZE;
7340 		EL(ha, "failed, ResponseLen (%xh) < %xh (FWEXTSIZE)\n",
7341 		    cmd->ResponseLen, FWEXTSIZE);
7342 		cmd->ResponseLen = 0;
7343 		return;
7344 	}
7345 
7346 	/* Time Stamp */
7347 	rval = ql_fw_etrace(ha, &ha->fwexttracebuf, FTO_INSERT_TIME_STAMP);
7348 	if (rval != QL_SUCCESS) {
7349 		EL(ha, "f/w extended trace insert"
7350 		    "time stamp failed: %xh\n", rval);
7351 		cmd->Status = EXT_STATUS_ERR;
7352 		cmd->ResponseLen = 0;
7353 		return;
7354 	}
7355 
7356 	/* Disable Tracing */
7357 	rval = ql_fw_etrace(ha, &ha->fwexttracebuf, FTO_EXT_TRACE_DISABLE);
7358 	if (rval != QL_SUCCESS) {
7359 		EL(ha, "f/w extended trace disable failed: %xh\n", rval);
7360 		cmd->Status = EXT_STATUS_ERR;
7361 		cmd->ResponseLen = 0;
7362 		return;
7363 	}
7364 
7365 	/* Allocate payload buffer */
7366 	payload = kmem_zalloc(FWEXTSIZE, KM_SLEEP);
7367 	if (payload == NULL) {
7368 		EL(ha, "failed, kmem_zalloc\n");
7369 		cmd->Status = EXT_STATUS_NO_MEMORY;
7370 		cmd->ResponseLen = 0;
7371 		return;
7372 	}
7373 
7374 	/* Sync DMA buffer. */
7375 	(void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
7376 	    FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);
7377 
7378 	/* Copy trace buffer data. */
7379 	ddi_rep_get8(ha->fwexttracebuf.acc_handle, (uint8_t *)payload,
7380 	    (uint8_t *)ha->fwexttracebuf.bp, FWEXTSIZE,
7381 	    DDI_DEV_AUTOINCR);
7382 
7383 	/* Send payload to application. */
7384 	if (ql_send_buffer_data(payload, (caddr_t)(uintptr_t)cmd->ResponseAdr,
7385 	    cmd->ResponseLen, mode) != cmd->ResponseLen) {
7386 		EL(ha, "failed, send_buffer_data\n");
7387 		cmd->Status = EXT_STATUS_COPY_ERR;
7388 		cmd->ResponseLen = 0;
7389 	} else {
7390 		cmd->Status = EXT_STATUS_OK;
7391 	}
7392 
7393 	kmem_free(payload, FWEXTSIZE);
7394 
7395 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7396 }
7397 
7398 /*
7399  * ql_get_fwfcetrace
7400  *	Dumps f/w fibre channel event trace buffer
7401  *
7402  * Input:
7403  *	ha:	adapter state pointer.
7404  *	bp:	buffer address.
7405  *	mode:	flags
7406  *
7407  * Returns:
7408  *
7409  * Context:
7410  *	Kernel context.
7411  */
7412 /* ARGSUSED */
7413 static void
7414 ql_get_fwfcetrace(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7415 {
7416 	int	rval;
7417 	caddr_t	payload;
7418 
7419 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7420 
7421 	if (CFG_IST(ha, CFG_CTRL_242581) == 0) {
7422 		EL(ha, "invalid request for this HBA\n");
7423 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7424 		cmd->ResponseLen = 0;
7425 		return;
7426 	}
7427 
7428 	if ((CFG_IST(ha, CFG_ENABLE_FWFCETRACE) == 0) ||
7429 	    (ha->fwfcetracebuf.bp == NULL)) {
7430 		EL(ha, "f/w FCE trace is not enabled\n");
7431 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7432 		cmd->ResponseLen = 0;
7433 		return;
7434 	}
7435 
7436 	if (cmd->ResponseLen < FWFCESIZE) {
7437 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
7438 		cmd->DetailStatus = FWFCESIZE;
7439 		EL(ha, "failed, ResponseLen (%xh) < %xh (FWFCESIZE)\n",
7440 		    cmd->ResponseLen, FWFCESIZE);
7441 		cmd->ResponseLen = 0;
7442 		return;
7443 	}
7444 
7445 	/* Disable Tracing */
7446 	rval = ql_fw_etrace(ha, &ha->fwfcetracebuf, FTO_FCE_TRACE_DISABLE);
7447 	if (rval != QL_SUCCESS) {
7448 		EL(ha, "f/w FCE trace disable failed: %xh\n", rval);
7449 		cmd->Status = EXT_STATUS_ERR;
7450 		cmd->ResponseLen = 0;
7451 		return;
7452 	}
7453 
7454 	/* Allocate payload buffer */
7455 	payload = kmem_zalloc(FWEXTSIZE, KM_SLEEP);
7456 	if (payload == NULL) {
7457 		EL(ha, "failed, kmem_zalloc\n");
7458 		cmd->Status = EXT_STATUS_NO_MEMORY;
7459 		cmd->ResponseLen = 0;
7460 		return;
7461 	}
7462 
7463 	/* Sync DMA buffer. */
7464 	(void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
7465 	    FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);
7466 
7467 	/* Copy trace buffer data. */
7468 	ddi_rep_get8(ha->fwfcetracebuf.acc_handle, (uint8_t *)payload,
7469 	    (uint8_t *)ha->fwfcetracebuf.bp, FWFCESIZE,
7470 	    DDI_DEV_AUTOINCR);
7471 
7472 	/* Send payload to application. */
7473 	if (ql_send_buffer_data(payload, (caddr_t)(uintptr_t)cmd->ResponseAdr,
7474 	    cmd->ResponseLen, mode) != cmd->ResponseLen) {
7475 		EL(ha, "failed, send_buffer_data\n");
7476 		cmd->Status = EXT_STATUS_COPY_ERR;
7477 		cmd->ResponseLen = 0;
7478 	} else {
7479 		cmd->Status = EXT_STATUS_OK;
7480 	}
7481 
7482 	kmem_free(payload, FWFCESIZE);
7483 
7484 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7485 }
7486 
7487 /*
7488  * ql_get_pci_data
7489  *	Retrieves pci config space data
7490  *
7491  * Input:
7492  *	ha:	adapter state pointer.
7493  *	cmd:	Local EXT_IOCTL cmd struct pointer.
7494  *	mode:	flags.
7495  *
7496  * Returns:
7497  *	None, request status indicated in cmd->Status.
7498  *
7499  * Context:
7500  *	Kernel context.
7501  *
7502  */
7503 static void
7504 ql_get_pci_data(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7505 {
7506 	uint8_t		cap_ptr;
7507 	uint8_t		cap_id;
7508 	uint32_t	buf_size = 256;
7509 
7510 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7511 
7512 	/*
7513 	 * First check the "Capabilities List" bit of the status register.
7514 	 */
7515 	if (ql_pci_config_get16(ha, PCI_CONF_STAT) & PCI_STAT_CAP) {
7516 		/*
7517 		 * Now get the capability pointer
7518 		 */
7519 		cap_ptr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR);
7520 		while (cap_ptr != PCI_CAP_NEXT_PTR_NULL) {
7521 			/*
7522 			 * Check for the pcie capability.
7523 			 */
7524 			cap_id = (uint8_t)ql_pci_config_get8(ha, cap_ptr);
7525 			if (cap_id == PCI_CAP_ID_PCI_E) {
7526 				buf_size = 4096;
7527 				break;
7528 			}
7529 			cap_ptr = (uint8_t)ql_pci_config_get8(ha,
7530 			    (cap_ptr + PCI_CAP_NEXT_PTR));
7531 		}
7532 	}
7533 
7534 	if (cmd->ResponseLen < buf_size) {
7535 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
7536 		cmd->DetailStatus = buf_size;
7537 		EL(ha, "failed ResponseLen < buf_size, len passed=%xh\n",
7538 		    cmd->ResponseLen);
7539 		return;
7540 	}
7541 
7542 	/* Dump PCI config data. */
7543 	if ((ql_pci_dump(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
7544 	    buf_size, mode)) != 0) {
7545 		cmd->Status = EXT_STATUS_COPY_ERR;
7546 		cmd->DetailStatus = 0;
7547 		EL(ha, "failed, copy err pci_dump\n");
7548 	} else {
7549 		cmd->Status = EXT_STATUS_OK;
7550 		cmd->DetailStatus = buf_size;
7551 	}
7552 
7553 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7554 }
7555 
7556 /*
7557  * ql_pci_dump
7558  *	Dumps PCI config data to application buffer.
7559  *
7560  * Input:
7561  *	ha = adapter state pointer.
7562  *	bp = user buffer address.
7563  *
7564  * Returns:
7565  *
7566  * Context:
7567  *	Kernel context.
7568  */
7569 int
7570 ql_pci_dump(ql_adapter_state_t *ha, uint32_t *bp, uint32_t pci_size, int mode)
7571 {
7572 	uint32_t	pci_os;
7573 	uint32_t	*ptr32, *org_ptr32;
7574 
7575 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7576 
7577 	ptr32 = kmem_zalloc(pci_size, KM_SLEEP);
7578 	if (ptr32 == NULL) {
7579 		EL(ha, "failed kmem_zalloc\n");
7580 		return (ENOMEM);
7581 	}
7582 
7583 	/* store the initial value of ptr32 */
7584 	org_ptr32 = ptr32;
7585 	for (pci_os = 0; pci_os < pci_size; pci_os += 4) {
7586 		*ptr32 = (uint32_t)ql_pci_config_get32(ha, pci_os);
7587 		LITTLE_ENDIAN_32(ptr32);
7588 		ptr32++;
7589 	}
7590 
7591 	if (ddi_copyout((void *)org_ptr32, (void *)bp, pci_size, mode) !=
7592 	    0) {
7593 		EL(ha, "failed ddi_copyout\n");
7594 		kmem_free(org_ptr32, pci_size);
7595 		return (EFAULT);
7596 	}
7597 
7598 	QL_DUMP_9(org_ptr32, 8, pci_size);
7599 
7600 	kmem_free(org_ptr32, pci_size);
7601 
7602 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7603 
7604 	return (0);
7605 }
7606 
7607 /*
7608  * ql_menlo_reset
7609  *	Reset Menlo
7610  *
7611  * Input:
7612  *	ha:	adapter state pointer.
7613  *	bp:	buffer address.
7614  *	mode:	flags
7615  *
7616  * Returns:
7617  *
7618  * Context:
7619  *	Kernel context.
7620  */
7621 static void
7622 ql_menlo_reset(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7623 {
7624 	EXT_MENLO_RESET	rst;
7625 	ql_mbx_data_t	mr;
7626 	int		rval;
7627 
7628 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7629 
7630 	if ((CFG_IST(ha, CFG_CTRL_MENLO)) == 0) {
7631 		EL(ha, "failed, invalid request for HBA\n");
7632 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7633 		cmd->ResponseLen = 0;
7634 		return;
7635 	}
7636 
7637 	/*
7638 	 * TODO: only vp_index 0 can do this (?)
7639 	 */
7640 
7641 	/*  Verify the size of request structure. */
7642 	if (cmd->RequestLen < sizeof (EXT_MENLO_RESET)) {
7643 		/* Return error */
7644 		EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen,
7645 		    sizeof (EXT_MENLO_RESET));
7646 		cmd->Status = EXT_STATUS_INVALID_PARAM;
7647 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
7648 		cmd->ResponseLen = 0;
7649 		return;
7650 	}
7651 
7652 	/* Get reset request. */
7653 	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
7654 	    (void *)&rst, sizeof (EXT_MENLO_RESET), mode) != 0) {
7655 		EL(ha, "failed, ddi_copyin\n");
7656 		cmd->Status = EXT_STATUS_COPY_ERR;
7657 		cmd->ResponseLen = 0;
7658 		return;
7659 	}
7660 
7661 	/* Wait for I/O to stop and daemon to stall. */
7662 	if (ql_suspend_hba(ha, 0) != QL_SUCCESS) {
7663 		EL(ha, "ql_stall_driver failed\n");
7664 		ql_restart_hba(ha);
7665 		cmd->Status = EXT_STATUS_BUSY;
7666 		cmd->ResponseLen = 0;
7667 		return;
7668 	}
7669 
7670 	rval = ql_reset_menlo(ha, &mr, rst.Flags);
7671 	if (rval != QL_SUCCESS) {
7672 		EL(ha, "failed, status=%xh\n", rval);
7673 		cmd->Status = EXT_STATUS_MAILBOX;
7674 		cmd->DetailStatus = rval;
7675 		cmd->ResponseLen = 0;
7676 	} else if (mr.mb[1] != 0) {
7677 		EL(ha, "failed, substatus=%d\n", mr.mb[1]);
7678 		cmd->Status = EXT_STATUS_ERR;
7679 		cmd->DetailStatus = mr.mb[1];
7680 		cmd->ResponseLen = 0;
7681 	}
7682 
7683 	ql_restart_hba(ha);
7684 
7685 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7686 }
7687 
7688 /*
7689  * ql_menlo_get_fw_version
7690  *	Get Menlo firmware version.
7691  *
7692  * Input:
7693  *	ha:	adapter state pointer.
7694  *	bp:	buffer address.
7695  *	mode:	flags
7696  *
7697  * Returns:
7698  *
7699  * Context:
7700  *	Kernel context.
7701  */
7702 static void
7703 ql_menlo_get_fw_version(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7704 {
7705 	int				rval;
7706 	ql_mbx_iocb_t			*pkt;
7707 	EXT_MENLO_GET_FW_VERSION	ver = {0};
7708 
7709 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7710 
7711 	if ((CFG_IST(ha, CFG_CTRL_MENLO)) == 0) {
7712 		EL(ha, "failed, invalid request for HBA\n");
7713 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7714 		cmd->ResponseLen = 0;
7715 		return;
7716 	}
7717 
7718 	if (cmd->ResponseLen < sizeof (EXT_MENLO_GET_FW_VERSION)) {
7719 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
7720 		cmd->DetailStatus = sizeof (EXT_MENLO_GET_FW_VERSION);
7721 		EL(ha, "ResponseLen=%d < %d\n", cmd->ResponseLen,
7722 		    sizeof (EXT_MENLO_GET_FW_VERSION));
7723 		cmd->ResponseLen = 0;
7724 		return;
7725 	}
7726 
7727 	/* Allocate packet. */
7728 	pkt = kmem_zalloc(sizeof (ql_mbx_iocb_t), KM_SLEEP);
7729 	if (pkt == NULL) {
7730 		EL(ha, "failed, kmem_zalloc\n");
7731 		cmd->Status = EXT_STATUS_NO_MEMORY;
7732 		cmd->ResponseLen = 0;
7733 		return;
7734 	}
7735 
7736 	pkt->mvfy.entry_type = VERIFY_MENLO_TYPE;
7737 	pkt->mvfy.entry_count = 1;
7738 	pkt->mvfy.options_status = LE_16(VMF_DO_NOT_UPDATE_FW);
7739 
7740 	rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, sizeof (ql_mbx_iocb_t));
7741 	LITTLE_ENDIAN_16(&pkt->mvfy.options_status);
7742 	LITTLE_ENDIAN_16(&pkt->mvfy.failure_code);
7743 	ver.FwVersion = LE_32(pkt->mvfy.fw_version);
7744 
7745 	if (rval != QL_SUCCESS || (pkt->mvfy.entry_status & 0x3c) != 0 ||
7746 	    pkt->mvfy.options_status != CS_COMPLETE) {
7747 		/* Command error */
7748 		EL(ha, "failed, status=%xh, es=%xh, cs=%xh, fc=%xh\n", rval,
7749 		    pkt->mvfy.entry_status & 0x3c, pkt->mvfy.options_status,
7750 		    pkt->mvfy.failure_code);
7751 		cmd->Status = EXT_STATUS_ERR;
7752 		cmd->DetailStatus = rval != QL_SUCCESS ? rval :
7753 		    QL_FUNCTION_FAILED;
7754 		cmd->ResponseLen = 0;
7755 	} else if (ddi_copyout((void *)&ver,
7756 	    (void *)(uintptr_t)cmd->ResponseAdr,
7757 	    sizeof (EXT_MENLO_GET_FW_VERSION), mode) != 0) {
7758 		EL(ha, "failed, ddi_copyout\n");
7759 		cmd->Status = EXT_STATUS_COPY_ERR;
7760 		cmd->ResponseLen = 0;
7761 	} else {
7762 		cmd->ResponseLen = sizeof (EXT_MENLO_GET_FW_VERSION);
7763 	}
7764 
7765 	kmem_free(pkt, sizeof (ql_mbx_iocb_t));
7766 
7767 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7768 }
7769 
7770 /*
7771  * ql_menlo_update_fw
7772  *	Get Menlo update firmware.
7773  *
7774  * Input:
7775  *	ha:	adapter state pointer.
7776  *	bp:	buffer address.
7777  *	mode:	flags
7778  *
7779  * Returns:
7780  *
7781  * Context:
7782  *	Kernel context.
7783  */
7784 static void
7785 ql_menlo_update_fw(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7786 {
7787 	ql_mbx_iocb_t		*pkt;
7788 	dma_mem_t		*dma_mem;
7789 	EXT_MENLO_UPDATE_FW	fw;
7790 	uint32_t		*ptr32;
7791 	int			rval;
7792 
7793 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7794 
7795 	if ((CFG_IST(ha, CFG_CTRL_MENLO)) == 0) {
7796 		EL(ha, "failed, invalid request for HBA\n");
7797 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7798 		cmd->ResponseLen = 0;
7799 		return;
7800 	}
7801 
7802 	/*
7803 	 * TODO: only vp_index 0 can do this (?)
7804 	 */
7805 
7806 	/*  Verify the size of request structure. */
7807 	if (cmd->RequestLen < sizeof (EXT_MENLO_UPDATE_FW)) {
7808 		/* Return error */
7809 		EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen,
7810 		    sizeof (EXT_MENLO_UPDATE_FW));
7811 		cmd->Status = EXT_STATUS_INVALID_PARAM;
7812 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
7813 		cmd->ResponseLen = 0;
7814 		return;
7815 	}
7816 
7817 	/* Get update fw request. */
7818 	if (ddi_copyin((caddr_t)(uintptr_t)cmd->RequestAdr, (caddr_t)&fw,
7819 	    sizeof (EXT_MENLO_UPDATE_FW), mode) != 0) {
7820 		EL(ha, "failed, ddi_copyin\n");
7821 		cmd->Status = EXT_STATUS_COPY_ERR;
7822 		cmd->ResponseLen = 0;
7823 		return;
7824 	}
7825 
7826 	/* Wait for I/O to stop and daemon to stall. */
7827 	if (ql_suspend_hba(ha, 0) != QL_SUCCESS) {
7828 		EL(ha, "ql_stall_driver failed\n");
7829 		ql_restart_hba(ha);
7830 		cmd->Status = EXT_STATUS_BUSY;
7831 		cmd->ResponseLen = 0;
7832 		return;
7833 	}
7834 
7835 	/* Allocate packet. */
7836 	dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP);
7837 	if (dma_mem == NULL) {
7838 		EL(ha, "failed, kmem_zalloc\n");
7839 		cmd->Status = EXT_STATUS_NO_MEMORY;
7840 		cmd->ResponseLen = 0;
7841 		return;
7842 	}
7843 	pkt = kmem_zalloc(sizeof (ql_mbx_iocb_t), KM_SLEEP);
7844 	if (pkt == NULL) {
7845 		EL(ha, "failed, kmem_zalloc\n");
7846 		kmem_free(dma_mem, sizeof (dma_mem_t));
7847 		ql_restart_hba(ha);
7848 		cmd->Status = EXT_STATUS_NO_MEMORY;
7849 		cmd->ResponseLen = 0;
7850 		return;
7851 	}
7852 
7853 	/* Get DMA memory for the IOCB */
7854 	if (ql_get_dma_mem(ha, dma_mem, fw.TotalByteCount, LITTLE_ENDIAN_DMA,
7855 	    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
7856 		cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
7857 		    "alloc failed", QL_NAME, ha->instance);
7858 		kmem_free(pkt, sizeof (ql_mbx_iocb_t));
7859 		kmem_free(dma_mem, sizeof (dma_mem_t));
7860 		ql_restart_hba(ha);
7861 		cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
7862 		cmd->ResponseLen = 0;
7863 		return;
7864 	}
7865 
7866 	/* Get firmware data. */
7867 	if (ql_get_buffer_data((caddr_t)(uintptr_t)fw.pFwDataBytes, dma_mem->bp,
7868 	    fw.TotalByteCount, mode) != fw.TotalByteCount) {
7869 		EL(ha, "failed, get_buffer_data\n");
7870 		ql_free_dma_resource(ha, dma_mem);
7871 		kmem_free(pkt, sizeof (ql_mbx_iocb_t));
7872 		kmem_free(dma_mem, sizeof (dma_mem_t));
7873 		ql_restart_hba(ha);
7874 		cmd->Status = EXT_STATUS_COPY_ERR;
7875 		cmd->ResponseLen = 0;
7876 		return;
7877 	}
7878 
7879 	/* Sync DMA buffer. */
7880 	(void) ddi_dma_sync(dma_mem->dma_handle, 0, dma_mem->size,
7881 	    DDI_DMA_SYNC_FORDEV);
7882 
7883 	pkt->mvfy.entry_type = VERIFY_MENLO_TYPE;
7884 	pkt->mvfy.entry_count = 1;
7885 	pkt->mvfy.options_status = (uint16_t)LE_16(fw.Flags);
7886 	ptr32 = dma_mem->bp;
7887 	pkt->mvfy.fw_version = LE_32(ptr32[2]);
7888 	pkt->mvfy.fw_size = LE_32(fw.TotalByteCount);
7889 	pkt->mvfy.fw_sequence_size = LE_32(fw.TotalByteCount);
7890 	pkt->mvfy.dseg_count = LE_16(1);
7891 	pkt->mvfy.dseg_0_address[0] = (uint32_t)
7892 	    LE_32(LSD(dma_mem->cookie.dmac_laddress));
7893 	pkt->mvfy.dseg_0_address[1] = (uint32_t)
7894 	    LE_32(MSD(dma_mem->cookie.dmac_laddress));
7895 	pkt->mvfy.dseg_0_length = LE_32(fw.TotalByteCount);
7896 
7897 	rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, sizeof (ql_mbx_iocb_t));
7898 	LITTLE_ENDIAN_16(&pkt->mvfy.options_status);
7899 	LITTLE_ENDIAN_16(&pkt->mvfy.failure_code);
7900 
7901 	if (rval != QL_SUCCESS || (pkt->mvfy.entry_status & 0x3c) != 0 ||
7902 	    pkt->mvfy.options_status != CS_COMPLETE) {
7903 		/* Command error */
7904 		EL(ha, "failed, status=%xh, es=%xh, cs=%xh, fc=%xh\n", rval,
7905 		    pkt->mvfy.entry_status & 0x3c, pkt->mvfy.options_status,
7906 		    pkt->mvfy.failure_code);
7907 		cmd->Status = EXT_STATUS_ERR;
7908 		cmd->DetailStatus = rval != QL_SUCCESS ? rval :
7909 		    QL_FUNCTION_FAILED;
7910 		cmd->ResponseLen = 0;
7911 	}
7912 
7913 	ql_free_dma_resource(ha, dma_mem);
7914 	kmem_free(pkt, sizeof (ql_mbx_iocb_t));
7915 	kmem_free(dma_mem, sizeof (dma_mem_t));
7916 	ql_restart_hba(ha);
7917 
7918 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7919 }
7920 
7921 /*
7922  * ql_menlo_manage_info
7923  *	Get Menlo manage info.
7924  *
7925  * Input:
7926  *	ha:	adapter state pointer.
7927  *	bp:	buffer address.
7928  *	mode:	flags
7929  *
7930  * Returns:
7931  *
7932  * Context:
7933  *	Kernel context.
7934  */
7935 static void
7936 ql_menlo_manage_info(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7937 {
7938 	ql_mbx_iocb_t		*pkt;
7939 	dma_mem_t		*dma_mem = NULL;
7940 	EXT_MENLO_MANAGE_INFO	info;
7941 	int			rval;
7942 
7943 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7944 
7945 	if ((CFG_IST(ha, CFG_CTRL_MENLO)) == 0) {
7946 		EL(ha, "failed, invalid request for HBA\n");
7947 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7948 		cmd->ResponseLen = 0;
7949 		return;
7950 	}
7951 
7952 	/*  Verify the size of request structure. */
7953 	if (cmd->RequestLen < sizeof (EXT_MENLO_MANAGE_INFO)) {
7954 		/* Return error */
7955 		EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen,
7956 		    sizeof (EXT_MENLO_MANAGE_INFO));
7957 		cmd->Status = EXT_STATUS_INVALID_PARAM;
7958 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
7959 		cmd->ResponseLen = 0;
7960 		return;
7961 	}
7962 
7963 	/* Get manage info request. */
7964 	if (ddi_copyin((caddr_t)(uintptr_t)cmd->RequestAdr,
7965 	    (caddr_t)&info, sizeof (EXT_MENLO_MANAGE_INFO), mode) != 0) {
7966 		EL(ha, "failed, ddi_copyin\n");
7967 		cmd->Status = EXT_STATUS_COPY_ERR;
7968 		cmd->ResponseLen = 0;
7969 		return;
7970 	}
7971 
7972 	/* Allocate packet. */
7973 	pkt = kmem_zalloc(sizeof (ql_mbx_iocb_t), KM_SLEEP);
7974 	if (pkt == NULL) {
7975 		EL(ha, "failed, kmem_zalloc\n");
7976 		ql_restart_driver(ha);
7977 		cmd->Status = EXT_STATUS_NO_MEMORY;
7978 		cmd->ResponseLen = 0;
7979 		return;
7980 	}
7981 
7982 	pkt->mdata.entry_type = MENLO_DATA_TYPE;
7983 	pkt->mdata.entry_count = 1;
7984 	pkt->mdata.options_status = (uint16_t)LE_16(info.Operation);
7985 
7986 	/* Get DMA memory for the IOCB */
7987 	if (info.Operation == MENLO_OP_READ_MEM ||
7988 	    info.Operation == MENLO_OP_WRITE_MEM) {
7989 		pkt->mdata.total_byte_count = LE_32(info.TotalByteCount);
7990 		pkt->mdata.parameter_1 =
7991 		    LE_32(info.Parameters.ap.MenloMemory.StartingAddr);
7992 		dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t),
7993 		    KM_SLEEP);
7994 		if (dma_mem == NULL) {
7995 			EL(ha, "failed, kmem_zalloc\n");
7996 			kmem_free(pkt, sizeof (ql_mbx_iocb_t));
7997 			cmd->Status = EXT_STATUS_NO_MEMORY;
7998 			cmd->ResponseLen = 0;
7999 			return;
8000 		}
8001 		if (ql_get_dma_mem(ha, dma_mem, info.TotalByteCount,
8002 		    LITTLE_ENDIAN_DMA, QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
8003 			cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
8004 			    "alloc failed", QL_NAME, ha->instance);
8005 			kmem_free(dma_mem, sizeof (dma_mem_t));
8006 			kmem_free(pkt, sizeof (ql_mbx_iocb_t));
8007 			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
8008 			cmd->ResponseLen = 0;
8009 			return;
8010 		}
8011 		if (info.Operation == MENLO_OP_WRITE_MEM) {
8012 			/* Get data. */
8013 			if (ql_get_buffer_data(
8014 			    (caddr_t)(uintptr_t)info.pDataBytes,
8015 			    dma_mem->bp, info.TotalByteCount, mode) !=
8016 			    info.TotalByteCount) {
8017 				EL(ha, "failed, get_buffer_data\n");
8018 				ql_free_dma_resource(ha, dma_mem);
8019 				kmem_free(dma_mem, sizeof (dma_mem_t));
8020 				kmem_free(pkt, sizeof (ql_mbx_iocb_t));
8021 				cmd->Status = EXT_STATUS_COPY_ERR;
8022 				cmd->ResponseLen = 0;
8023 				return;
8024 			}
8025 			(void) ddi_dma_sync(dma_mem->dma_handle, 0,
8026 			    dma_mem->size, DDI_DMA_SYNC_FORDEV);
8027 		}
8028 		pkt->mdata.dseg_count = LE_16(1);
8029 		pkt->mdata.dseg_0_address[0] = (uint32_t)
8030 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
8031 		pkt->mdata.dseg_0_address[1] = (uint32_t)
8032 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
8033 		pkt->mdata.dseg_0_length = LE_32(info.TotalByteCount);
8034 	} else if (info.Operation & MENLO_OP_CHANGE_CONFIG) {
8035 		pkt->mdata.parameter_1 =
8036 		    LE_32(info.Parameters.ap.MenloConfig.ConfigParamID);
8037 		pkt->mdata.parameter_2 =
8038 		    LE_32(info.Parameters.ap.MenloConfig.ConfigParamData0);
8039 		pkt->mdata.parameter_3 =
8040 		    LE_32(info.Parameters.ap.MenloConfig.ConfigParamData1);
8041 	} else if (info.Operation & MENLO_OP_GET_INFO) {
8042 		pkt->mdata.parameter_1 =
8043 		    LE_32(info.Parameters.ap.MenloInfo.InfoDataType);
8044 		pkt->mdata.parameter_2 =
8045 		    LE_32(info.Parameters.ap.MenloInfo.InfoContext);
8046 	}
8047 
8048 	rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, sizeof (ql_mbx_iocb_t));
8049 	LITTLE_ENDIAN_16(&pkt->mdata.options_status);
8050 	LITTLE_ENDIAN_16(&pkt->mdata.failure_code);
8051 
8052 	if (rval != QL_SUCCESS || (pkt->mdata.entry_status & 0x3c) != 0 ||
8053 	    pkt->mdata.options_status != CS_COMPLETE) {
8054 		/* Command error */
8055 		EL(ha, "failed, status=%xh, es=%xh, cs=%xh, fc=%xh\n", rval,
8056 		    pkt->mdata.entry_status & 0x3c, pkt->mdata.options_status,
8057 		    pkt->mdata.failure_code);
8058 		cmd->Status = EXT_STATUS_ERR;
8059 		cmd->DetailStatus = rval != QL_SUCCESS ? rval :
8060 		    QL_FUNCTION_FAILED;
8061 		cmd->ResponseLen = 0;
8062 	} else if (info.Operation == MENLO_OP_READ_MEM) {
8063 		(void) ddi_dma_sync(dma_mem->dma_handle, 0, dma_mem->size,
8064 		    DDI_DMA_SYNC_FORKERNEL);
8065 		if (ql_send_buffer_data((caddr_t)(uintptr_t)info.pDataBytes,
8066 		    dma_mem->bp, info.TotalByteCount, mode) !=
8067 		    info.TotalByteCount) {
8068 			cmd->Status = EXT_STATUS_COPY_ERR;
8069 			cmd->ResponseLen = 0;
8070 		}
8071 	}
8072 
8073 	ql_free_dma_resource(ha, dma_mem);
8074 	kmem_free(dma_mem, sizeof (dma_mem_t));
8075 	kmem_free(pkt, sizeof (ql_mbx_iocb_t));
8076 
8077 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8078 }
8079 
8080 /*
8081  * ql_suspend_hba
8082  *	Suspends all adapter ports.
8083  *
8084  * Input:
8085  *	ha:		adapter state pointer.
8086  *	options:	BIT_0 --> leave driver stalled on exit if
8087  *				  failed.
8088  *
8089  * Returns:
8090  *	ql local function return status code.
8091  *
8092  * Context:
8093  *	Kernel context.
8094  */
8095 static int
8096 ql_suspend_hba(ql_adapter_state_t *ha, uint32_t opt)
8097 {
8098 	ql_adapter_state_t	*ha2;
8099 	ql_link_t		*link;
8100 	int			rval = QL_SUCCESS;
8101 
8102 	/* Quiesce I/O on all adapter ports */
8103 	for (link = ql_hba.first; link != NULL; link = link->next) {
8104 		ha2 = link->base_address;
8105 
8106 		if (ha2->fru_hba_index != ha->fru_hba_index) {
8107 			continue;
8108 		}
8109 
8110 		if ((rval = ql_stall_driver(ha2, opt)) != QL_SUCCESS) {
8111 			EL(ha, "ql_stall_driver status=%xh\n", rval);
8112 			break;
8113 		}
8114 	}
8115 
8116 	return (rval);
8117 }
8118 
8119 /*
8120  * ql_restart_hba
8121  *	Restarts adapter.
8122  *
8123  * Input:
8124  *	ha:	adapter state pointer.
8125  *
8126  * Context:
8127  *	Kernel context.
8128  */
8129 static void
8130 ql_restart_hba(ql_adapter_state_t *ha)
8131 {
8132 	ql_adapter_state_t	*ha2;
8133 	ql_link_t		*link;
8134 
8135 	/* Resume I/O on all adapter ports */
8136 	for (link = ql_hba.first; link != NULL; link = link->next) {
8137 		ha2 = link->base_address;
8138 
8139 		if (ha2->fru_hba_index != ha->fru_hba_index) {
8140 			continue;
8141 		}
8142 
8143 		ql_restart_driver(ha2);
8144 	}
8145 }
8146 
8147 /*
8148  * ql_get_vp_cnt_id
8149  *	Retrieves pci config space data
8150  *
8151  * Input:
8152  *	ha:	adapter state pointer.
8153  *	cmd:	Local EXT_IOCTL cmd struct pointer.
8154  *	mode:	flags.
8155  *
8156  * Returns:
8157  *	None, request status indicated in cmd->Status.
8158  *
8159  * Context:
8160  *	Kernel context.
8161  *
8162  */
8163 static void
8164 ql_get_vp_cnt_id(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8165 {
8166 	ql_adapter_state_t	*vha;
8167 	PEXT_VPORT_ID_CNT	ptmp_vp;
8168 	int			id = 0;
8169 	int			rval;
8170 	char			name[MAXPATHLEN];
8171 
8172 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
8173 
8174 	/*
8175 	 * To be backward compatible with older API
8176 	 * check for the size of old EXT_VPORT_ID_CNT
8177 	 */
8178 	if (cmd->ResponseLen < sizeof (EXT_VPORT_ID_CNT) &&
8179 	    (cmd->ResponseLen != EXT_OLD_VPORT_ID_CNT_SIZE)) {
8180 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
8181 		cmd->DetailStatus = sizeof (EXT_VPORT_ID_CNT);
8182 		EL(ha, "failed, ResponseLen < EXT_VPORT_ID_CNT, Len=%xh\n",
8183 		    cmd->ResponseLen);
8184 		cmd->ResponseLen = 0;
8185 		return;
8186 	}
8187 
8188 	ptmp_vp = (EXT_VPORT_ID_CNT *)
8189 	    kmem_zalloc(sizeof (EXT_VPORT_ID_CNT), KM_SLEEP);
8190 	if (ptmp_vp == NULL) {
8191 		EL(ha, "failed, kmem_zalloc\n");
8192 		cmd->ResponseLen = 0;
8193 		return;
8194 	}
8195 	vha = ha->vp_next;
8196 	while (vha != NULL) {
8197 		ptmp_vp->VpCnt++;
8198 		ptmp_vp->VpId[id] = vha->vp_index;
8199 		(void) ddi_pathname(vha->dip, name);
8200 		(void) strcpy((char *)ptmp_vp->vp_path[id], name);
8201 		ptmp_vp->VpDrvInst[id] = (int32_t)vha->instance;
8202 		id++;
8203 		vha = vha->vp_next;
8204 	}
8205 	rval = ddi_copyout((void *)ptmp_vp,
8206 	    (void *)(uintptr_t)(cmd->ResponseAdr),
8207 	    cmd->ResponseLen, mode);
8208 	if (rval != 0) {
8209 		cmd->Status = EXT_STATUS_COPY_ERR;
8210 		cmd->ResponseLen = 0;
8211 		EL(ha, "failed, ddi_copyout\n");
8212 	} else {
8213 		cmd->ResponseLen = sizeof (EXT_VPORT_ID_CNT);
8214 		QL_PRINT_9(CE_CONT, "(%d): done, vport_cnt=%d\n",
8215 		    ha->instance, ptmp_vp->VpCnt);
8216 	}
8217 
8218 }
8219 
8220 /*
8221  * ql_vp_ioctl
8222  *	Performs all EXT_CC_VPORT_CMD functions.
8223  *
8224  * Input:
8225  *	ha:	adapter state pointer.
8226  *	cmd:	Local EXT_IOCTL cmd struct pointer.
8227  *	mode:	flags.
8228  *
8229  * Returns:
8230  *	None, request status indicated in cmd->Status.
8231  *
8232  * Context:
8233  *	Kernel context.
8234  */
8235 static void
8236 ql_vp_ioctl(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8237 {
8238 	QL_PRINT_9(CE_CONT, "(%d): started, cmd=%d\n", ha->instance,
8239 	    cmd->SubCode);
8240 
8241 	/* case off on command subcode */
8242 	switch (cmd->SubCode) {
8243 	case EXT_VF_SC_VPORT_GETINFO:
8244 		ql_qry_vport(ha, cmd, mode);
8245 		break;
8246 	default:
8247 		/* function not supported. */
8248 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
8249 		EL(ha, "failed, Unsupported Subcode=%xh\n",
8250 		    cmd->SubCode);
8251 		break;
8252 	}
8253 
8254 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8255 }
8256 
8257 /*
8258  * ql_qry_vport
8259  *	Performs EXT_VF_SC_VPORT_GETINFO subfunction.
8260  *
8261  * Input:
8262  *	ha:	adapter state pointer.
8263  *	cmd:	EXT_IOCTL cmd struct pointer.
8264  *	mode:	flags.
8265  *
8266  * Returns:
8267  *	None, request status indicated in cmd->Status.
8268  *
8269  * Context:
8270  *	Kernel context.
8271  */
8272 static void
8273 ql_qry_vport(ql_adapter_state_t *vha, EXT_IOCTL *cmd, int mode)
8274 {
8275 	ql_adapter_state_t	*tmp_vha;
8276 	EXT_VPORT_INFO		tmp_vport = {0};
8277 	int			max_vport;
8278 
8279 	QL_PRINT_9(CE_CONT, "(%d): started\n", vha->instance);
8280 
8281 	if (cmd->ResponseLen < sizeof (EXT_VPORT_INFO)) {
8282 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
8283 		cmd->DetailStatus = sizeof (EXT_VPORT_INFO);
8284 		EL(vha, "failed, ResponseLen < EXT_VPORT_INFO, Len=%xh\n",
8285 		    cmd->ResponseLen);
8286 		cmd->ResponseLen = 0;
8287 		return;
8288 	}
8289 
8290 	/* Fill in the vport information. */
8291 	bcopy(vha->loginparams.node_ww_name.raw_wwn, tmp_vport.wwnn,
8292 	    EXT_DEF_WWN_NAME_SIZE);
8293 	bcopy(vha->loginparams.nport_ww_name.raw_wwn, tmp_vport.wwpn,
8294 	    EXT_DEF_WWN_NAME_SIZE);
8295 	tmp_vport.state = vha->state;
8296 
8297 	tmp_vha = vha->pha->vp_next;
8298 	while (tmp_vha != NULL) {
8299 		tmp_vport.used++;
8300 		tmp_vha = tmp_vha->vp_next;
8301 	}
8302 
8303 	max_vport = (CFG_IST(vha, CFG_CTRL_2422) ? MAX_24_VIRTUAL_PORTS :
8304 	    MAX_25_VIRTUAL_PORTS);
8305 	if (max_vport > tmp_vport.used) {
8306 		tmp_vport.free = max_vport - tmp_vport.used;
8307 	}
8308 
8309 	if (ddi_copyout((void *)&tmp_vport,
8310 	    (void *)(uintptr_t)(cmd->ResponseAdr),
8311 	    sizeof (EXT_VPORT_INFO), mode) != 0) {
8312 		cmd->Status = EXT_STATUS_COPY_ERR;
8313 		cmd->ResponseLen = 0;
8314 		EL(vha, "failed, ddi_copyout\n");
8315 	} else {
8316 		cmd->ResponseLen = sizeof (EXT_VPORT_INFO);
8317 		QL_PRINT_9(CE_CONT, "(%d): done\n", vha->instance);
8318 	}
8319 }
8320 
8321 /*
8322  * ql_access_flash
8323  *	Performs all EXT_CC_ACCESS_FLASH_OS functions.
8324  *
8325  * Input:
8326  *	pi:	port info pointer.
8327  *	cmd:	Local EXT_IOCTL cmd struct pointer.
8328  *	mode:	flags.
8329  *
8330  * Returns:
8331  *	None, request status indicated in cmd->Status.
8332  *
8333  * Context:
8334  *	Kernel context.
8335  */
8336 static void
8337 ql_access_flash(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8338 {
8339 	int	rval;
8340 
8341 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
8342 
8343 	switch (cmd->SubCode) {
8344 	case EXT_SC_FLASH_READ:
8345 		if ((rval = ql_flash_fcode_dump(ha,
8346 		    (void *)(uintptr_t)(cmd->ResponseAdr),
8347 		    (size_t)(cmd->ResponseLen), cmd->Reserved1, mode)) != 0) {
8348 			cmd->Status = EXT_STATUS_COPY_ERR;
8349 			cmd->ResponseLen = 0;
8350 			EL(ha, "flash_fcode_dump status=%xh\n", rval);
8351 		}
8352 		break;
8353 	case EXT_SC_FLASH_WRITE:
8354 		if ((rval = ql_r_m_w_flash(ha,
8355 		    (void *)(uintptr_t)(cmd->RequestAdr),
8356 		    (size_t)(cmd->RequestLen), cmd->Reserved1, mode)) !=
8357 		    QL_SUCCESS) {
8358 			cmd->Status = EXT_STATUS_COPY_ERR;
8359 			cmd->ResponseLen = 0;
8360 			EL(ha, "r_m_w_flash status=%xh\n", rval);
8361 		} else {
8362 			/* Reset caches on all adapter instances. */
8363 			ql_update_flash_caches(ha);
8364 		}
8365 		break;
8366 	default:
8367 		EL(ha, "unknown subcode=%xh\n", cmd->SubCode);
8368 		cmd->Status = EXT_STATUS_ERR;
8369 		cmd->ResponseLen = 0;
8370 		break;
8371 	}
8372 
8373 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8374 }
8375 
8376 /*
8377  * ql_reset_cmd
8378  *	Performs all EXT_CC_RESET_FW_OS functions.
8379  *
8380  * Input:
8381  *	ha:	adapter state pointer.
8382  *	cmd:	Local EXT_IOCTL cmd struct pointer.
8383  *
8384  * Returns:
8385  *	None, request status indicated in cmd->Status.
8386  *
8387  * Context:
8388  *	Kernel context.
8389  */
8390 static void
8391 ql_reset_cmd(ql_adapter_state_t *ha, EXT_IOCTL *cmd)
8392 {
8393 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
8394 
8395 	switch (cmd->SubCode) {
8396 	case EXT_SC_RESET_FC_FW:
8397 		EL(ha, "isp_abort_needed\n");
8398 		ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, 0);
8399 		break;
8400 	case EXT_SC_RESET_MPI_FW:
8401 		if (!(CFG_IST(ha, CFG_CTRL_81XX))) {
8402 			EL(ha, "invalid request for HBA\n");
8403 			cmd->Status = EXT_STATUS_INVALID_REQUEST;
8404 			cmd->ResponseLen = 0;
8405 		} else {
8406 			/* Wait for I/O to stop and daemon to stall. */
8407 			if (ql_suspend_hba(ha, 0) != QL_SUCCESS) {
8408 				EL(ha, "ql_suspend_hba failed\n");
8409 				cmd->Status = EXT_STATUS_BUSY;
8410 				cmd->ResponseLen = 0;
8411 			} else if (ql_restart_mpi(ha) != QL_SUCCESS) {
8412 				cmd->Status = EXT_STATUS_ERR;
8413 				cmd->ResponseLen = 0;
8414 			}
8415 			ql_restart_hba(ha);
8416 		}
8417 		break;
8418 	default:
8419 		EL(ha, "unknown subcode=%xh\n", cmd->SubCode);
8420 		cmd->Status = EXT_STATUS_ERR;
8421 		cmd->ResponseLen = 0;
8422 		break;
8423 	}
8424 
8425 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8426 }
8427