xref: /illumos-gate/usr/src/uts/common/io/fibre-channel/fca/qlc/ql_xioctl.c (revision 20d217c8569fadc52e6956aa7fcc78efd8d1f1b5)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /* Copyright 2009 QLogic Corporation */
23 
24 /*
25  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
26  * Use is subject to license terms.
27  */
28 
29 #pragma ident	"Copyright 2009 QLogic Corporation; ql_xioctl.c"
30 
31 /*
32  * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
33  *
34  * ***********************************************************************
35  * *									**
36  * *				NOTICE					**
37  * *		COPYRIGHT (C) 1996-2009 QLOGIC CORPORATION		**
38  * *			ALL RIGHTS RESERVED				**
39  * *									**
40  * ***********************************************************************
41  *
42  */
43 
44 #include <ql_apps.h>
45 #include <ql_api.h>
46 #include <ql_debug.h>
47 #include <ql_init.h>
48 #include <ql_iocb.h>
49 #include <ql_ioctl.h>
50 #include <ql_mbx.h>
51 #include <ql_xioctl.h>
52 
53 /*
54  * Local data
55  */
56 
57 /*
58  * Local prototypes
59  */
60 static int ql_sdm_ioctl(ql_adapter_state_t *, int, void *, int);
61 static int ql_sdm_setup(ql_adapter_state_t *, EXT_IOCTL **, void *, int,
62     boolean_t (*)(EXT_IOCTL *));
63 static boolean_t ql_validate_signature(EXT_IOCTL *);
64 static int ql_sdm_return(ql_adapter_state_t *, EXT_IOCTL *, void *, int);
65 static void ql_query(ql_adapter_state_t *, EXT_IOCTL *, int);
66 static void ql_qry_hba_node(ql_adapter_state_t *, EXT_IOCTL *, int);
67 static void ql_qry_hba_port(ql_adapter_state_t *, EXT_IOCTL *, int);
68 static void ql_qry_disc_port(ql_adapter_state_t *, EXT_IOCTL *, int);
69 static void ql_qry_disc_tgt(ql_adapter_state_t *, EXT_IOCTL *, int);
70 static void ql_qry_fw(ql_adapter_state_t *, EXT_IOCTL *, int);
71 static void ql_qry_chip(ql_adapter_state_t *, EXT_IOCTL *, int);
72 static void ql_qry_driver(ql_adapter_state_t *, EXT_IOCTL *, int);
73 static void ql_fcct(ql_adapter_state_t *, EXT_IOCTL *, int);
74 static void ql_aen_reg(ql_adapter_state_t *, EXT_IOCTL *, int);
75 static void ql_aen_get(ql_adapter_state_t *, EXT_IOCTL *, int);
76 static void ql_scsi_passthru(ql_adapter_state_t *, EXT_IOCTL *, int);
77 static void ql_wwpn_to_scsiaddr(ql_adapter_state_t *, EXT_IOCTL *, int);
78 static void ql_host_idx(ql_adapter_state_t *, EXT_IOCTL *, int);
79 static void ql_host_drvname(ql_adapter_state_t *, EXT_IOCTL *, int);
80 static void ql_read_nvram(ql_adapter_state_t *, EXT_IOCTL *, int);
81 static void ql_write_nvram(ql_adapter_state_t *, EXT_IOCTL *, int);
82 static void ql_read_flash(ql_adapter_state_t *, EXT_IOCTL *, int);
83 static void ql_write_flash(ql_adapter_state_t *, EXT_IOCTL *, int);
84 static void ql_write_vpd(ql_adapter_state_t *, EXT_IOCTL *, int);
85 static void ql_read_vpd(ql_adapter_state_t *, EXT_IOCTL *, int);
86 static void ql_diagnostic_loopback(ql_adapter_state_t *, EXT_IOCTL *, int);
87 static void ql_send_els_rnid(ql_adapter_state_t *, EXT_IOCTL *, int);
88 static void ql_set_host_data(ql_adapter_state_t *, EXT_IOCTL *, int);
89 static void ql_get_host_data(ql_adapter_state_t *, EXT_IOCTL *, int);
90 
91 static int ql_lun_count(ql_adapter_state_t *, ql_tgt_t *);
92 static int ql_report_lun(ql_adapter_state_t *, ql_tgt_t *);
93 static int ql_inq_scan(ql_adapter_state_t *, ql_tgt_t *, int);
94 static int ql_inq(ql_adapter_state_t *, ql_tgt_t *, int, ql_mbx_iocb_t *,
95     uint8_t);
96 static uint32_t	ql_get_buffer_data(caddr_t, caddr_t, uint32_t, int);
97 static uint32_t ql_send_buffer_data(caddr_t, caddr_t, uint32_t, int);
98 static int ql_24xx_flash_desc(ql_adapter_state_t *);
99 static int ql_setup_flash(ql_adapter_state_t *);
100 static ql_tgt_t *ql_find_port(ql_adapter_state_t *, uint8_t *, uint16_t);
101 static int ql_flash_fcode_load(ql_adapter_state_t *, void *, uint32_t, int);
102 static int ql_flash_fcode_dump(ql_adapter_state_t *, void *, uint32_t,
103     uint32_t, int);
104 static int ql_program_flash_address(ql_adapter_state_t *, uint32_t,
105     uint8_t);
106 static void ql_set_rnid_parameters(ql_adapter_state_t *, EXT_IOCTL *, int);
107 static void ql_get_rnid_parameters(ql_adapter_state_t *, EXT_IOCTL *, int);
108 static int ql_reset_statistics(ql_adapter_state_t *, EXT_IOCTL *);
109 static void ql_get_statistics(ql_adapter_state_t *, EXT_IOCTL *, int);
110 static void ql_get_statistics_fc(ql_adapter_state_t *, EXT_IOCTL *, int);
111 static void ql_get_statistics_fc4(ql_adapter_state_t *, EXT_IOCTL *, int);
112 static void ql_set_led_state(ql_adapter_state_t *, EXT_IOCTL *, int);
113 static void ql_get_led_state(ql_adapter_state_t *, EXT_IOCTL *, int);
114 static void ql_drive_led(ql_adapter_state_t *, uint32_t);
115 static uint32_t ql_setup_led(ql_adapter_state_t *);
116 static uint32_t ql_wrapup_led(ql_adapter_state_t *);
117 static void ql_get_port_summary(ql_adapter_state_t *, EXT_IOCTL *, int);
118 static void ql_get_target_id(ql_adapter_state_t *, EXT_IOCTL *, int);
119 static void ql_get_sfp(ql_adapter_state_t *, EXT_IOCTL *, int);
120 static int ql_dump_sfp(ql_adapter_state_t *, void *, int);
121 static ql_fcache_t *ql_setup_fnode(ql_adapter_state_t *);
122 static void ql_get_fcache(ql_adapter_state_t *, EXT_IOCTL *, int);
123 static void ql_get_fcache_ex(ql_adapter_state_t *, EXT_IOCTL *, int);
124 void ql_update_fcache(ql_adapter_state_t *, uint8_t *, uint32_t);
125 static int ql_check_pci(ql_adapter_state_t *, ql_fcache_t *, uint32_t *);
126 static void ql_flash_layout_table(ql_adapter_state_t *, uint32_t);
127 static void ql_flash_nvram_defaults(ql_adapter_state_t *);
128 static void ql_port_param(ql_adapter_state_t *, EXT_IOCTL *, int);
130 static void ql_get_pci_data(ql_adapter_state_t *, EXT_IOCTL *, int);
131 static void ql_get_fwfcetrace(ql_adapter_state_t *, EXT_IOCTL *, int);
132 static void ql_get_fwexttrace(ql_adapter_state_t *, EXT_IOCTL *, int);
133 static void ql_menlo_reset(ql_adapter_state_t *, EXT_IOCTL *, int);
134 static void ql_menlo_get_fw_version(ql_adapter_state_t *, EXT_IOCTL *, int);
135 static void ql_menlo_update_fw(ql_adapter_state_t *, EXT_IOCTL *, int);
136 static void ql_menlo_manage_info(ql_adapter_state_t *, EXT_IOCTL *, int);
137 static int ql_suspend_hba(ql_adapter_state_t *, uint32_t);
138 static void ql_restart_hba(ql_adapter_state_t *);
139 static void ql_get_vp_cnt_id(ql_adapter_state_t *, EXT_IOCTL *, int);
140 static void ql_vp_ioctl(ql_adapter_state_t *, EXT_IOCTL *, int);
141 static void ql_qry_vport(ql_adapter_state_t *, EXT_IOCTL *, int);
142 static void ql_access_flash(ql_adapter_state_t *, EXT_IOCTL *, int);
143 static void ql_reset_cmd(ql_adapter_state_t *, EXT_IOCTL *);
144 static void ql_update_flash_caches(ql_adapter_state_t *);
145 static void ql_get_dcbx_parameters(ql_adapter_state_t *, EXT_IOCTL *, int);
146 
147 /* ******************************************************************** */
148 /*			External IOCTL support.				*/
149 /* ******************************************************************** */
150 
151 /*
152  * ql_alloc_xioctl_resource
153  *	Allocates resources needed by module code.
154  *
155  * Input:
156  *	ha:		adapter state pointer.
157  *
158  * Returns:
159  *	0:	success; ENOMEM: memory allocation failure.
160  *
161  * Context:
162  *	Kernel context.
163  */
164 int
165 ql_alloc_xioctl_resource(ql_adapter_state_t *ha)
166 {
167 	ql_xioctl_t	*xp;
168 
169 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
170 
171 	if (ha->xioctl != NULL) {
172 		QL_PRINT_9(CE_CONT, "(%d): already allocated done\n",
173 		    ha->instance);
174 		return (0);
175 	}
176 
177 	xp = kmem_zalloc(sizeof (ql_xioctl_t), KM_SLEEP);
178 	if (xp == NULL) {
179 		EL(ha, "failed, kmem_zalloc\n");
180 		return (ENOMEM);
181 	}
182 	ha->xioctl = xp;
183 
184 	/* Allocate AEN tracking buffer */
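	/*
	 * This is the fixed ring of EXT_DEF_MAX_AEN_QUEUE entries that
	 * ql_enqueue_aen() fills (interrupt or kernel context) and that
	 * ql_aen_get() (EXT_CC_GET_AEN) drains for the application.
	 */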
185 	xp->aen_tracking_queue = kmem_zalloc(EXT_DEF_MAX_AEN_QUEUE *
186 	    sizeof (EXT_ASYNC_EVENT), KM_SLEEP);
187 	if (xp->aen_tracking_queue == NULL) {
188 		EL(ha, "failed, kmem_zalloc-2\n");
189 		ql_free_xioctl_resource(ha);
190 		return (ENOMEM);
191 	}
192 
193 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
194 
195 	return (0);
196 }
197 
198 /*
199  * ql_free_xioctl_resource
200  *	Frees resources used by module code.
201  *
202  * Input:
203  *	ha:		adapter state pointer.
204  *
205  * Context:
206  *	Kernel context.
207  */
208 void
209 ql_free_xioctl_resource(ql_adapter_state_t *ha)
210 {
211 	ql_xioctl_t	*xp = ha->xioctl;
212 
213 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
214 
215 	if (xp == NULL) {
216 		QL_PRINT_9(CE_CONT, "(%d): already freed\n", ha->instance);
217 		return;
218 	}
219 
220 	if (xp->aen_tracking_queue != NULL) {
221 		kmem_free(xp->aen_tracking_queue, EXT_DEF_MAX_AEN_QUEUE *
222 		    sizeof (EXT_ASYNC_EVENT));
223 		xp->aen_tracking_queue = NULL;
224 	}
225 
226 	kmem_free(xp, sizeof (ql_xioctl_t));
227 	ha->xioctl = NULL;
228 
229 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
230 }
231 
232 /*
233  * ql_xioctl
234  *	External IOCTL processing.
235  *
236  * Input:
237  *	ha:	adapter state pointer.
238  *	cmd:	function to perform
239  *	arg:	data type varies with request
240  *	mode:	flags
241  *	cred_p:	credentials pointer
242  *	rval_p:	pointer to result value
243  *
244  * Returns:
245  *	0:		success
246  *	ENXIO:		No such device or address
247  *	ENOPROTOOPT:	Protocol not available
248  *
249  * Context:
250  *	Kernel context.
251  */
252 /* ARGSUSED */
253 int
254 ql_xioctl(ql_adapter_state_t *ha, int cmd, intptr_t arg, int mode,
255     cred_t *cred_p, int *rval_p)
256 {
257 	int	rval;
258 
259 	QL_PRINT_9(CE_CONT, "(%d): started, cmd=%d\n", ha->instance, cmd);
260 
261 	if (ha->xioctl == NULL) {
262 		QL_PRINT_9(CE_CONT, "(%d): no context\n", ha->instance);
263 		return (ENXIO);
264 	}
265 
266 	switch (cmd) {
267 	case EXT_CC_QUERY:
268 	case EXT_CC_SEND_FCCT_PASSTHRU:
269 	case EXT_CC_REG_AEN:
270 	case EXT_CC_GET_AEN:
271 	case EXT_CC_SEND_SCSI_PASSTHRU:
272 	case EXT_CC_WWPN_TO_SCSIADDR:
273 	case EXT_CC_SEND_ELS_RNID:
274 	case EXT_CC_SET_DATA:
275 	case EXT_CC_GET_DATA:
276 	case EXT_CC_HOST_IDX:
277 	case EXT_CC_READ_NVRAM:
278 	case EXT_CC_UPDATE_NVRAM:
279 	case EXT_CC_READ_OPTION_ROM:
280 	case EXT_CC_READ_OPTION_ROM_EX:
281 	case EXT_CC_UPDATE_OPTION_ROM:
282 	case EXT_CC_UPDATE_OPTION_ROM_EX:
283 	case EXT_CC_GET_VPD:
284 	case EXT_CC_SET_VPD:
285 	case EXT_CC_LOOPBACK:
286 	case EXT_CC_GET_FCACHE:
287 	case EXT_CC_GET_FCACHE_EX:
288 	case EXT_CC_HOST_DRVNAME:
289 	case EXT_CC_GET_SFP_DATA:
290 	case EXT_CC_PORT_PARAM:
291 	case EXT_CC_GET_PCI_DATA:
292 	case EXT_CC_GET_FWEXTTRACE:
293 	case EXT_CC_GET_FWFCETRACE:
294 	case EXT_CC_GET_VP_CNT_ID:
295 	case EXT_CC_VPORT_CMD:
296 	case EXT_CC_ACCESS_FLASH:
297 	case EXT_CC_RESET_FW:
298 		rval = ql_sdm_ioctl(ha, cmd, (void *)arg, mode);
299 		break;
300 	default:
301 		/* function not supported. */
302 		EL(ha, "function=%d not supported\n", cmd);
303 		rval = ENOPROTOOPT;
304 	}
305 
306 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
307 
308 	return (rval);
309 }
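
/*
 * Illustrative usage sketch (not driver code; the device path below is an
 * assumption and the exact field widths come from ql_ioctl.h): a userland
 * management application builds an EXT_IOCTL header and issues one of the
 * EXT_CC_* commands handled above, roughly as follows.
 *
 *	EXT_IOCTL	ext = {0};
 *	EXT_HBA_NODE	node;
 *	int		fd = open("/devices/...:devctl", O_RDWR);
 *
 *	bcopy("QLOGIC", &ext.Signature, 6);
 *	ext.Version = EXT_VERSION;
 *	ext.HbaSelect = 0;			(0 selects the physical HBA)
 *	ext.SubCode = EXT_SC_QUERY_HBA_NODE;
 *	ext.ResponseAdr = (uintptr_t)&node;
 *	ext.ResponseLen = sizeof (EXT_HBA_NODE);
 *	(void) ioctl(fd, EXT_CC_QUERY, &ext);
 */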
310 
311 /*
312  * ql_sdm_ioctl
313  *	Provides ioctl functions for SAN/Device Management functions
314  *	AKA External Ioctl functions.
315  *
316  * Input:
317  *	ha:		adapter state pointer.
318  *	ioctl_code:	ioctl function to perform
319  *	arg:		Pointer to EXT_IOCTL cmd data in application land.
320  *	mode:		flags
321  *
322  * Returns:
323  *	0:	success
324  *	ENOMEM:	Alloc of local EXT_IOCTL struct failed.
325  *	EFAULT:	Copyin of caller's EXT_IOCTL struct failed or
326  *		copyout of EXT_IOCTL status info failed.
327  *	EINVAL:	Signature or version of caller's EXT_IOCTL invalid.
328  *	EBUSY:	Device busy
329  *
330  * Context:
331  *	Kernel context.
332  */
333 static int
334 ql_sdm_ioctl(ql_adapter_state_t *ha, int ioctl_code, void *arg, int mode)
335 {
336 	EXT_IOCTL		*cmd;
337 	int			rval;
338 	ql_adapter_state_t	*vha;
339 
340 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
341 
342 	/* Copy argument structure (EXT_IOCTL) from application land. */
343 	if ((rval = ql_sdm_setup(ha, &cmd, arg, mode,
344 	    ql_validate_signature)) != 0) {
345 		/*
346 		 * a non-zero value at this time means a problem getting
347 		 * the requested information from application land, just
348 		 * return the error code and hope for the best.
349 		 */
350 		EL(ha, "failed, sdm_setup\n");
351 		return (rval);
352 	}
353 
354 	/*
355 	 * Map the physical ha ptr (which the ioctl is called with)
356 	 * to the virtual ha that the caller is addressing.
357 	 */
358 	if (ha->flags & VP_ENABLED) {
359 		/*
360 		 * Special case: HbaSelect == 0 is physical ha
361 		 */
362 		if (cmd->HbaSelect != 0) {
363 			vha = ha->vp_next;
364 			while (vha != NULL) {
365 				if (vha->vp_index == cmd->HbaSelect) {
366 					ha = vha;
367 					break;
368 				}
369 				vha = vha->vp_next;
370 			}
371 
372 			/*
373 			 * If we can't find the specified vp index then
374 			 * we probably have an error (vp indexes shifting
375 			 * under our feet?).
376 			 */
377 			if (vha == NULL) {
378 				EL(ha, "Invalid HbaSelect vp index: %xh\n",
379 				    cmd->HbaSelect);
380 				cmd->Status = EXT_STATUS_INVALID_VPINDEX;
381 				cmd->ResponseLen = 0;
				kmem_free(cmd, sizeof (EXT_IOCTL));
382 				return (EFAULT);
383 			}
384 		}
385 	}
386 
387 	/*
388 	 * If driver is suspended, stalled, or powered down rtn BUSY
389 	 */
390 	if (ha->flags & ADAPTER_SUSPENDED ||
391 	    ha->task_daemon_flags & DRIVER_STALL ||
392 	    ha->power_level != PM_LEVEL_D0) {
393 		EL(ha, " %s\n", ha->flags & ADAPTER_SUSPENDED ?
394 		    "driver suspended" :
395 		    (ha->task_daemon_flags & DRIVER_STALL ? "driver stalled" :
396 		    "FCA powered down"));
397 		cmd->Status = EXT_STATUS_BUSY;
398 		cmd->ResponseLen = 0;
399 		rval = EBUSY;
400 
401 		/* Return results to caller */
402 		if ((ql_sdm_return(ha, cmd, arg, mode)) == -1) {
403 			EL(ha, "failed, sdm_return\n");
404 			rval = EFAULT;
405 		}
406 		return (rval);
407 	}
408 
409 	switch (ioctl_code) {
410 	case EXT_CC_QUERY_OS:
411 		ql_query(ha, cmd, mode);
412 		break;
413 	case EXT_CC_SEND_FCCT_PASSTHRU_OS:
414 		ql_fcct(ha, cmd, mode);
415 		break;
416 	case EXT_CC_REG_AEN_OS:
417 		ql_aen_reg(ha, cmd, mode);
418 		break;
419 	case EXT_CC_GET_AEN_OS:
420 		ql_aen_get(ha, cmd, mode);
421 		break;
422 	case EXT_CC_GET_DATA_OS:
423 		ql_get_host_data(ha, cmd, mode);
424 		break;
425 	case EXT_CC_SET_DATA_OS:
426 		ql_set_host_data(ha, cmd, mode);
427 		break;
428 	case EXT_CC_SEND_ELS_RNID_OS:
429 		ql_send_els_rnid(ha, cmd, mode);
430 		break;
431 	case EXT_CC_SCSI_PASSTHRU_OS:
432 		ql_scsi_passthru(ha, cmd, mode);
433 		break;
434 	case EXT_CC_WWPN_TO_SCSIADDR_OS:
435 		ql_wwpn_to_scsiaddr(ha, cmd, mode);
436 		break;
437 	case EXT_CC_HOST_IDX_OS:
438 		ql_host_idx(ha, cmd, mode);
439 		break;
440 	case EXT_CC_HOST_DRVNAME_OS:
441 		ql_host_drvname(ha, cmd, mode);
442 		break;
443 	case EXT_CC_READ_NVRAM_OS:
444 		ql_read_nvram(ha, cmd, mode);
445 		break;
446 	case EXT_CC_UPDATE_NVRAM_OS:
447 		ql_write_nvram(ha, cmd, mode);
448 		break;
449 	case EXT_CC_READ_OPTION_ROM_OS:
450 	case EXT_CC_READ_OPTION_ROM_EX_OS:
451 		ql_read_flash(ha, cmd, mode);
452 		break;
453 	case EXT_CC_UPDATE_OPTION_ROM_OS:
454 	case EXT_CC_UPDATE_OPTION_ROM_EX_OS:
455 		ql_write_flash(ha, cmd, mode);
456 		break;
457 	case EXT_CC_LOOPBACK_OS:
458 		ql_diagnostic_loopback(ha, cmd, mode);
459 		break;
460 	case EXT_CC_GET_VPD_OS:
461 		ql_read_vpd(ha, cmd, mode);
462 		break;
463 	case EXT_CC_SET_VPD_OS:
464 		ql_write_vpd(ha, cmd, mode);
465 		break;
466 	case EXT_CC_GET_FCACHE_OS:
467 		ql_get_fcache(ha, cmd, mode);
468 		break;
469 	case EXT_CC_GET_FCACHE_EX_OS:
470 		ql_get_fcache_ex(ha, cmd, mode);
471 		break;
472 	case EXT_CC_GET_SFP_DATA_OS:
473 		ql_get_sfp(ha, cmd, mode);
474 		break;
475 	case EXT_CC_PORT_PARAM_OS:
476 		ql_port_param(ha, cmd, mode);
477 		break;
478 	case EXT_CC_GET_PCI_DATA_OS:
479 		ql_get_pci_data(ha, cmd, mode);
480 		break;
481 	case EXT_CC_GET_FWEXTTRACE_OS:
482 		ql_get_fwexttrace(ha, cmd, mode);
483 		break;
484 	case EXT_CC_GET_FWFCETRACE_OS:
485 		ql_get_fwfcetrace(ha, cmd, mode);
486 		break;
487 	case EXT_CC_MENLO_RESET:
488 		ql_menlo_reset(ha, cmd, mode);
489 		break;
490 	case EXT_CC_MENLO_GET_FW_VERSION:
491 		ql_menlo_get_fw_version(ha, cmd, mode);
492 		break;
493 	case EXT_CC_MENLO_UPDATE_FW:
494 		ql_menlo_update_fw(ha, cmd, mode);
495 		break;
496 	case EXT_CC_MENLO_MANAGE_INFO:
497 		ql_menlo_manage_info(ha, cmd, mode);
498 		break;
499 	case EXT_CC_GET_VP_CNT_ID_OS:
500 		ql_get_vp_cnt_id(ha, cmd, mode);
501 		break;
502 	case EXT_CC_VPORT_CMD_OS:
503 		ql_vp_ioctl(ha, cmd, mode);
504 		break;
505 	case EXT_CC_ACCESS_FLASH_OS:
506 		ql_access_flash(ha, cmd, mode);
507 		break;
508 	case EXT_CC_RESET_FW_OS:
509 		ql_reset_cmd(ha, cmd);
510 		break;
511 	default:
512 		/* function not supported. */
513 		EL(ha, "failed, function not supported=%d\n", ioctl_code);
514 
515 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
516 		cmd->ResponseLen = 0;
517 		break;
518 	}
519 
520 	/* Return results to caller */
521 	if (ql_sdm_return(ha, cmd, arg, mode) == -1) {
522 		EL(ha, "failed, sdm_return\n");
523 		return (EFAULT);
524 	}
525 
526 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
527 
528 	return (0);
529 }
530 
531 /*
532  * ql_sdm_setup
533  *	Make a local copy of the EXT_IOCTL struct and validate it.
534  *
535  * Input:
536  *	ha:		adapter state pointer.
537  *	cmd_struct:	Pointer to location to store local adrs of EXT_IOCTL.
538  *	arg:		Address of application EXT_IOCTL cmd data
539  *	mode:		flags
540  *	val_sig:	Pointer to a function to validate the ioctl signature.
541  *
542  * Returns:
543  *	0:		success
544  *	EFAULT:		Copy in error of application EXT_IOCTL struct.
545  *	EINVAL:		Invalid version, signature.
546  *	ENOMEM:		Local allocation of EXT_IOCTL failed.
547  *
548  * Context:
549  *	Kernel context.
550  */
551 static int
552 ql_sdm_setup(ql_adapter_state_t *ha, EXT_IOCTL **cmd_struct, void *arg,
553     int mode, boolean_t (*val_sig)(EXT_IOCTL *))
554 {
555 	int		rval;
556 	EXT_IOCTL	*cmd;
557 
558 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
559 
560 	/* Allocate local memory for EXT_IOCTL. */
561 	*cmd_struct = NULL;
562 	cmd = (EXT_IOCTL *)kmem_zalloc(sizeof (EXT_IOCTL), KM_SLEEP);
563 	if (cmd == NULL) {
564 		EL(ha, "failed, kmem_zalloc\n");
565 		return (ENOMEM);
566 	}
567 	/* Get argument structure. */
568 	rval = ddi_copyin(arg, (void *)cmd, sizeof (EXT_IOCTL), mode);
569 	if (rval != 0) {
570 		EL(ha, "failed, ddi_copyin\n");
571 		rval = EFAULT;
572 	} else {
573 		/*
574 		 * Check signature and the version.
575 		 * If either are not valid then neither is the
576 		 * structure so don't attempt to return any error status
577 		 * because we can't trust what caller's arg points to.
578 		 * Just return the errno.
579 		 */
580 		if (val_sig(cmd) == 0) {
581 			EL(ha, "failed, signature\n");
582 			rval = EINVAL;
583 		} else if (cmd->Version > EXT_VERSION) {
584 			EL(ha, "failed, version\n");
585 			rval = EINVAL;
586 		}
587 	}
588 
589 	if (rval == 0) {
590 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
591 		*cmd_struct = cmd;
592 		cmd->Status = EXT_STATUS_OK;
593 		cmd->DetailStatus = 0;
594 	} else {
595 		kmem_free((void *)cmd, sizeof (EXT_IOCTL));
596 	}
597 
598 	return (rval);
599 }
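
/*
 * Note: on success ql_sdm_setup() hands the kmem-allocated EXT_IOCTL copy
 * to the caller, which is responsible for releasing it (normally via
 * ql_sdm_return()).
 */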
600 
601 /*
602  * ql_validate_signature
603  *	Validate the signature string for an external ioctl call.
604  *
605  * Input:
606  *	cmd_struct:	Pointer to EXT_IOCTL struct with the signature to validate.
607  *
608  * Returns:
609  *	B_TRUE:		Signature is valid.
610  *	B_FALSE:	Signature is NOT valid.
611  *
612  * Context:
613  *	Kernel context.
614  */
615 static boolean_t
616 ql_validate_signature(EXT_IOCTL *cmd_struct)
617 {
618 	/*
619 	 * Check signature.
620 	 *
621 	 * If signature is not valid then neither is the rest of
622 	 * the structure (e.g., can't trust it), so don't attempt
623 	 * to return any error status other than the errno.
624 	 */
625 	if (bcmp(&cmd_struct->Signature, "QLOGIC", 6) != 0) {
626 		QL_PRINT_2(CE_CONT, "failed,\n");
627 		return (B_FALSE);
628 	}
629 
630 	return (B_TRUE);
631 }
632 
633 /*
634  * ql_sdm_return
635  *	Copies return data/status to application land for
636  *	ioctl call using the SAN/Device Management EXT_IOCTL call interface.
637  *
638  * Input:
639  *	ha:		adapter state pointer.
640  *	cmd:		Pointer to kernel copy of requestor's EXT_IOCTL struct.
641  *	ioctl_code:	ioctl function to perform
643  *	mode:		flags
644  *
645  * Returns:
646  *	0:	success
647  *	EFAULT:	Copy out error.
648  *
649  * Context:
650  *	Kernel context.
651  */
652 /* ARGSUSED */
653 static int
654 ql_sdm_return(ql_adapter_state_t *ha, EXT_IOCTL *cmd, void *arg, int mode)
655 {
656 	int	rval = 0;
657 
658 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
659 
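	/*
	 * Only the EXT_IOCTL header status fields are written back here;
	 * any response payload was already copied out to cmd->ResponseAdr
	 * by the individual command handler.
	 */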
660 	rval |= ddi_copyout((void *)&cmd->ResponseLen,
661 	    (void *)&(((EXT_IOCTL*)arg)->ResponseLen), sizeof (uint32_t),
662 	    mode);
663 
664 	rval |= ddi_copyout((void *)&cmd->Status,
665 	    (void *)&(((EXT_IOCTL*)arg)->Status),
666 	    sizeof (cmd->Status), mode);
667 	rval |= ddi_copyout((void *)&cmd->DetailStatus,
668 	    (void *)&(((EXT_IOCTL*)arg)->DetailStatus),
669 	    sizeof (cmd->DetailStatus), mode);
670 
671 	kmem_free((void *)cmd, sizeof (EXT_IOCTL));
672 
673 	if (rval != 0) {
674 		/* Some copyout operation failed */
675 		EL(ha, "failed, ddi_copyout\n");
676 		return (EFAULT);
677 	}
678 
679 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
680 
681 	return (0);
682 }
683 
684 /*
685  * ql_query
686  *	Performs all EXT_CC_QUERY functions.
687  *
688  * Input:
689  *	ha:	adapter state pointer.
690  *	cmd:	Local EXT_IOCTL cmd struct pointer.
691  *	mode:	flags.
692  *
693  * Returns:
694  *	None, request status indicated in cmd->Status.
695  *
696  * Context:
697  *	Kernel context.
698  */
699 static void
700 ql_query(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
701 {
702 	QL_PRINT_9(CE_CONT, "(%d): started, cmd=%d\n", ha->instance,
703 	    cmd->SubCode);
704 
705 	/* case off on command subcode */
706 	switch (cmd->SubCode) {
707 	case EXT_SC_QUERY_HBA_NODE:
708 		ql_qry_hba_node(ha, cmd, mode);
709 		break;
710 	case EXT_SC_QUERY_HBA_PORT:
711 		ql_qry_hba_port(ha, cmd, mode);
712 		break;
713 	case EXT_SC_QUERY_DISC_PORT:
714 		ql_qry_disc_port(ha, cmd, mode);
715 		break;
716 	case EXT_SC_QUERY_DISC_TGT:
717 		ql_qry_disc_tgt(ha, cmd, mode);
718 		break;
719 	case EXT_SC_QUERY_DRIVER:
720 		ql_qry_driver(ha, cmd, mode);
721 		break;
722 	case EXT_SC_QUERY_FW:
723 		ql_qry_fw(ha, cmd, mode);
724 		break;
725 	case EXT_SC_QUERY_CHIP:
726 		ql_qry_chip(ha, cmd, mode);
727 		break;
728 	case EXT_SC_QUERY_DISC_LUN:
729 	default:
730 		/* function not supported. */
731 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
732 		EL(ha, "failed, Unsupported Subcode=%xh\n",
733 		    cmd->SubCode);
734 		break;
735 	}
736 
737 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
738 }
739 
740 /*
741  * ql_qry_hba_node
742  *	Performs EXT_SC_QUERY_HBA_NODE subfunction.
743  *
744  * Input:
745  *	ha:	adapter state pointer.
746  *	cmd:	EXT_IOCTL cmd struct pointer.
747  *	mode:	flags.
748  *
749  * Returns:
750  *	None, request status indicated in cmd->Status.
751  *
752  * Context:
753  *	Kernel context.
754  */
755 static void
756 ql_qry_hba_node(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
757 {
758 	EXT_HBA_NODE	tmp_node = {0};
759 	uint_t		len;
760 	caddr_t		bufp;
761 	ql_mbx_data_t	mr;
762 
763 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
764 
765 	if (cmd->ResponseLen < sizeof (EXT_HBA_NODE)) {
766 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
767 		cmd->DetailStatus = sizeof (EXT_HBA_NODE);
768 		EL(ha, "failed, ResponseLen < EXT_HBA_NODE, "
769 		    "Len=%xh\n", cmd->ResponseLen);
770 		cmd->ResponseLen = 0;
771 		return;
772 	}
773 
774 	/* fill in the values */
775 
776 	bcopy(ha->loginparams.node_ww_name.raw_wwn, tmp_node.WWNN,
777 	    EXT_DEF_WWN_NAME_SIZE);
778 
779 	(void) sprintf((char *)(tmp_node.Manufacturer), "QLogic Corporation");
780 
781 	(void) sprintf((char *)(tmp_node.Model), "%x", ha->device_id);
782 
783 	bcopy(&tmp_node.WWNN[5], tmp_node.SerialNum, 3);
784 
785 	(void) sprintf((char *)(tmp_node.DriverVersion), QL_VERSION);
786 
787 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
788 		size_t		verlen;
789 		uint16_t	w;
790 		char		*tmpptr;
791 
792 		verlen = strlen((char *)(tmp_node.DriverVersion));
793 		if (verlen + 5 > EXT_DEF_MAX_STR_SIZE) {
794 			EL(ha, "failed, No room for fpga version string\n");
795 		} else {
796 			w = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
797 			    (uint16_t *)
798 			    (ha->sbus_fpga_iobase + FPGA_REVISION));
799 
800 			tmpptr = (char *)&(tmp_node.DriverVersion[verlen+1]);
801 			if (tmpptr == NULL) {
802 				EL(ha, "Unable to insert fpga version str\n");
803 			} else {
804 				(void) sprintf(tmpptr, "%d.%d",
805 				    ((w & 0xf0) >> 4), (w & 0x0f));
806 				tmp_node.DriverAttr |= EXT_CC_HBA_NODE_SBUS;
807 			}
808 		}
809 	}
810 	(void) ql_get_fw_version(ha, &mr);
811 
812 	(void) sprintf((char *)(tmp_node.FWVersion), "%01d.%02d.%02d",
813 	    mr.mb[1], mr.mb[2], mr.mb[3]);
814 
815 	if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
816 		switch (mr.mb[6]) {
817 		case FWATTRIB_EF:
818 			(void) strcat((char *)(tmp_node.FWVersion), " EF");
819 			break;
820 		case FWATTRIB_TP:
821 			(void) strcat((char *)(tmp_node.FWVersion), " TP");
822 			break;
823 		case FWATTRIB_IP:
824 			(void) strcat((char *)(tmp_node.FWVersion), " IP");
825 			break;
826 		case FWATTRIB_IPX:
827 			(void) strcat((char *)(tmp_node.FWVersion), " IPX");
828 			break;
829 		case FWATTRIB_FL:
830 			(void) strcat((char *)(tmp_node.FWVersion), " FL");
831 			break;
832 		case FWATTRIB_FPX:
833 			(void) strcat((char *)(tmp_node.FWVersion), " FLX");
834 			break;
835 		default:
836 			break;
837 		}
838 	}
839 
840 	/* FCode version. */
841 	/*LINTED [Solaris DDI_DEV_T_ANY Lint error]*/
842 	if (ddi_getlongprop(DDI_DEV_T_ANY, ha->dip, PROP_LEN_AND_VAL_ALLOC |
843 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version", (caddr_t)&bufp,
844 	    (int *)&len) == DDI_PROP_SUCCESS) {
845 		if (len < EXT_DEF_MAX_STR_SIZE) {
846 			bcopy(bufp, tmp_node.OptRomVersion, len);
847 		} else {
848 			bcopy(bufp, tmp_node.OptRomVersion,
849 			    EXT_DEF_MAX_STR_SIZE - 1);
850 			tmp_node.OptRomVersion[EXT_DEF_MAX_STR_SIZE - 1] =
851 			    '\0';
852 		}
853 		kmem_free(bufp, len);
854 	} else {
855 		(void) sprintf((char *)tmp_node.OptRomVersion, "0");
856 	}
857 	tmp_node.PortCount = 1;
858 	tmp_node.InterfaceType = EXT_DEF_FC_INTF_TYPE;
859 
860 	if (ddi_copyout((void *)&tmp_node,
861 	    (void *)(uintptr_t)(cmd->ResponseAdr),
862 	    sizeof (EXT_HBA_NODE), mode) != 0) {
863 		cmd->Status = EXT_STATUS_COPY_ERR;
864 		cmd->ResponseLen = 0;
865 		EL(ha, "failed, ddi_copyout\n");
866 	} else {
867 		cmd->ResponseLen = sizeof (EXT_HBA_NODE);
868 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
869 	}
870 }
871 
872 /*
873  * ql_qry_hba_port
874  *	Performs EXT_SC_QUERY_HBA_PORT subfunction.
875  *
876  * Input:
877  *	ha:	adapter state pointer.
878  *	cmd:	EXT_IOCTL cmd struct pointer.
879  *	mode:	flags.
880  *
881  * Returns:
882  *	None, request status indicated in cmd->Status.
883  *
884  * Context:
885  *	Kernel context.
886  */
887 static void
888 ql_qry_hba_port(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
889 {
890 	ql_link_t	*link;
891 	ql_tgt_t	*tq;
892 	ql_mbx_data_t	mr;
893 	EXT_HBA_PORT	tmp_port = {0};
894 	int		rval;
895 	uint16_t	port_cnt, tgt_cnt, index;
896 
897 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
898 
899 	if (cmd->ResponseLen < sizeof (EXT_HBA_PORT)) {
900 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
901 		cmd->DetailStatus = sizeof (EXT_HBA_PORT);
902 		EL(ha, "failed, ResponseLen < EXT_HBA_PORT, Len=%xh\n",
903 		    cmd->ResponseLen);
904 		cmd->ResponseLen = 0;
905 		return;
906 	}
907 
908 	/* fill in the values */
909 
910 	bcopy(ha->loginparams.nport_ww_name.raw_wwn, tmp_port.WWPN,
911 	    EXT_DEF_WWN_NAME_SIZE);
912 	tmp_port.Id[0] = 0;
913 	tmp_port.Id[1] = ha->d_id.b.domain;
914 	tmp_port.Id[2] = ha->d_id.b.area;
915 	tmp_port.Id[3] = ha->d_id.b.al_pa;
916 
917 	/* For now we are an initiator-only driver */
918 	tmp_port.Type = EXT_DEF_INITIATOR_DEV;
919 
920 	if (ha->task_daemon_flags & LOOP_DOWN) {
921 		tmp_port.State = EXT_DEF_HBA_LOOP_DOWN;
922 	} else if (DRIVER_SUSPENDED(ha)) {
923 		tmp_port.State = EXT_DEF_HBA_SUSPENDED;
924 	} else {
925 		tmp_port.State = EXT_DEF_HBA_OK;
926 	}
927 
928 	if (ha->flags & POINT_TO_POINT) {
929 		tmp_port.Mode = EXT_DEF_P2P_MODE;
930 	} else {
931 		tmp_port.Mode = EXT_DEF_LOOP_MODE;
932 	}
933 	/*
934 	 * fill in the portspeed values.
935 	 *
936 	 * default to not yet negotiated state
937 	 */
938 	tmp_port.PortSpeed = EXT_PORTSPEED_NOT_NEGOTIATED;
939 
940 	if (tmp_port.State == EXT_DEF_HBA_OK) {
941 		if ((CFG_IST(ha, CFG_CTRL_2200)) == 0) {
942 			mr.mb[1] = 0;
943 			mr.mb[2] = 0;
944 			rval = ql_data_rate(ha, &mr);
945 			if (rval != QL_SUCCESS) {
946 				EL(ha, "failed, data_rate=%xh\n", rval);
947 			} else {
948 				switch (mr.mb[1]) {
949 				case IIDMA_RATE_1GB:
950 					tmp_port.PortSpeed =
951 					    EXT_DEF_PORTSPEED_1GBIT;
952 					break;
953 				case IIDMA_RATE_2GB:
954 					tmp_port.PortSpeed =
955 					    EXT_DEF_PORTSPEED_2GBIT;
956 					break;
957 				case IIDMA_RATE_4GB:
958 					tmp_port.PortSpeed =
959 					    EXT_DEF_PORTSPEED_4GBIT;
960 					break;
961 				case IIDMA_RATE_8GB:
962 					tmp_port.PortSpeed =
963 					    EXT_DEF_PORTSPEED_8GBIT;
964 					break;
965 				case IIDMA_RATE_10GB:
966 					tmp_port.PortSpeed =
967 					    EXT_DEF_PORTSPEED_10GBIT;
968 					break;
969 				default:
970 					tmp_port.PortSpeed =
971 					    EXT_DEF_PORTSPEED_UNKNOWN;
972 					EL(ha, "failed, data rate=%xh\n",
973 					    mr.mb[1]);
974 					break;
975 				}
976 			}
977 		} else {
978 			tmp_port.PortSpeed = EXT_DEF_PORTSPEED_1GBIT;
979 		}
980 	}
981 
982 	/* Report all supported port speeds */
983 	if (CFG_IST(ha, CFG_CTRL_25XX)) {
984 		tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_8GBIT |
985 		    EXT_DEF_PORTSPEED_4GBIT | EXT_DEF_PORTSPEED_2GBIT |
986 		    EXT_DEF_PORTSPEED_1GBIT);
987 		/*
988 		 * Correct supported speeds based on type of
989 		 * sfp that is present
990 		 */
991 		switch (ha->sfp_stat) {
992 		case 1:
993 			/* no sfp detected */
994 			break;
995 		case 2:
996 		case 4:
997 			/* 4GB sfp */
998 			tmp_port.PortSupportedSpeed &=
999 			    ~EXT_DEF_PORTSPEED_8GBIT;
1000 			break;
1001 		case 3:
1002 		case 5:
1003 			/* 8GB sfp */
1004 			tmp_port.PortSupportedSpeed &=
1005 			    ~EXT_DEF_PORTSPEED_1GBIT;
1006 			break;
1007 		default:
1008 			EL(ha, "sfp_stat: %xh\n", ha->sfp_stat);
1009 			break;
1010 
1011 		}
1012 	} else if (CFG_IST(ha, CFG_CTRL_81XX)) {
1013 		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_10GBIT;
1014 	} else if (CFG_IST(ha, CFG_CTRL_2422)) {
1015 		tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_4GBIT |
1016 		    EXT_DEF_PORTSPEED_2GBIT | EXT_DEF_PORTSPEED_1GBIT);
1017 	} else if (CFG_IST(ha, CFG_CTRL_2300)) {
1018 		tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_2GBIT |
1019 		    EXT_DEF_PORTSPEED_1GBIT);
1020 	} else if (CFG_IST(ha, CFG_CTRL_6322)) {
1021 		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_2GBIT;
1022 	} else if (CFG_IST(ha, CFG_CTRL_2200)) {
1023 		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_1GBIT;
1024 	} else {
1025 		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_UNKNOWN;
1026 		EL(ha, "unknown HBA type: %xh\n", ha->device_id);
1027 	}
1028 	tmp_port.LinkState2 = LSB(ha->sfp_stat);
1029 	port_cnt = 0;
1030 	tgt_cnt = 0;
1031 
1032 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
1033 		for (link = ha->dev[index].first; link != NULL;
1034 		    link = link->next) {
1035 			tq = link->base_address;
1036 
1037 			if (!VALID_TARGET_ID(ha, tq->loop_id)) {
1038 				continue;
1039 			}
1040 
1041 			port_cnt++;
1042 			if ((tq->flags & TQF_INITIATOR_DEVICE) == 0) {
1043 				tgt_cnt++;
1044 			}
1045 		}
1046 	}
1047 
1048 	tmp_port.DiscPortCount = port_cnt;
1049 	tmp_port.DiscTargetCount = tgt_cnt;
1050 
1051 	tmp_port.DiscPortNameType = EXT_DEF_USE_NODE_NAME;
1052 
1053 	rval = ddi_copyout((void *)&tmp_port,
1054 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1055 	    sizeof (EXT_HBA_PORT), mode);
1056 	if (rval != 0) {
1057 		cmd->Status = EXT_STATUS_COPY_ERR;
1058 		cmd->ResponseLen = 0;
1059 		EL(ha, "failed, ddi_copyout\n");
1060 	} else {
1061 		cmd->ResponseLen = sizeof (EXT_HBA_PORT);
1062 		QL_PRINT_9(CE_CONT, "(%d): done, ports=%d, targets=%d\n",
1063 		    ha->instance, port_cnt, tgt_cnt);
1064 	}
1065 }
1066 
1067 /*
1068  * ql_qry_disc_port
1069  *	Performs EXT_SC_QUERY_DISC_PORT subfunction.
1070  *
1071  * Input:
1072  *	ha:	adapter state pointer.
1073  *	cmd:	EXT_IOCTL cmd struct pointer.
1074  *	mode:	flags.
1075  *
1076  *	cmd->Instance = Port instance in fcport chain.
1077  *
1078  * Returns:
1079  *	None, request status indicated in cmd->Status.
1080  *
1081  * Context:
1082  *	Kernel context.
1083  */
1084 static void
1085 ql_qry_disc_port(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1086 {
1087 	EXT_DISC_PORT	tmp_port = {0};
1088 	ql_link_t	*link;
1089 	ql_tgt_t	*tq;
1090 	uint16_t	index;
1091 	uint16_t	inst = 0;
1092 
1093 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1094 
1095 	if (cmd->ResponseLen < sizeof (EXT_DISC_PORT)) {
1096 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1097 		cmd->DetailStatus = sizeof (EXT_DISC_PORT);
1098 		EL(ha, "failed, ResponseLen < EXT_DISC_PORT, Len=%xh\n",
1099 		    cmd->ResponseLen);
1100 		cmd->ResponseLen = 0;
1101 		return;
1102 	}
1103 
1104 	for (link = NULL, index = 0;
1105 	    index < DEVICE_HEAD_LIST_SIZE && link == NULL; index++) {
1106 		for (link = ha->dev[index].first; link != NULL;
1107 		    link = link->next) {
1108 			tq = link->base_address;
1109 
1110 			if (!VALID_TARGET_ID(ha, tq->loop_id)) {
1111 				continue;
1112 			}
1113 			if (inst != cmd->Instance) {
1114 				inst++;
1115 				continue;
1116 			}
1117 
1118 			/* fill in the values */
1119 			bcopy(tq->node_name, tmp_port.WWNN,
1120 			    EXT_DEF_WWN_NAME_SIZE);
1121 			bcopy(tq->port_name, tmp_port.WWPN,
1122 			    EXT_DEF_WWN_NAME_SIZE);
1123 
1124 			break;
1125 		}
1126 	}
1127 
1128 	if (link == NULL) {
1129 		/* no matching device */
1130 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1131 		EL(ha, "failed, port not found port=%d\n", cmd->Instance);
1132 		cmd->ResponseLen = 0;
1133 		return;
1134 	}
1135 
1136 	tmp_port.Id[0] = 0;
1137 	tmp_port.Id[1] = tq->d_id.b.domain;
1138 	tmp_port.Id[2] = tq->d_id.b.area;
1139 	tmp_port.Id[3] = tq->d_id.b.al_pa;
1140 
1141 	tmp_port.Type = 0;
1142 	if (tq->flags & TQF_INITIATOR_DEVICE) {
1143 		tmp_port.Type = (uint16_t)(tmp_port.Type |
1144 		    EXT_DEF_INITIATOR_DEV);
1145 	} else if ((tq->flags & TQF_TAPE_DEVICE) == 0) {
1146 		(void) ql_inq_scan(ha, tq, 1);
1147 	} else if (tq->flags & TQF_TAPE_DEVICE) {
1148 		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_TAPE_DEV);
1149 	}
1150 
1151 	if (tq->flags & TQF_FABRIC_DEVICE) {
1152 		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_FABRIC_DEV);
1153 	} else {
1154 		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_TARGET_DEV);
1155 	}
1156 
1157 	tmp_port.Status = 0;
1158 	tmp_port.Bus = 0;  /* Hard-coded for Solaris */
1159 
1160 	bcopy(tq->port_name, &tmp_port.TargetId, 8);
1161 
1162 	if (ddi_copyout((void *)&tmp_port,
1163 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1164 	    sizeof (EXT_DISC_PORT), mode) != 0) {
1165 		cmd->Status = EXT_STATUS_COPY_ERR;
1166 		cmd->ResponseLen = 0;
1167 		EL(ha, "failed, ddi_copyout\n");
1168 	} else {
1169 		cmd->ResponseLen = sizeof (EXT_DISC_PORT);
1170 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1171 	}
1172 }
1173 
1174 /*
1175  * ql_qry_disc_tgt
1176  *	Performs EXT_SC_QUERY_DISC_TGT subfunction.
1177  *
1178  * Input:
1179  *	ha:		adapter state pointer.
1180  *	cmd:		EXT_IOCTL cmd struct pointer.
1181  *	mode:		flags.
1182  *
1183  *	cmd->Instance = Port instance in fcport chain.
1184  *
1185  * Returns:
1186  *	None, request status indicated in cmd->Status.
1187  *
1188  * Context:
1189  *	Kernel context.
1190  */
1191 static void
1192 ql_qry_disc_tgt(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1193 {
1194 	EXT_DISC_TARGET	tmp_tgt = {0};
1195 	ql_link_t	*link;
1196 	ql_tgt_t	*tq;
1197 	uint16_t	index;
1198 	uint16_t	inst = 0;
1199 
1200 	QL_PRINT_9(CE_CONT, "(%d): started, target=%d\n", ha->instance,
1201 	    cmd->Instance);
1202 
1203 	if (cmd->ResponseLen < sizeof (EXT_DISC_TARGET)) {
1204 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1205 		cmd->DetailStatus = sizeof (EXT_DISC_TARGET);
1206 		EL(ha, "failed, ResponseLen < EXT_DISC_TARGET, Len=%xh\n",
1207 		    cmd->ResponseLen);
1208 		cmd->ResponseLen = 0;
1209 		return;
1210 	}
1211 
1212 	/* Scan port list for requested target and fill in the values */
1213 	for (link = NULL, index = 0;
1214 	    index < DEVICE_HEAD_LIST_SIZE && link == NULL; index++) {
1215 		for (link = ha->dev[index].first; link != NULL;
1216 		    link = link->next) {
1217 			tq = link->base_address;
1218 
1219 			if (!VALID_TARGET_ID(ha, tq->loop_id) ||
1220 			    tq->flags & TQF_INITIATOR_DEVICE) {
1221 				continue;
1222 			}
1223 			if (inst != cmd->Instance) {
1224 				inst++;
1225 				continue;
1226 			}
1227 
1228 			/* fill in the values */
1229 			bcopy(tq->node_name, tmp_tgt.WWNN,
1230 			    EXT_DEF_WWN_NAME_SIZE);
1231 			bcopy(tq->port_name, tmp_tgt.WWPN,
1232 			    EXT_DEF_WWN_NAME_SIZE);
1233 
1234 			break;
1235 		}
1236 	}
1237 
1238 	if (link == NULL) {
1239 		/* no matching device */
1240 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1241 		cmd->DetailStatus = EXT_DSTATUS_TARGET;
1242 		EL(ha, "failed, not found target=%d\n", cmd->Instance);
1243 		cmd->ResponseLen = 0;
1244 		return;
1245 	}
1246 	tmp_tgt.Id[0] = 0;
1247 	tmp_tgt.Id[1] = tq->d_id.b.domain;
1248 	tmp_tgt.Id[2] = tq->d_id.b.area;
1249 	tmp_tgt.Id[3] = tq->d_id.b.al_pa;
1250 
1251 	tmp_tgt.LunCount = (uint16_t)ql_lun_count(ha, tq);
1252 
1253 	if ((tq->flags & TQF_TAPE_DEVICE) == 0) {
1254 		(void) ql_inq_scan(ha, tq, 1);
1255 	}
1256 
1257 	tmp_tgt.Type = 0;
1258 	if (tq->flags & TQF_TAPE_DEVICE) {
1259 		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_TAPE_DEV);
1260 	}
1261 
1262 	if (tq->flags & TQF_FABRIC_DEVICE) {
1263 		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_FABRIC_DEV);
1264 	} else {
1265 		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_TARGET_DEV);
1266 	}
1267 
1268 	tmp_tgt.Status = 0;
1269 
1270 	tmp_tgt.Bus = 0;  /* Hard-coded for Solaris. */
1271 
1272 	bcopy(tq->port_name, &tmp_tgt.TargetId, 8);
1273 
1274 	if (ddi_copyout((void *)&tmp_tgt,
1275 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1276 	    sizeof (EXT_DISC_TARGET), mode) != 0) {
1277 		cmd->Status = EXT_STATUS_COPY_ERR;
1278 		cmd->ResponseLen = 0;
1279 		EL(ha, "failed, ddi_copyout\n");
1280 	} else {
1281 		cmd->ResponseLen = sizeof (EXT_DISC_TARGET);
1282 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1283 	}
1284 }
1285 
1286 /*
1287  * ql_qry_fw
1288  *	Performs EXT_SC_QUERY_FW subfunction.
1289  *
1290  * Input:
1291  *	ha:	adapter state pointer.
1292  *	cmd:	EXT_IOCTL cmd struct pointer.
1293  *	mode:	flags.
1294  *
1295  * Returns:
1296  *	None, request status indicated in cmd->Status.
1297  *
1298  * Context:
1299  *	Kernel context.
1300  */
1301 static void
1302 ql_qry_fw(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1303 {
1304 	ql_mbx_data_t	mr;
1305 	EXT_FW		fw_info = {0};
1306 
1307 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1308 
1309 	if (cmd->ResponseLen < sizeof (EXT_FW)) {
1310 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1311 		cmd->DetailStatus = sizeof (EXT_FW);
1312 		EL(ha, "failed, ResponseLen < EXT_FW, Len=%xh\n",
1313 		    cmd->ResponseLen);
1314 		cmd->ResponseLen = 0;
1315 		return;
1316 	}
1317 
1318 	(void) ql_get_fw_version(ha, &mr);
1319 
1320 	(void) sprintf((char *)(fw_info.Version), "%d.%d.%d", mr.mb[1],
1321 	    mr.mb[2], mr.mb[3]);
1322 
1323 	fw_info.Attrib = mr.mb[6];
1324 
1325 	if (ddi_copyout((void *)&fw_info,
1326 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1327 	    sizeof (EXT_FW), mode) != 0) {
1328 		cmd->Status = EXT_STATUS_COPY_ERR;
1329 		cmd->ResponseLen = 0;
1330 		EL(ha, "failed, ddi_copyout\n");
1331 		return;
1332 	} else {
1333 		cmd->ResponseLen = sizeof (EXT_FW);
1334 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1335 	}
1336 }
1337 
1338 /*
1339  * ql_qry_chip
1340  *	Performs EXT_SC_QUERY_CHIP subfunction.
1341  *
1342  * Input:
1343  *	ha:	adapter state pointer.
1344  *	cmd:	EXT_IOCTL cmd struct pointer.
1345  *	mode:	flags.
1346  *
1347  * Returns:
1348  *	None, request status indicated in cmd->Status.
1349  *
1350  * Context:
1351  *	Kernel context.
1352  */
1353 static void
1354 ql_qry_chip(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1355 {
1356 	EXT_CHIP	chip = {0};
1357 
1358 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1359 
1360 	if (cmd->ResponseLen < sizeof (EXT_CHIP)) {
1361 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1362 		cmd->DetailStatus = sizeof (EXT_CHIP);
1363 		EL(ha, "failed, ResponseLen < EXT_CHIP, Len=%xh\n",
1364 		    cmd->ResponseLen);
1365 		cmd->ResponseLen = 0;
1366 		return;
1367 	}
1368 
1369 	chip.VendorId = ha->ven_id;
1370 	chip.DeviceId = ha->device_id;
1371 	chip.SubVendorId = ha->subven_id;
1372 	chip.SubSystemId = ha->subsys_id;
1373 	chip.IoAddr = ql_pci_config_get32(ha, PCI_CONF_BASE0);
1374 	chip.IoAddrLen = 0x100;
1375 	chip.MemAddr = ql_pci_config_get32(ha, PCI_CONF_BASE1);
1376 	chip.MemAddrLen = 0x100;
1377 	chip.ChipRevID = ha->rev_id;
1378 	if (ha->flags & FUNCTION_1) {
1379 		chip.FuncNo = 1;
1380 	}
1381 
1382 	if (ddi_copyout((void *)&chip,
1383 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1384 	    sizeof (EXT_CHIP), mode) != 0) {
1385 		cmd->Status = EXT_STATUS_COPY_ERR;
1386 		cmd->ResponseLen = 0;
1387 		EL(ha, "failed, ddi_copyout\n");
1388 	} else {
1389 		cmd->ResponseLen = sizeof (EXT_CHIP);
1390 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1391 	}
1392 }
1393 
1394 /*
1395  * ql_qry_driver
1396  *	Performs EXT_SC_QUERY_DRIVER subfunction.
1397  *
1398  * Input:
1399  *	ha:	adapter state pointer.
1400  *	cmd:	EXT_IOCTL cmd struct pointer.
1401  *	mode:	flags.
1402  *
1403  * Returns:
1404  *	None, request status indicated in cmd->Status.
1405  *
1406  * Context:
1407  *	Kernel context.
1408  */
1409 static void
1410 ql_qry_driver(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1411 {
1412 	EXT_DRIVER	qd = {0};
1413 
1414 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1415 
1416 	if (cmd->ResponseLen < sizeof (EXT_DRIVER)) {
1417 		cmd->Status = EXT_STATUS_DATA_OVERRUN;
1418 		cmd->DetailStatus = sizeof (EXT_DRIVER);
1419 		EL(ha, "failed, ResponseLen < EXT_DRIVER, Len=%xh\n",
1420 		    cmd->ResponseLen);
1421 		cmd->ResponseLen = 0;
1422 		return;
1423 	}
1424 
1425 	(void) strcpy((void *)&qd.Version[0], QL_VERSION);
1426 	qd.NumOfBus = 1;	/* Fixed for Solaris */
1427 	qd.TargetsPerBus = (uint16_t)
1428 	    (CFG_IST(ha, (CFG_CTRL_242581 | CFG_EXT_FW_INTERFACE)) ?
1429 	    MAX_24_FIBRE_DEVICES : MAX_22_FIBRE_DEVICES);
1430 	qd.LunsPerTarget = 2030;
1431 	qd.MaxTransferLen = QL_DMA_MAX_XFER_SIZE;
1432 	qd.MaxDataSegments = QL_DMA_SG_LIST_LENGTH;
1433 
1434 	if (ddi_copyout((void *)&qd, (void *)(uintptr_t)cmd->ResponseAdr,
1435 	    sizeof (EXT_DRIVER), mode) != 0) {
1436 		cmd->Status = EXT_STATUS_COPY_ERR;
1437 		cmd->ResponseLen = 0;
1438 		EL(ha, "failed, ddi_copyout\n");
1439 	} else {
1440 		cmd->ResponseLen = sizeof (EXT_DRIVER);
1441 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1442 	}
1443 }
1444 
1445 /*
1446  * ql_fcct
1447  *	IOCTL management server FC-CT passthrough.
1448  *
1449  * Input:
1450  *	ha:	adapter state pointer.
1451  *	cmd:	User space CT arguments pointer.
1452  *	mode:	flags.
1453  *
1454  * Returns:
1455  *	None, request status indicated in cmd->Status.
1456  *
1457  * Context:
1458  *	Kernel context.
1459  */
1460 static void
1461 ql_fcct(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1462 {
1463 	ql_mbx_iocb_t		*pkt;
1464 	ql_mbx_data_t		mr;
1465 	dma_mem_t		*dma_mem;
1466 	caddr_t			pld;
1467 	uint32_t		pkt_size, pld_byte_cnt, *long_ptr;
1468 	int			rval;
1469 	ql_ct_iu_preamble_t	*ct;
1470 	ql_xioctl_t		*xp = ha->xioctl;
1471 	ql_tgt_t		tq;
1472 	uint16_t		comp_status, loop_id;
1473 
1474 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1475 
1476 	/* FC-CT passthrough requires a switched fabric connection. */
1477 	if ((ha->topology & QL_SNS_CONNECTION) == 0) {
1478 		EL(ha, "failed, No switch\n");
1479 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1480 		cmd->ResponseLen = 0;
1481 		return;
1482 	}
1483 
1484 	if (DRIVER_SUSPENDED(ha)) {
1485 		EL(ha, "failed, LOOP_NOT_READY\n");
1486 		cmd->Status = EXT_STATUS_BUSY;
1487 		cmd->ResponseLen = 0;
1488 		return;
1489 	}
1490 
1491 	/* Login management server device. */
1492 	if ((xp->flags & QL_MGMT_SERVER_LOGIN) == 0) {
1493 		tq.d_id.b.al_pa = 0xfa;
1494 		tq.d_id.b.area = 0xff;
1495 		tq.d_id.b.domain = 0xff;
1496 		tq.loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_242581) ?
1497 		    MANAGEMENT_SERVER_24XX_LOOP_ID :
1498 		    MANAGEMENT_SERVER_LOOP_ID);
1499 		rval = ql_login_fport(ha, &tq, tq.loop_id, LFF_NO_PRLI, &mr);
1500 		if (rval != QL_SUCCESS) {
1501 			EL(ha, "failed, server login\n");
1502 			cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1503 			cmd->ResponseLen = 0;
1504 			return;
1505 		} else {
1506 			xp->flags |= QL_MGMT_SERVER_LOGIN;
1507 		}
1508 	}
1509 
1510 	QL_PRINT_9(CE_CONT, "(%d): cmd\n", ha->instance);
1511 	QL_DUMP_9(cmd, 8, sizeof (EXT_IOCTL));
1512 
1513 	/* Allocate a DMA Memory Descriptor */
1514 	dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP);
1515 	if (dma_mem == NULL) {
1516 		EL(ha, "failed, kmem_zalloc\n");
1517 		cmd->Status = EXT_STATUS_NO_MEMORY;
1518 		cmd->ResponseLen = 0;
1519 		return;
1520 	}
1521 	/* Determine maximum buffer size. */
1522 	if (cmd->RequestLen < cmd->ResponseLen) {
1523 		pld_byte_cnt = cmd->ResponseLen;
1524 	} else {
1525 		pld_byte_cnt = cmd->RequestLen;
1526 	}
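	/*
	 * A single DMA buffer sized to the larger of the request and
	 * response lengths is used for both the outbound CT command and
	 * the inbound CT response data segments set up below.
	 */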
1527 
1528 	/* Allocate command block. */
1529 	pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + pld_byte_cnt);
1530 	pkt = kmem_zalloc(pkt_size, KM_SLEEP);
1531 	if (pkt == NULL) {
1532 		EL(ha, "failed, kmem_zalloc\n");
1533 		cmd->Status = EXT_STATUS_NO_MEMORY;
1534 		cmd->ResponseLen = 0;
1535 		return;
1536 	}
1537 	pld = (caddr_t)pkt + sizeof (ql_mbx_iocb_t);
1538 
1539 	/* Get command payload data. */
1540 	if (ql_get_buffer_data((caddr_t)(uintptr_t)cmd->RequestAdr, pld,
1541 	    cmd->RequestLen, mode) != cmd->RequestLen) {
1542 		EL(ha, "failed, get_buffer_data\n");
1543 		kmem_free(pkt, pkt_size);
		kmem_free(dma_mem, sizeof (dma_mem_t));
1544 		cmd->Status = EXT_STATUS_COPY_ERR;
1545 		cmd->ResponseLen = 0;
1546 		return;
1547 	}
1548 
1549 	/* Get DMA memory for the IOCB */
1550 	if (ql_get_dma_mem(ha, dma_mem, pkt_size, LITTLE_ENDIAN_DMA,
1551 	    QL_DMA_RING_ALIGN) != QL_SUCCESS) {
1552 		cmn_err(CE_WARN, "%s(%d): DMA memory "
1553 		    "alloc failed", QL_NAME, ha->instance);
1554 		kmem_free(pkt, pkt_size);
1555 		kmem_free(dma_mem, sizeof (dma_mem_t));
1556 		cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
1557 		cmd->ResponseLen = 0;
1558 		return;
1559 	}
1560 
1561 	/* Copy out going payload data to IOCB DMA buffer. */
1562 	ddi_rep_put8(dma_mem->acc_handle, (uint8_t *)pld,
1563 	    (uint8_t *)dma_mem->bp, pld_byte_cnt, DDI_DEV_AUTOINCR);
1564 
1565 	/* Sync IOCB DMA buffer. */
1566 	(void) ddi_dma_sync(dma_mem->dma_handle, 0, pld_byte_cnt,
1567 	    DDI_DMA_SYNC_FORDEV);
1568 
1569 	/*
1570 	 * Setup IOCB
1571 	 */
1572 	ct = (ql_ct_iu_preamble_t *)pld;
1573 	if (CFG_IST(ha, CFG_CTRL_242581)) {
1574 		pkt->ms24.entry_type = CT_PASSTHRU_TYPE;
1575 		pkt->ms24.entry_count = 1;
1576 
1577 		/* Set loop ID */
1578 		pkt->ms24.n_port_hdl = (uint16_t)
1579 		    (ct->gs_type == GS_TYPE_DIR_SERVER ?
1580 		    LE_16(SNS_24XX_HDL) :
1581 		    LE_16(MANAGEMENT_SERVER_24XX_LOOP_ID));
1582 
1583 		/* Set ISP command timeout. */
1584 		pkt->ms24.timeout = LE_16(120);
1585 
1586 		/* Set cmd/response data segment counts. */
1587 		pkt->ms24.cmd_dseg_count = LE_16(1);
1588 		pkt->ms24.resp_dseg_count = LE_16(1);
1589 
1590 		/* Load ct cmd byte count. */
1591 		pkt->ms24.cmd_byte_count = LE_32(cmd->RequestLen);
1592 
1593 		/* Load ct rsp byte count. */
1594 		pkt->ms24.resp_byte_count = LE_32(cmd->ResponseLen);
1595 
1596 		long_ptr = (uint32_t *)&pkt->ms24.dseg_0_address;
1597 
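		/*
		 * Each data segment descriptor is three 32-bit words:
		 * address low, address high and byte count, loaded in
		 * little-endian order below.
		 */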
1598 		/* Load MS command entry data segments. */
1599 		*long_ptr++ = (uint32_t)
1600 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1601 		*long_ptr++ = (uint32_t)
1602 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1603 		*long_ptr++ = (uint32_t)(LE_32(cmd->RequestLen));
1604 
1605 		/* Load MS response entry data segments. */
1606 		*long_ptr++ = (uint32_t)
1607 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1608 		*long_ptr++ = (uint32_t)
1609 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1610 		*long_ptr = (uint32_t)LE_32(cmd->ResponseLen);
1611 
1612 		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
1613 		    sizeof (ql_mbx_iocb_t));
1614 
1615 		comp_status = (uint16_t)LE_16(pkt->sts24.comp_status);
1616 		if (comp_status == CS_DATA_UNDERRUN) {
1617 			if ((BE_16(ct->max_residual_size)) == 0) {
1618 				comp_status = CS_COMPLETE;
1619 			}
1620 		}
1621 
1622 		if (rval != QL_SUCCESS || (pkt->sts24.entry_status & 0x3c) !=
1623 		    0) {
1624 			EL(ha, "failed, I/O timeout or "
1625 			    "es=%xh, ss_l=%xh, rval=%xh\n",
1626 			    pkt->sts24.entry_status,
1627 			    pkt->sts24.scsi_status_l, rval);
1628 			kmem_free(pkt, pkt_size);
1629 			ql_free_dma_resource(ha, dma_mem);
1630 			kmem_free(dma_mem, sizeof (dma_mem_t));
1631 			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
1632 			cmd->ResponseLen = 0;
1633 			return;
1634 		}
1635 	} else {
1636 		pkt->ms.entry_type = MS_TYPE;
1637 		pkt->ms.entry_count = 1;
1638 
1639 		/* Set loop ID */
1640 		loop_id = (uint16_t)(ct->gs_type == GS_TYPE_DIR_SERVER ?
1641 		    SIMPLE_NAME_SERVER_LOOP_ID : MANAGEMENT_SERVER_LOOP_ID);
1642 		if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
1643 			pkt->ms.loop_id_l = LSB(loop_id);
1644 			pkt->ms.loop_id_h = MSB(loop_id);
1645 		} else {
1646 			pkt->ms.loop_id_h = LSB(loop_id);
1647 		}
1648 
1649 		/* Set ISP command timeout. */
1650 		pkt->ms.timeout = LE_16(120);
1651 
1652 		/* Set data segment counts. */
1653 		pkt->ms.cmd_dseg_count_l = 1;
1654 		pkt->ms.total_dseg_count = LE_16(2);
1655 
1656 		/* Response total byte count. */
1657 		pkt->ms.resp_byte_count = LE_32(cmd->ResponseLen);
1658 		pkt->ms.dseg_1_length = LE_32(cmd->ResponseLen);
1659 
1660 		/* Command total byte count. */
1661 		pkt->ms.cmd_byte_count = LE_32(cmd->RequestLen);
1662 		pkt->ms.dseg_0_length = LE_32(cmd->RequestLen);
1663 
1664 		/* Load command/response data segments. */
1665 		pkt->ms.dseg_0_address[0] = (uint32_t)
1666 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1667 		pkt->ms.dseg_0_address[1] = (uint32_t)
1668 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1669 		pkt->ms.dseg_1_address[0] = (uint32_t)
1670 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1671 		pkt->ms.dseg_1_address[1] = (uint32_t)
1672 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1673 
1674 		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
1675 		    sizeof (ql_mbx_iocb_t));
1676 
1677 		comp_status = (uint16_t)LE_16(pkt->sts.comp_status);
1678 		if (comp_status == CS_DATA_UNDERRUN) {
1679 			if ((BE_16(ct->max_residual_size)) == 0) {
1680 				comp_status = CS_COMPLETE;
1681 			}
1682 		}
1683 		if (rval != QL_SUCCESS || (pkt->sts.entry_status & 0x7e) != 0) {
1684 			EL(ha, "failed, I/O timeout or "
1685 			    "es=%xh, rval=%xh\n", pkt->sts.entry_status, rval);
1686 			kmem_free(pkt, pkt_size);
1687 			ql_free_dma_resource(ha, dma_mem);
1688 			kmem_free(dma_mem, sizeof (dma_mem_t));
1689 			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
1690 			cmd->ResponseLen = 0;
1691 			return;
1692 		}
1693 	}
1694 
1695 	/* Sync in coming DMA buffer. */
1696 	(void) ddi_dma_sync(dma_mem->dma_handle, 0,
1697 	    pld_byte_cnt, DDI_DMA_SYNC_FORKERNEL);
1698 	/* Copy in coming DMA data. */
1699 	ddi_rep_get8(dma_mem->acc_handle, (uint8_t *)pld,
1700 	    (uint8_t *)dma_mem->bp, pld_byte_cnt,
1701 	    DDI_DEV_AUTOINCR);
1702 
1703 	/* Copy response payload from DMA buffer to application. */
1704 	if (cmd->ResponseLen != 0) {
1705 		QL_PRINT_9(CE_CONT, "(%d): ResponseLen=%d\n", ha->instance,
1706 		    cmd->ResponseLen);
1707 		QL_DUMP_9(pld, 8, cmd->ResponseLen);
1708 
1709 		/* Send response payload. */
1710 		if (ql_send_buffer_data(pld,
1711 		    (caddr_t)(uintptr_t)cmd->ResponseAdr,
1712 		    cmd->ResponseLen, mode) != cmd->ResponseLen) {
1713 			EL(ha, "failed, send_buffer_data\n");
1714 			cmd->Status = EXT_STATUS_COPY_ERR;
1715 			cmd->ResponseLen = 0;
1716 		}
1717 	}
1718 
1719 	kmem_free(pkt, pkt_size);
1720 	ql_free_dma_resource(ha, dma_mem);
1721 	kmem_free(dma_mem, sizeof (dma_mem_t));
1722 
1723 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1724 }
1725 
1726 /*
1727  * ql_aen_reg
1728  *	IOCTL management server Asynchronous Event Tracking Enable/Disable.
1729  *
1730  * Input:
1731  *	ha:	adapter state pointer.
1732  *	cmd:	EXT_IOCTL cmd struct pointer.
1733  *	mode:	flags.
1734  *
1735  * Returns:
1736  *	None, request status indicated in cmd->Status.
1737  *
1738  * Context:
1739  *	Kernel context.
1740  */
1741 static void
1742 ql_aen_reg(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1743 {
1744 	EXT_REG_AEN	reg_struct;
1745 	int		rval = 0;
1746 	ql_xioctl_t	*xp = ha->xioctl;
1747 
1748 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1749 
1750 	rval = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, &reg_struct,
1751 	    cmd->RequestLen, mode);
1752 
1753 	if (rval == 0) {
1754 		if (reg_struct.Enable) {
1755 			xp->flags |= QL_AEN_TRACKING_ENABLE;
1756 		} else {
1757 			xp->flags &= ~QL_AEN_TRACKING_ENABLE;
1758 			/* Empty the queue. */
1759 			INTR_LOCK(ha);
1760 			xp->aen_q_head = 0;
1761 			xp->aen_q_tail = 0;
1762 			INTR_UNLOCK(ha);
1763 		}
1764 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1765 	} else {
1766 		cmd->Status = EXT_STATUS_COPY_ERR;
1767 		EL(ha, "failed, ddi_copyin\n");
1768 	}
1769 }
1770 
1771 /*
1772  * ql_aen_get
1773  *	IOCTL management server Asynchronous Event Record Transfer.
1774  *
1775  * Input:
1776  *	ha:	adapter state pointer.
1777  *	cmd:	EXT_IOCTL cmd struct pointer.
1778  *	mode:	flags.
1779  *
1780  * Returns:
1781  *	None, request status indicated in cmd->Status.
1782  *
1783  * Context:
1784  *	Kernel context.
1785  */
1786 static void
1787 ql_aen_get(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1788 {
1789 	uint32_t	out_size;
1790 	EXT_ASYNC_EVENT	*tmp_q;
1791 	EXT_ASYNC_EVENT	aen[EXT_DEF_MAX_AEN_QUEUE];
1792 	uint8_t		i;
1793 	uint8_t		queue_cnt;
1794 	uint8_t		request_cnt;
1795 	ql_xioctl_t	*xp = ha->xioctl;
1796 
1797 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1798 
1799 	/* Compute the number of events that can be returned */
1800 	request_cnt = (uint8_t)(cmd->ResponseLen / sizeof (EXT_ASYNC_EVENT));
1801 
1802 	if (request_cnt < EXT_DEF_MAX_AEN_QUEUE) {
1803 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1804 		cmd->DetailStatus = EXT_DEF_MAX_AEN_QUEUE;
1805 		EL(ha, "failed, request_cnt < EXT_DEF_MAX_AEN_QUEUE, "
1806 		    "Len=%xh\n", request_cnt);
1807 		cmd->ResponseLen = 0;
1808 		return;
1809 	}
1810 
1811 	/* 1st: Make a local copy of the entire queue content. */
1812 	tmp_q = (EXT_ASYNC_EVENT *)xp->aen_tracking_queue;
1813 	queue_cnt = 0;
1814 
1815 	INTR_LOCK(ha);
1816 	i = xp->aen_q_head;
1817 
1818 	for (; queue_cnt < EXT_DEF_MAX_AEN_QUEUE; ) {
1819 		if (tmp_q[i].AsyncEventCode != 0) {
1820 			bcopy(&tmp_q[i], &aen[queue_cnt],
1821 			    sizeof (EXT_ASYNC_EVENT));
1822 			queue_cnt++;
1823 			tmp_q[i].AsyncEventCode = 0; /* empty out the slot */
1824 		}
1825 		if (i == xp->aen_q_tail) {
1826 			/* done. */
1827 			break;
1828 		}
1829 		i++;
1830 		if (i == EXT_DEF_MAX_AEN_QUEUE) {
1831 			i = 0;
1832 		}
1833 	}
1834 
1835 	/* Empty the queue. */
1836 	xp->aen_q_head = 0;
1837 	xp->aen_q_tail = 0;
1838 
1839 	INTR_UNLOCK(ha);
1840 
1841 	/* 2nd: Now transfer the queue content to user buffer */
1842 	/* Copy the entire queue to user's buffer. */
1843 	out_size = (uint32_t)(queue_cnt * sizeof (EXT_ASYNC_EVENT));
1844 	if (queue_cnt == 0) {
1845 		cmd->ResponseLen = 0;
1846 	} else if (ddi_copyout((void *)&aen[0],
1847 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1848 	    out_size, mode) != 0) {
1849 		cmd->Status = EXT_STATUS_COPY_ERR;
1850 		cmd->ResponseLen = 0;
1851 		EL(ha, "failed, ddi_copyout\n");
1852 	} else {
1853 		cmd->ResponseLen = out_size;
1854 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1855 	}
1856 }
1857 
1858 /*
1859  * ql_enqueue_aen
1860  *
1861  * Input:
1862  *	ha:		adapter state pointer.
1863  *	event_code:	async event code of the event to add to queue.
1864  *	payload:	event payload for the queue.
1865  *	INTR_LOCK must be already obtained.
1866  *
1867  * Context:
1868  *	Interrupt or Kernel context, no mailbox commands allowed.
1869  */
1870 void
1871 ql_enqueue_aen(ql_adapter_state_t *ha, uint16_t event_code, void *payload)
1872 {
1873 	uint8_t			new_entry;	/* index to current entry */
1874 	uint16_t		*mbx;
1875 	EXT_ASYNC_EVENT		*aen_queue;
1876 	ql_xioctl_t		*xp = ha->xioctl;
1877 
1878 	QL_PRINT_9(CE_CONT, "(%d): started, event_code=%d\n", ha->instance,
1879 	    event_code);
1880 
1881 	if (xp == NULL) {
1882 		QL_PRINT_9(CE_CONT, "(%d): no context\n", ha->instance);
1883 		return;
1884 	}
1885 	aen_queue = (EXT_ASYNC_EVENT *)xp->aen_tracking_queue;
1886 
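	/* A non-zero event code means the tail slot is still occupied. */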
1887 	if (aen_queue[xp->aen_q_tail].AsyncEventCode != 0) {
1888 		/* Need to change queue pointers to make room. */
1889 
1890 		/* Increment tail for adding new entry. */
1891 		xp->aen_q_tail++;
1892 		if (xp->aen_q_tail == EXT_DEF_MAX_AEN_QUEUE) {
1893 			xp->aen_q_tail = 0;
1894 		}
1895 		if (xp->aen_q_head == xp->aen_q_tail) {
1896 			/*
1897 			 * We're overwriting the oldest entry, so need to
1898 			 * update the head pointer.
1899 			 */
1900 			xp->aen_q_head++;
1901 			if (xp->aen_q_head == EXT_DEF_MAX_AEN_QUEUE) {
1902 				xp->aen_q_head = 0;
1903 			}
1904 		}
1905 	}
1906 
1907 	new_entry = xp->aen_q_tail;
1908 	aen_queue[new_entry].AsyncEventCode = event_code;
1909 
1910 	/* Update payload */
1911 	if (payload != NULL) {
1912 		switch (event_code) {
1913 		case MBA_LIP_OCCURRED:
1914 		case MBA_LOOP_UP:
1915 		case MBA_LOOP_DOWN:
1916 		case MBA_LIP_F8:
1917 		case MBA_LIP_RESET:
1918 		case MBA_PORT_UPDATE:
1919 			break;
1920 		case MBA_RSCN_UPDATE:
1921 			mbx = (uint16_t *)payload;
1922 			/* al_pa */
1923 			aen_queue[new_entry].Payload.RSCN.RSCNInfo[0] =
1924 			    LSB(mbx[2]);
1925 			/* area */
1926 			aen_queue[new_entry].Payload.RSCN.RSCNInfo[1] =
1927 			    MSB(mbx[2]);
1928 			/* domain */
1929 			aen_queue[new_entry].Payload.RSCN.RSCNInfo[2] =
1930 			    LSB(mbx[1]);
1931 			/* save in big endian */
1932 			BIG_ENDIAN_24(&aen_queue[new_entry].
1933 			    Payload.RSCN.RSCNInfo[0]);
1934 
1935 			aen_queue[new_entry].Payload.RSCN.AddrFormat =
1936 			    MSB(mbx[1]);
1937 
1938 			break;
1939 		default:
1940 			/* Not supported */
1941 			EL(ha, "failed, event code not supported=%xh\n",
1942 			    event_code);
1943 			aen_queue[new_entry].AsyncEventCode = 0;
1944 			break;
1945 		}
1946 	}
1947 
1948 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1949 }
1950 
1951 /*
1952  * ql_scsi_passthru
1953  *	IOCTL SCSI passthrough.
1954  *
1955  * Input:
1956  *	ha:	adapter state pointer.
1957  *	cmd:	User space SCSI command pointer.
1958  *	mode:	flags.
1959  *
1960  * Returns:
1961  *	None, request status indicated in cmd->Status.
1962  *
1963  * Context:
1964  *	Kernel context.
1965  */
1966 static void
1967 ql_scsi_passthru(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1968 {
1969 	ql_mbx_iocb_t		*pkt;
1970 	ql_mbx_data_t		mr;
1971 	dma_mem_t		*dma_mem;
1972 	caddr_t			pld;
1973 	uint32_t		pkt_size, pld_size;
1974 	uint16_t		qlnt, retries, cnt, cnt2;
1975 	uint8_t			*name;
1976 	EXT_FC_SCSI_PASSTHRU	*ufc_req;
1977 	EXT_SCSI_PASSTHRU	*usp_req;
1978 	int			rval;
1979 	union _passthru {
1980 		EXT_SCSI_PASSTHRU	sp_cmd;
1981 		EXT_FC_SCSI_PASSTHRU	fc_cmd;
1982 	} pt_req;		/* Passthru request */
1983 	uint32_t		status, sense_sz = 0;
1984 	ql_tgt_t		*tq = NULL;
1985 	EXT_SCSI_PASSTHRU	*sp_req = &pt_req.sp_cmd;
1986 	EXT_FC_SCSI_PASSTHRU	*fc_req = &pt_req.fc_cmd;
1987 
1988 	/* SCSI request struct for SCSI passthrough IOs. */
1989 	struct {
1990 		uint16_t	lun;
1991 		uint16_t	sense_length;	/* Sense buffer size */
1992 		size_t		resid;		/* Residual */
1993 		uint8_t		*cdbp;		/* Requestor's CDB */
1994 		uint8_t		*u_sense;	/* Requestor's sense buffer */
1995 		uint8_t		cdb_len;	/* Requestor's CDB length */
1996 		uint8_t		direction;
1997 	} scsi_req;
1998 
1999 	struct {
2000 		uint8_t		*rsp_info;
2001 		uint8_t		*req_sense_data;
2002 		uint32_t	residual_length;
2003 		uint32_t	rsp_info_length;
2004 		uint32_t	req_sense_length;
2005 		uint16_t	comp_status;
2006 		uint8_t		state_flags_l;
2007 		uint8_t		state_flags_h;
2008 		uint8_t		scsi_status_l;
2009 		uint8_t		scsi_status_h;
2010 	} sts;
2011 
2012 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2013 
2014 	/* Verify Sub Code and set pld_size to the needed request size. */
2015 	if (cmd->SubCode == EXT_SC_SEND_SCSI_PASSTHRU) {
2016 		pld_size = sizeof (EXT_SCSI_PASSTHRU);
2017 	} else if (cmd->SubCode == EXT_SC_SEND_FC_SCSI_PASSTHRU) {
2018 		pld_size = sizeof (EXT_FC_SCSI_PASSTHRU);
2019 	} else {
2020 		EL(ha, "failed, invalid SubCode=%xh\n", cmd->SubCode);
2021 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
2022 		cmd->ResponseLen = 0;
2023 		return;
2024 	}
2025 
2026 	dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP);
2027 	if (dma_mem == NULL) {
2028 		EL(ha, "failed, kmem_zalloc\n");
2029 		cmd->Status = EXT_STATUS_NO_MEMORY;
2030 		cmd->ResponseLen = 0;
2031 		return;
2032 	}
2033 	/*  Verify the size of and copy in the passthru request structure. */
2034 	if (cmd->RequestLen != pld_size) {
2035 		/* Return error */
2036 		EL(ha, "failed, RequestLen != pld_size, is=%xh, expected=%xh\n",
2037 		    cmd->RequestLen, pld_size);
2038 		cmd->Status = EXT_STATUS_INVALID_PARAM;
2039 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
2040 		cmd->ResponseLen = 0;
		kmem_free(dma_mem, sizeof (dma_mem_t));
2041 		return;
2042 	}
2043 
2044 	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr, &pt_req,
2045 	    pld_size, mode) != 0) {
2046 		EL(ha, "failed, ddi_copyin\n");
2047 		cmd->Status = EXT_STATUS_COPY_ERR;
2048 		cmd->ResponseLen = 0;
		kmem_free(dma_mem, sizeof (dma_mem_t));
2049 		return;
2050 	}
2051 
2052 	/*
2053 	 * Find the fc_port from the SCSI PASSTHRU structure and fill in
2054 	 * the scsi_req request data structure.
2055 	 */
2056 	if (cmd->SubCode == EXT_SC_SEND_SCSI_PASSTHRU) {
2057 		scsi_req.lun = sp_req->TargetAddr.Lun;
2058 		scsi_req.sense_length = sizeof (sp_req->SenseData);
2059 		scsi_req.cdbp = &sp_req->Cdb[0];
2060 		scsi_req.cdb_len = sp_req->CdbLength;
2061 		scsi_req.direction = sp_req->Direction;
2062 		usp_req = (EXT_SCSI_PASSTHRU *)(uintptr_t)cmd->RequestAdr;
2063 		scsi_req.u_sense = &usp_req->SenseData[0];
2064 		cmd->DetailStatus = EXT_DSTATUS_TARGET;
2065 
2066 		qlnt = QLNT_PORT;
2067 		name = (uint8_t *)&sp_req->TargetAddr.Target;
2068 		QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, Target=%lld\n",
2069 		    ha->instance, cmd->SubCode, sp_req->TargetAddr.Target);
2070 		tq = ql_find_port(ha, name, qlnt);
2071 	} else {
2072 		/*
2073 		 * Must be FC PASSTHRU, verified above.
2074 		 */
2075 		if (fc_req->FCScsiAddr.DestType == EXT_DEF_DESTTYPE_WWPN) {
2076 			qlnt = QLNT_PORT;
2077 			name = &fc_req->FCScsiAddr.DestAddr.WWPN[0];
2078 			QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, "
2079 			    "wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2080 			    ha->instance, cmd->SubCode, name[0], name[1],
2081 			    name[2], name[3], name[4], name[5], name[6],
2082 			    name[7]);
2083 			tq = ql_find_port(ha, name, qlnt);
2084 		} else if (fc_req->FCScsiAddr.DestType ==
2085 		    EXT_DEF_DESTTYPE_WWNN) {
2086 			qlnt = QLNT_NODE;
2087 			name = &fc_req->FCScsiAddr.DestAddr.WWNN[0];
2088 			QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, "
2089 			    "wwnn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2090 			    ha->instance, cmd->SubCode, name[0], name[1],
2091 			    name[2], name[3], name[4], name[5], name[6],
2092 			    name[7]);
2093 			tq = ql_find_port(ha, name, qlnt);
2094 		} else if (fc_req->FCScsiAddr.DestType ==
2095 		    EXT_DEF_DESTTYPE_PORTID) {
2096 			qlnt = QLNT_PID;
2097 			name = &fc_req->FCScsiAddr.DestAddr.Id[0];
2098 			QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, PID="
2099 			    "%02x%02x%02x\n", ha->instance, cmd->SubCode,
2100 			    name[0], name[1], name[2]);
2101 			tq = ql_find_port(ha, name, qlnt);
2102 		} else {
2103 			EL(ha, "failed, SubCode=%xh invalid DestType=%xh\n",
2104 			    cmd->SubCode, fc_req->FCScsiAddr.DestType);
2105 			cmd->Status = EXT_STATUS_INVALID_PARAM;
2106 			cmd->ResponseLen = 0;
			kmem_free(dma_mem, sizeof (dma_mem_t));
2107 			return;
2108 		}
2109 		scsi_req.lun = fc_req->FCScsiAddr.Lun;
2110 		scsi_req.sense_length = sizeof (fc_req->SenseData);
2111 		scsi_req.cdbp = &fc_req->Cdb[0];
2112 		scsi_req.cdb_len = fc_req->CdbLength;
2113 		ufc_req = (EXT_FC_SCSI_PASSTHRU *)(uintptr_t)cmd->RequestAdr;
2114 		scsi_req.u_sense = &ufc_req->SenseData[0];
2115 		scsi_req.direction = fc_req->Direction;
2116 	}
2117 
2118 	if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
2119 		EL(ha, "failed, fc_port not found\n");
2120 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
2121 		cmd->ResponseLen = 0;
		kmem_free(dma_mem, sizeof (dma_mem_t));
2122 		return;
2123 	}
2124 
2125 	if (tq->flags & TQF_NEED_AUTHENTICATION) {
2126 		EL(ha, "target not available; loopid=%xh\n", tq->loop_id);
2127 		cmd->Status = EXT_STATUS_DEVICE_OFFLINE;
2128 		cmd->ResponseLen = 0;
		kmem_free(dma_mem, sizeof (dma_mem_t));
2129 		return;
2130 	}
2131 
2132 	/* Allocate command block. */
2133 	if ((scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_IN ||
2134 	    scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_OUT) &&
2135 	    cmd->ResponseLen) {
2136 		pld_size = cmd->ResponseLen;
2137 		pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + pld_size);
2138 		pkt = kmem_zalloc(pkt_size, KM_SLEEP);
2139 		if (pkt == NULL) {
2140 			EL(ha, "failed, kmem_zalloc\n");
2141 			cmd->Status = EXT_STATUS_NO_MEMORY;
2142 			cmd->ResponseLen = 0;
2143 			return;
2144 		}
2145 		pld = (caddr_t)pkt + sizeof (ql_mbx_iocb_t);
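		/* The data payload staging buffer follows the IOCB in this allocation. */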
2146 
2147 		/* Get DMA memory for the IOCB */
2148 		if (ql_get_dma_mem(ha, dma_mem, pld_size, LITTLE_ENDIAN_DMA,
2149 		    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
2150 			cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
2151 			    "alloc failed", QL_NAME, ha->instance);
2152 			kmem_free(pkt, pkt_size);
2153 			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
2154 			cmd->ResponseLen = 0;
2155 			return;
2156 		}
2157 
2158 		if (scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_IN) {
2159 			scsi_req.direction = (uint8_t)
2160 			    (CFG_IST(ha, CFG_CTRL_242581) ?
2161 			    CF_RD : CF_DATA_IN | CF_STAG);
2162 		} else {
2163 			scsi_req.direction = (uint8_t)
2164 			    (CFG_IST(ha, CFG_CTRL_242581) ?
2165 			    CF_WR : CF_DATA_OUT | CF_STAG);
2166 			cmd->ResponseLen = 0;
2167 
2168 			/* Get command payload. */
2169 			if (ql_get_buffer_data(
2170 			    (caddr_t)(uintptr_t)cmd->ResponseAdr,
2171 			    pld, pld_size, mode) != pld_size) {
2172 				EL(ha, "failed, get_buffer_data\n");
2173 				cmd->Status = EXT_STATUS_COPY_ERR;
2174 
2175 				kmem_free(pkt, pkt_size);
2176 				ql_free_dma_resource(ha, dma_mem);
2177 				kmem_free(dma_mem, sizeof (dma_mem_t));
2178 				return;
2179 			}
2180 
2181 			/* Copy out going data to DMA buffer. */
2182 			ddi_rep_put8(dma_mem->acc_handle, (uint8_t *)pld,
2183 			    (uint8_t *)dma_mem->bp, pld_size,
2184 			    DDI_DEV_AUTOINCR);
2185 
2186 			/* Sync DMA buffer. */
2187 			(void) ddi_dma_sync(dma_mem->dma_handle, 0,
2188 			    dma_mem->size, DDI_DMA_SYNC_FORDEV);
2189 		}
2190 	} else {
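		/* No data transfer for this command; only the IOCB is needed. */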
2191 		scsi_req.direction = (uint8_t)
2192 		    (CFG_IST(ha, CFG_CTRL_242581) ? 0 : CF_STAG);
2193 		cmd->ResponseLen = 0;
2194 
2195 		pkt_size = sizeof (ql_mbx_iocb_t);
2196 		pkt = kmem_zalloc(pkt_size, KM_SLEEP);
2197 		if (pkt == NULL) {
2198 			EL(ha, "failed, kmem_zalloc-2\n");
2199 			cmd->Status = EXT_STATUS_NO_MEMORY;
2200 			return;
2201 		}
2202 		pld = NULL;
2203 		pld_size = 0;
2204 	}
2205 
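	/* One retry is attempted after re-logging in to the port. */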
2206 	/* retries = ha->port_down_retry_count; */
2207 	retries = 1;
2208 	cmd->Status = EXT_STATUS_OK;
2209 	cmd->DetailStatus = EXT_DSTATUS_NOADNL_INFO;
2210 
2211 	QL_PRINT_9(CE_CONT, "(%d): SCSI cdb\n", ha->instance);
2212 	QL_DUMP_9(scsi_req.cdbp, 8, scsi_req.cdb_len);
2213 
2214 	do {
2215 		if (DRIVER_SUSPENDED(ha)) {
2216 			sts.comp_status = CS_LOOP_DOWN_ABORT;
2217 			break;
2218 		}
2219 
2220 		if (CFG_IST(ha, CFG_CTRL_242581)) {
2221 			pkt->cmd24.entry_type = IOCB_CMD_TYPE_7;
2222 			pkt->cmd24.entry_count = 1;
2223 
2224 			/* Set LUN number */
2225 			pkt->cmd24.fcp_lun[2] = LSB(scsi_req.lun);
2226 			pkt->cmd24.fcp_lun[3] = MSB(scsi_req.lun);
2227 
2228 			/* Set N_port handle */
2229 			pkt->cmd24.n_port_hdl = (uint16_t)LE_16(tq->loop_id);
2230 
2231 			/* Set VP Index */
2232 			pkt->cmd24.vp_index = ha->vp_index;
2233 
2234 			/* Set target ID */
2235 			pkt->cmd24.target_id[0] = tq->d_id.b.al_pa;
2236 			pkt->cmd24.target_id[1] = tq->d_id.b.area;
2237 			pkt->cmd24.target_id[2] = tq->d_id.b.domain;
2238 
2239 			/* Set ISP command timeout. */
2240 			pkt->cmd24.timeout = (uint16_t)LE_16(15);
2241 
2242 			/* Load SCSI CDB */
2243 			ddi_rep_put8(ha->hba_buf.acc_handle, scsi_req.cdbp,
2244 			    pkt->cmd24.scsi_cdb, scsi_req.cdb_len,
2245 			    DDI_DEV_AUTOINCR);
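			/* Swap the CDB bytes within each 4-byte group for the IOCB. */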
2246 			for (cnt = 0; cnt < MAX_CMDSZ;
2247 			    cnt = (uint16_t)(cnt + 4)) {
2248 				ql_chg_endian((uint8_t *)&pkt->cmd24.scsi_cdb
2249 				    + cnt, 4);
2250 			}
2251 
2252 			/* Set tag queue control flags */
2253 			pkt->cmd24.task = TA_STAG;
2254 
2255 			if (pld_size) {
2256 				/* Set transfer direction. */
2257 				pkt->cmd24.control_flags = scsi_req.direction;
2258 
2259 				/* Set data segment count. */
2260 				pkt->cmd24.dseg_count = LE_16(1);
2261 
2262 				/* Load total byte count. */
2263 				pkt->cmd24.total_byte_count = LE_32(pld_size);
2264 
2265 				/* Load data descriptor. */
2266 				pkt->cmd24.dseg_0_address[0] = (uint32_t)
2267 				    LE_32(LSD(dma_mem->cookie.dmac_laddress));
2268 				pkt->cmd24.dseg_0_address[1] = (uint32_t)
2269 				    LE_32(MSD(dma_mem->cookie.dmac_laddress));
2270 				pkt->cmd24.dseg_0_length = LE_32(pld_size);
2271 			}
2272 		} else if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
2273 			pkt->cmd3.entry_type = IOCB_CMD_TYPE_3;
2274 			pkt->cmd3.entry_count = 1;
2275 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
2276 				pkt->cmd3.target_l = LSB(tq->loop_id);
2277 				pkt->cmd3.target_h = MSB(tq->loop_id);
2278 			} else {
2279 				pkt->cmd3.target_h = LSB(tq->loop_id);
2280 			}
2281 			pkt->cmd3.lun_l = LSB(scsi_req.lun);
2282 			pkt->cmd3.lun_h = MSB(scsi_req.lun);
2283 			pkt->cmd3.control_flags_l = scsi_req.direction;
2284 			pkt->cmd3.timeout = LE_16(15);
2285 			for (cnt = 0; cnt < scsi_req.cdb_len; cnt++) {
2286 				pkt->cmd3.scsi_cdb[cnt] = scsi_req.cdbp[cnt];
2287 			}
2288 			if (pld_size) {
2289 				pkt->cmd3.dseg_count = LE_16(1);
2290 				pkt->cmd3.byte_count = LE_32(pld_size);
2291 				pkt->cmd3.dseg_0_address[0] = (uint32_t)
2292 				    LE_32(LSD(dma_mem->cookie.dmac_laddress));
2293 				pkt->cmd3.dseg_0_address[1] = (uint32_t)
2294 				    LE_32(MSD(dma_mem->cookie.dmac_laddress));
2295 				pkt->cmd3.dseg_0_length = LE_32(pld_size);
2296 			}
2297 		} else {
2298 			pkt->cmd.entry_type = IOCB_CMD_TYPE_2;
2299 			pkt->cmd.entry_count = 1;
2300 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
2301 				pkt->cmd.target_l = LSB(tq->loop_id);
2302 				pkt->cmd.target_h = MSB(tq->loop_id);
2303 			} else {
2304 				pkt->cmd.target_h = LSB(tq->loop_id);
2305 			}
2306 			pkt->cmd.lun_l = LSB(scsi_req.lun);
2307 			pkt->cmd.lun_h = MSB(scsi_req.lun);
2308 			pkt->cmd.control_flags_l = scsi_req.direction;
2309 			pkt->cmd.timeout = LE_16(15);
2310 			for (cnt = 0; cnt < scsi_req.cdb_len; cnt++) {
2311 				pkt->cmd.scsi_cdb[cnt] = scsi_req.cdbp[cnt];
2312 			}
2313 			if (pld_size) {
2314 				pkt->cmd.dseg_count = LE_16(1);
2315 				pkt->cmd.byte_count = LE_32(pld_size);
2316 				pkt->cmd.dseg_0_address = (uint32_t)
2317 				    LE_32(LSD(dma_mem->cookie.dmac_laddress));
2318 				pkt->cmd.dseg_0_length = LE_32(pld_size);
2319 			}
2320 		}
2321 		/* Go issue command and wait for completion. */
2322 		QL_PRINT_9(CE_CONT, "(%d): request pkt\n", ha->instance);
2323 		QL_DUMP_9(pkt, 8, pkt_size);
2324 
2325 		status = ql_issue_mbx_iocb(ha, (caddr_t)pkt, pkt_size);
2326 
2327 		if (pld_size) {
2328 			/* Sync in coming DMA buffer. */
2329 			(void) ddi_dma_sync(dma_mem->dma_handle, 0,
2330 			    dma_mem->size, DDI_DMA_SYNC_FORKERNEL);
2331 			/* Copy in coming DMA data. */
2332 			ddi_rep_get8(dma_mem->acc_handle, (uint8_t *)pld,
2333 			    (uint8_t *)dma_mem->bp, pld_size,
2334 			    DDI_DEV_AUTOINCR);
2335 		}
2336 
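		/* Keep only the entry_status bits that indicate an IOCB error. */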
2337 		if (CFG_IST(ha, CFG_CTRL_242581)) {
2338 			pkt->sts24.entry_status = (uint8_t)
2339 			    (pkt->sts24.entry_status & 0x3c);
2340 		} else {
2341 			pkt->sts.entry_status = (uint8_t)
2342 			    (pkt->sts.entry_status & 0x7e);
2343 		}
2344 
2345 		if (status == QL_SUCCESS && pkt->sts.entry_status != 0) {
2346 			EL(ha, "failed, entry_status=%xh, d_id=%xh\n",
2347 			    pkt->sts.entry_status, tq->d_id.b24);
2348 			status = QL_FUNCTION_PARAMETER_ERROR;
2349 		}
2350 
2351 		sts.comp_status = (uint16_t)(CFG_IST(ha, CFG_CTRL_242581) ?
2352 		    LE_16(pkt->sts24.comp_status) :
2353 		    LE_16(pkt->sts.comp_status));
2354 
2355 		/*
2356 		 * We have verified as much of the request as we can so far.
2357 		 * Now we need to verify our ability to actually issue
2358 		 * the CDB.
2359 		 */
2360 		if (DRIVER_SUSPENDED(ha)) {
2361 			sts.comp_status = CS_LOOP_DOWN_ABORT;
2362 			break;
2363 		} else if (status == QL_SUCCESS &&
2364 		    (sts.comp_status == CS_PORT_LOGGED_OUT ||
2365 		    sts.comp_status == CS_PORT_UNAVAILABLE)) {
2366 			EL(ha, "login retry d_id=%xh\n", tq->d_id.b24);
2367 			if (tq->flags & TQF_FABRIC_DEVICE) {
2368 				rval = ql_login_fport(ha, tq, tq->loop_id,
2369 				    LFF_NO_PLOGI, &mr);
2370 				if (rval != QL_SUCCESS) {
2371 					EL(ha, "failed, login_fport=%xh, "
2372 					    "d_id=%xh\n", rval, tq->d_id.b24);
2373 				}
2374 			} else {
2375 				rval = ql_login_lport(ha, tq, tq->loop_id,
2376 				    LLF_NONE);
2377 				if (rval != QL_SUCCESS) {
2378 					EL(ha, "failed, login_lport=%xh, "
2379 					    "d_id=%xh\n", rval, tq->d_id.b24);
2380 				}
2381 			}
2382 		} else {
2383 			break;
2384 		}
2385 
2386 		bzero((caddr_t)pkt, sizeof (ql_mbx_iocb_t));
2387 
2388 	} while (retries--);
2389 
2390 	if (sts.comp_status == CS_LOOP_DOWN_ABORT) {
2391 		/* Cannot issue command now, maybe later */
2392 		EL(ha, "failed, suspended\n");
2393 		kmem_free(pkt, pkt_size);
2394 		ql_free_dma_resource(ha, dma_mem);
2395 		kmem_free(dma_mem, sizeof (dma_mem_t));
2396 		cmd->Status = EXT_STATUS_SUSPENDED;
2397 		cmd->ResponseLen = 0;
2398 		return;
2399 	}
2400 
2401 	if (status != QL_SUCCESS) {
2402 		/* Command error */
2403 		EL(ha, "failed, I/O\n");
2404 		kmem_free(pkt, pkt_size);
2405 		ql_free_dma_resource(ha, dma_mem);
2406 		kmem_free(dma_mem, sizeof (dma_mem_t));
2407 		cmd->Status = EXT_STATUS_ERR;
2408 		cmd->DetailStatus = status;
2409 		cmd->ResponseLen = 0;
2410 		return;
2411 	}
2412 
2413 	/* Setup status. */
2414 	if (CFG_IST(ha, CFG_CTRL_242581)) {
2415 		sts.scsi_status_l = pkt->sts24.scsi_status_l;
2416 		sts.scsi_status_h = pkt->sts24.scsi_status_h;
2417 
2418 		/* Setup residuals. */
2419 		sts.residual_length = LE_32(pkt->sts24.residual_length);
2420 
2421 		/* Setup state flags. */
2422 		sts.state_flags_l = pkt->sts24.state_flags_l;
2423 		sts.state_flags_h = pkt->sts24.state_flags_h;
2424 		if (pld_size && sts.comp_status != CS_DATA_UNDERRUN) {
2425 			sts.state_flags_h = (uint8_t)(sts.state_flags_h |
2426 			    SF_GOT_BUS | SF_GOT_TARGET | SF_SENT_CMD |
2427 			    SF_XFERRED_DATA | SF_GOT_STATUS);
2428 		} else {
2429 			sts.state_flags_h = (uint8_t)(sts.state_flags_h |
2430 			    SF_GOT_BUS | SF_GOT_TARGET | SF_SENT_CMD |
2431 			    SF_GOT_STATUS);
2432 		}
2433 		if (scsi_req.direction & CF_WR) {
2434 			sts.state_flags_l = (uint8_t)(sts.state_flags_l |
2435 			    SF_DATA_OUT);
2436 		} else if (scsi_req.direction & CF_RD) {
2437 			sts.state_flags_l = (uint8_t)(sts.state_flags_l |
2438 			    SF_DATA_IN);
2439 		}
2440 		sts.state_flags_l = (uint8_t)(sts.state_flags_l | SF_SIMPLE_Q);
2441 
2442 		/* Setup FCP response info. */
2443 		sts.rsp_info_length = sts.scsi_status_h & FCP_RSP_LEN_VALID ?
2444 		    LE_32(pkt->sts24.fcp_rsp_data_length) : 0;
2445 		sts.rsp_info = &pkt->sts24.rsp_sense_data[0];
2446 		for (cnt = 0; cnt < sts.rsp_info_length;
2447 		    cnt = (uint16_t)(cnt + 4)) {
2448 			ql_chg_endian(sts.rsp_info + cnt, 4);
2449 		}
2450 
2451 		/* Setup sense data. */
2452 		if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {
2453 			sts.req_sense_length =
2454 			    LE_32(pkt->sts24.fcp_sense_length);
2455 			sts.state_flags_h = (uint8_t)(sts.state_flags_h |
2456 			    SF_ARQ_DONE);
2457 		} else {
2458 			sts.req_sense_length = 0;
2459 		}
2460 		sts.req_sense_data =
2461 		    &pkt->sts24.rsp_sense_data[sts.rsp_info_length];
2462 		cnt2 = (uint16_t)(((uintptr_t)pkt + sizeof (sts_24xx_entry_t)) -
2463 		    (uintptr_t)sts.req_sense_data);
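		/* Swap, 4 bytes at a time, the sense bytes within the status entry. */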
2464 		for (cnt = 0; cnt < cnt2; cnt = (uint16_t)(cnt + 4)) {
2465 			ql_chg_endian(sts.req_sense_data + cnt, 4);
2466 		}
2467 	} else {
2468 		sts.scsi_status_l = pkt->sts.scsi_status_l;
2469 		sts.scsi_status_h = pkt->sts.scsi_status_h;
2470 
2471 		/* Setup residuals. */
2472 		sts.residual_length = LE_32(pkt->sts.residual_length);
2473 
2474 		/* Setup state flags. */
2475 		sts.state_flags_l = pkt->sts.state_flags_l;
2476 		sts.state_flags_h = pkt->sts.state_flags_h;
2477 
2478 		/* Setup FCP response info. */
2479 		sts.rsp_info_length = sts.scsi_status_h & FCP_RSP_LEN_VALID ?
2480 		    LE_16(pkt->sts.rsp_info_length) : 0;
2481 		sts.rsp_info = &pkt->sts.rsp_info[0];
2482 
2483 		/* Setup sense data. */
2484 		sts.req_sense_length = sts.scsi_status_h & FCP_SNS_LEN_VALID ?
2485 		    LE_16(pkt->sts.req_sense_length) : 0;
2486 		sts.req_sense_data = &pkt->sts.req_sense_data[0];
2487 	}
2488 
2489 	QL_PRINT_9(CE_CONT, "(%d): response pkt\n", ha->instance);
2490 	QL_DUMP_9(&pkt->sts, 8, sizeof (sts_entry_t));
2491 
2492 	switch (sts.comp_status) {
2493 	case CS_INCOMPLETE:
2494 	case CS_ABORTED:
2495 	case CS_DEVICE_UNAVAILABLE:
2496 	case CS_PORT_UNAVAILABLE:
2497 	case CS_PORT_LOGGED_OUT:
2498 	case CS_PORT_CONFIG_CHG:
2499 	case CS_PORT_BUSY:
2500 	case CS_LOOP_DOWN_ABORT:
2501 		cmd->Status = EXT_STATUS_BUSY;
2502 		break;
2503 	case CS_RESET:
2504 	case CS_QUEUE_FULL:
2505 		cmd->Status = EXT_STATUS_ERR;
2506 		break;
2507 	case CS_TIMEOUT:
2508 		cmd->Status = EXT_STATUS_ERR;
2509 		break;
2510 	case CS_DATA_OVERRUN:
2511 		cmd->Status = EXT_STATUS_DATA_OVERRUN;
2512 		break;
2513 	case CS_DATA_UNDERRUN:
2514 		cmd->Status = EXT_STATUS_DATA_UNDERRUN;
2515 		break;
2516 	}
2517 
2518 	/*
2519 	 * For non-data transfer commands, fix the transfer counts.
2520 	 */
2521 	if (scsi_req.cdbp[0] == SCMD_TEST_UNIT_READY ||
2522 	    scsi_req.cdbp[0] == SCMD_REZERO_UNIT ||
2523 	    scsi_req.cdbp[0] == SCMD_SEEK ||
2524 	    scsi_req.cdbp[0] == SCMD_SEEK_G1 ||
2525 	    scsi_req.cdbp[0] == SCMD_RESERVE ||
2526 	    scsi_req.cdbp[0] == SCMD_RELEASE ||
2527 	    scsi_req.cdbp[0] == SCMD_START_STOP ||
2528 	    scsi_req.cdbp[0] == SCMD_DOORLOCK ||
2529 	    scsi_req.cdbp[0] == SCMD_VERIFY ||
2530 	    scsi_req.cdbp[0] == SCMD_WRITE_FILE_MARK ||
2531 	    scsi_req.cdbp[0] == SCMD_VERIFY_G0 ||
2532 	    scsi_req.cdbp[0] == SCMD_SPACE ||
2533 	    scsi_req.cdbp[0] == SCMD_ERASE ||
2534 	    (scsi_req.cdbp[0] == SCMD_FORMAT &&
2535 	    (scsi_req.cdbp[1] & FPB_DATA) == 0)) {
2536 		/*
2537 		 * Non data transfer command, clear sts_entry residual
2538 		 * length.
2539 		 */
2540 		sts.residual_length = 0;
2541 		cmd->ResponseLen = 0;
2542 		if (sts.comp_status == CS_DATA_UNDERRUN) {
2543 			sts.comp_status = CS_COMPLETE;
2544 			cmd->Status = EXT_STATUS_OK;
2545 		}
2546 	} else {
2547 		cmd->ResponseLen = pld_size;
2548 	}
2549 
2550 	/* Correct ISP completion status */
2551 	if (sts.comp_status == CS_COMPLETE && sts.scsi_status_l == 0 &&
2552 	    (sts.scsi_status_h & FCP_RSP_MASK) == 0) {
2553 		QL_PRINT_9(CE_CONT, "(%d): Correct completion\n",
2554 		    ha->instance);
2555 		scsi_req.resid = 0;
2556 	} else if (sts.comp_status == CS_DATA_UNDERRUN) {
2557 		QL_PRINT_9(CE_CONT, "(%d): Correct UNDERRUN\n",
2558 		    ha->instance);
2559 		scsi_req.resid = sts.residual_length;
2560 		if (sts.scsi_status_h & FCP_RESID_UNDER) {
2561 			cmd->Status = (uint32_t)EXT_STATUS_OK;
2562 
2563 			cmd->ResponseLen = (uint32_t)
2564 			    (pld_size - scsi_req.resid);
2565 		} else {
2566 			EL(ha, "failed, Transfer ERROR\n");
2567 			cmd->Status = EXT_STATUS_ERR;
2568 			cmd->ResponseLen = 0;
2569 		}
2570 	} else {
2571 		QL_PRINT_9(CE_CONT, "(%d): error d_id=%xh, comp_status=%xh, "
2572 		    "scsi_status_h=%xh, scsi_status_l=%xh\n", ha->instance,
2573 		    tq->d_id.b24, sts.comp_status, sts.scsi_status_h,
2574 		    sts.scsi_status_l);
2575 
2576 		scsi_req.resid = pld_size;
2577 		/*
2578 		 * Handle residual count on SCSI check
2579 		 * condition.
2580 		 *
2581 		 * - If Residual Under / Over is set, use the
2582 		 *   Residual Transfer Length field in IOCB.
2583 		 * - If Residual Under / Over is not set, and
2584 		 *   Transferred Data bit is set in State Flags
2585 		 *   field of IOCB, report residual value of 0
2586 		 *   (you may want to do this for tape
2587 		 *   Write-type commands only). This takes care
2588 		 *   of logical end of tape problem and does
2589 		 *   not break Unit Attention.
2590 		 * - If Residual Under / Over is not set, and
2591 		 *   Transferred Data bit is not set in State
2592 		 *   Flags, report residual value equal to
2593 		 *   original data transfer length.
2594 		 */
2595 		if (sts.scsi_status_l & STATUS_CHECK) {
2596 			cmd->Status = EXT_STATUS_SCSI_STATUS;
2597 			cmd->DetailStatus = sts.scsi_status_l;
2598 			if (sts.scsi_status_h &
2599 			    (FCP_RESID_OVER | FCP_RESID_UNDER)) {
2600 				scsi_req.resid = sts.residual_length;
2601 			} else if (sts.state_flags_h &
2602 			    STATE_XFERRED_DATA) {
2603 				scsi_req.resid = 0;
2604 			}
2605 		}
2606 	}
2607 
2608 	if (sts.scsi_status_l & STATUS_CHECK &&
2609 	    sts.scsi_status_h & FCP_SNS_LEN_VALID &&
2610 	    sts.req_sense_length) {
2611 		/*
2612 		 * Check condition with valid sense data flag set and sense
2613 		 * length != 0.
2614 		 */
2615 		if (sts.req_sense_length > scsi_req.sense_length) {
2616 			sense_sz = scsi_req.sense_length;
2617 		} else {
2618 			sense_sz = sts.req_sense_length;
2619 		}
2620 
2621 		EL(ha, "failed, Check Condition Status, d_id=%xh\n",
2622 		    tq->d_id.b24);
2623 		QL_DUMP_2(sts.req_sense_data, 8, sts.req_sense_length);
2624 
2625 		if (ddi_copyout(sts.req_sense_data, scsi_req.u_sense,
2626 		    (size_t)sense_sz, mode) != 0) {
2627 			EL(ha, "failed, request sense ddi_copyout\n");
2628 		}
2629 
2630 		cmd->Status = EXT_STATUS_SCSI_STATUS;
2631 		cmd->DetailStatus = sts.scsi_status_l;
2632 	}
2633 
2634 	/* Copy response payload from DMA buffer to application. */
2635 	if (scsi_req.direction & (CF_RD | CF_DATA_IN) &&
2636 	    cmd->ResponseLen != 0) {
2637 		QL_PRINT_9(CE_CONT, "(%d): Data Return resid=%lu, "
2638 		    "byte_count=%u, ResponseLen=%xh\n", ha->instance,
2639 		    scsi_req.resid, pld_size, cmd->ResponseLen);
2640 		QL_DUMP_9(pld, 8, cmd->ResponseLen);
2641 
2642 		/* Send response payload. */
2643 		if (ql_send_buffer_data(pld,
2644 		    (caddr_t)(uintptr_t)cmd->ResponseAdr,
2645 		    cmd->ResponseLen, mode) != cmd->ResponseLen) {
2646 			EL(ha, "failed, send_buffer_data\n");
2647 			cmd->Status = EXT_STATUS_COPY_ERR;
2648 			cmd->ResponseLen = 0;
2649 		}
2650 	}
2651 
2652 	if (cmd->Status != EXT_STATUS_OK) {
2653 		EL(ha, "failed, cmd->Status=%xh, comp_status=%xh, "
2654 		    "d_id=%xh\n", cmd->Status, sts.comp_status, tq->d_id.b24);
2655 	} else {
2656 		/*EMPTY*/
2657 		QL_PRINT_9(CE_CONT, "(%d): done, ResponseLen=%d\n",
2658 		    ha->instance, cmd->ResponseLen);
2659 	}
2660 
2661 	kmem_free(pkt, pkt_size);
2662 	ql_free_dma_resource(ha, dma_mem);
2663 	kmem_free(dma_mem, sizeof (dma_mem_t));
2664 }
2665 
2666 /*
2667  * ql_wwpn_to_scsiaddr
2668  *
2669  * Input:
2670  *	ha:	adapter state pointer.
2671  *	cmd:	EXT_IOCTL cmd struct pointer.
2672  *	mode:	flags.
2673  *
2674  * Context:
2675  *	Kernel context.
2676  */
2677 static void
2678 ql_wwpn_to_scsiaddr(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2679 {
2680 	int		status;
2681 	uint8_t		wwpn[EXT_DEF_WWN_NAME_SIZE];
2682 	EXT_SCSI_ADDR	*tmp_addr;
2683 	ql_tgt_t	*tq;
2684 
2685 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2686 
2687 	if (cmd->RequestLen != EXT_DEF_WWN_NAME_SIZE) {
2688 		/* Return error */
2689 		EL(ha, "incorrect RequestLen\n");
2690 		cmd->Status = EXT_STATUS_INVALID_PARAM;
2691 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
2692 		return;
2693 	}
2694 
2695 	status = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, wwpn,
2696 	    cmd->RequestLen, mode);
2697 
2698 	if (status != 0) {
2699 		cmd->Status = EXT_STATUS_COPY_ERR;
2700 		EL(ha, "failed, ddi_copyin\n");
2701 		return;
2702 	}
2703 
2704 	tq = ql_find_port(ha, wwpn, QLNT_PORT);
2705 
2706 	if (tq == NULL || tq->flags & TQF_INITIATOR_DEVICE) {
2707 		/* no matching device */
2708 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
2709 		EL(ha, "failed, device not found\n");
2710 		return;
2711 	}
2712 
2713 	/* Copy out the IDs found.  For now we can only return target ID. */
2714 	tmp_addr = (EXT_SCSI_ADDR *)(uintptr_t)cmd->ResponseAdr;
2715 
2716 	status = ddi_copyout((void *)wwpn, (void *)&tmp_addr->Target, 8, mode);
2717 
2718 	if (status != 0) {
2719 		cmd->Status = EXT_STATUS_COPY_ERR;
2720 		EL(ha, "failed, ddi_copyout\n");
2721 	} else {
2722 		cmd->Status = EXT_STATUS_OK;
2723 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2724 	}
2725 }
2726 
2727 /*
2728  * ql_host_idx
2729  *	Gets host order index.
2730  *
2731  * Input:
2732  *	ha:	adapter state pointer.
2733  *	cmd:	EXT_IOCTL cmd struct pointer.
2734  *	mode:	flags.
2735  *
2736  * Returns:
2737  *	None, request status indicated in cmd->Status.
2738  *
2739  * Context:
2740  *	Kernel context.
2741  */
2742 static void
2743 ql_host_idx(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2744 {
2745 	uint16_t	idx;
2746 
2747 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2748 
2749 	if (cmd->ResponseLen < sizeof (uint16_t)) {
2750 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2751 		cmd->DetailStatus = sizeof (uint16_t);
2752 		EL(ha, "failed, ResponseLen < Len=%xh\n", cmd->ResponseLen);
2753 		cmd->ResponseLen = 0;
2754 		return;
2755 	}
2756 
2757 	idx = (uint16_t)ha->instance;
2758 
2759 	if (ddi_copyout((void *)&idx, (void *)(uintptr_t)(cmd->ResponseAdr),
2760 	    sizeof (uint16_t), mode) != 0) {
2761 		cmd->Status = EXT_STATUS_COPY_ERR;
2762 		cmd->ResponseLen = 0;
2763 		EL(ha, "failed, ddi_copyout\n");
2764 	} else {
2765 		cmd->ResponseLen = sizeof (uint16_t);
2766 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2767 	}
2768 }
2769 
2770 /*
2771  * ql_host_drvname
2772  *	Gets host driver name
2773  *
2774  * Input:
2775  *	ha:	adapter state pointer.
2776  *	cmd:	EXT_IOCTL cmd struct pointer.
2777  *	mode:	flags.
2778  *
2779  * Returns:
2780  *	None, request status indicated in cmd->Status.
2781  *
2782  * Context:
2783  *	Kernel context.
2784  */
2785 static void
2786 ql_host_drvname(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2787 {
2788 
2789 	char		drvname[] = QL_NAME;
2790 	uint32_t	qlnamelen;
2791 
2792 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2793 
2794 	qlnamelen = (uint32_t)(strlen(QL_NAME) + 1);
2795 
2796 	if (cmd->ResponseLen < qlnamelen) {
2797 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2798 		cmd->DetailStatus = qlnamelen;
2799 		EL(ha, "failed, ResponseLen: %xh, needed: %xh\n",
2800 		    cmd->ResponseLen, qlnamelen);
2801 		cmd->ResponseLen = 0;
2802 		return;
2803 	}
2804 
2805 	if (ddi_copyout((void *)&drvname,
2806 	    (void *)(uintptr_t)(cmd->ResponseAdr),
2807 	    qlnamelen, mode) != 0) {
2808 		cmd->Status = EXT_STATUS_COPY_ERR;
2809 		cmd->ResponseLen = 0;
2810 		EL(ha, "failed, ddi_copyout\n");
2811 	} else {
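		/* Return the name length, not counting the copied-out NUL. */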
2812 		cmd->ResponseLen = qlnamelen - 1;
2813 	}
2814 
2815 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2816 }
2817 
2818 /*
2819  * ql_read_nvram
2820  *	Get NVRAM contents.
2821  *
2822  * Input:
2823  *	ha:	adapter state pointer.
2824  *	cmd:	EXT_IOCTL cmd struct pointer.
2825  *	mode:	flags.
2826  *
2827  * Returns:
2828  *	None, request status indicated in cmd->Status.
2829  *
2830  * Context:
2831  *	Kernel context.
2832  */
2833 static void
2834 ql_read_nvram(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2835 {
2836 	uint32_t	nv_size;
2837 
2838 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2839 
2840 	nv_size = (uint32_t)(CFG_IST(ha, CFG_CTRL_242581) ?
2841 	    sizeof (nvram_24xx_t) : sizeof (nvram_t));
2842 	if (cmd->ResponseLen < nv_size) {
2843 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2844 		cmd->DetailStatus = nv_size;
2845 		EL(ha, "failed, ResponseLen != NVRAM, Len=%xh\n",
2846 		    cmd->ResponseLen);
2847 		cmd->ResponseLen = 0;
2848 		return;
2849 	}
2850 
2851 	/* Get NVRAM data. */
2852 	if (ql_nv_util_dump(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
2853 	    mode) != 0) {
2854 		cmd->Status = EXT_STATUS_COPY_ERR;
2855 		cmd->ResponseLen = 0;
2856 		EL(ha, "failed, copy error\n");
2857 	} else {
2858 		cmd->ResponseLen = nv_size;
2859 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2860 	}
2861 }
2862 
2863 /*
2864  * ql_write_nvram
2865  *	Loads NVRAM contents.
2866  *
2867  * Input:
2868  *	ha:	adapter state pointer.
2869  *	cmd:	EXT_IOCTL cmd struct pointer.
2870  *	mode:	flags.
2871  *
2872  * Returns:
2873  *	None, request status indicated in cmd->Status.
2874  *
2875  * Context:
2876  *	Kernel context.
2877  */
2878 static void
2879 ql_write_nvram(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2880 {
2881 	uint32_t	nv_size;
2882 
2883 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2884 
2885 	nv_size = (uint32_t)(CFG_IST(ha, CFG_CTRL_242581) ?
2886 	    sizeof (nvram_24xx_t) : sizeof (nvram_t));
2887 	if (cmd->RequestLen < nv_size) {
2888 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2889 		cmd->DetailStatus = nv_size;
2890 		EL(ha, "failed, RequestLen != NVRAM, Len=%xh\n",
2891 		    cmd->RequestLen);
2892 		return;
2893 	}
2894 
2895 	/* Load NVRAM data. */
2896 	if (ql_nv_util_load(ha, (void *)(uintptr_t)(cmd->RequestAdr),
2897 	    mode) != 0) {
2898 		cmd->Status = EXT_STATUS_COPY_ERR;
2899 		EL(ha, "failed, copy error\n");
2900 	} else {
2901 		/*EMPTY*/
2902 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2903 	}
2904 }
2905 
2906 /*
2907  * ql_write_vpd
2908  *	Loads VPD contents.
2909  *
2910  * Input:
2911  *	ha:	adapter state pointer.
2912  *	cmd:	EXT_IOCTL cmd struct pointer.
2913  *	mode:	flags.
2914  *
2915  * Returns:
2916  *	None, request status indicated in cmd->Status.
2917  *
2918  * Context:
2919  *	Kernel context.
2920  */
2921 static void
2922 ql_write_vpd(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2923 	int32_t		rval = 0;
2924 
2925 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2926 
2927 
2928 	if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
2929 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
2930 		EL(ha, "failed, invalid request for HBA\n");
2931 		return;
2932 	}
2933 
2934 	if (cmd->RequestLen < QL_24XX_VPD_SIZE) {
2935 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2936 		cmd->DetailStatus = QL_24XX_VPD_SIZE;
2937 		EL(ha, "failed, RequestLen != VPD len, len passed=%xh\n",
2938 		    cmd->RequestLen);
2939 		return;
2940 	}
2941 
2942 	/* Load VPD data. */
2943 	if ((rval = ql_vpd_load(ha, (void *)(uintptr_t)(cmd->RequestAdr),
2944 	    mode)) != 0) {
2945 		cmd->Status = EXT_STATUS_COPY_ERR;
2946 		cmd->DetailStatus = rval;
2947 		EL(ha, "failed, errno=%x\n", rval);
2948 	} else {
2949 		/*EMPTY*/
2950 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2951 	}
2952 }
2953 
2954 /*
2955  * ql_read_vpd
2956  *	Dumps VPD contents.
2957  *
2958  * Input:
2959  *	ha:	adapter state pointer.
2960  *	cmd:	EXT_IOCTL cmd struct pointer.
2961  *	mode:	flags.
2962  *
2963  * Returns:
2964  *	None, request status indicated in cmd->Status.
2965  *
2966  * Context:
2967  *	Kernel context.
2968  */
2969 static void
2970 ql_read_vpd(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2971 {
2972 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2973 
2974 	if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
2975 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
2976 		EL(ha, "failed, invalid request for HBA\n");
2977 		return;
2978 	}
2979 
2980 	if (cmd->ResponseLen < QL_24XX_VPD_SIZE) {
2981 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2982 		cmd->DetailStatus = QL_24XX_VPD_SIZE;
2983 		EL(ha, "failed, ResponseLen < VPD len, len passed=%xh\n",
2984 		    cmd->ResponseLen);
2985 		return;
2986 	}
2987 
2988 	/* Dump VPD data. */
2989 	if ((ql_vpd_dump(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
2990 	    mode)) != 0) {
2991 		cmd->Status = EXT_STATUS_COPY_ERR;
2992 		EL(ha, "failed,\n");
2993 	} else {
2994 		/*EMPTY*/
2995 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2996 	}
2997 }
2998 
2999 /*
3000  * ql_get_fcache
3001  *	Dumps flash cache contents.
3002  *
3003  * Input:
3004  *	ha:	adapter state pointer.
3005  *	cmd:	EXT_IOCTL cmd struct pointer.
3006  *	mode:	flags.
3007  *
3008  * Returns:
3009  *	None, request status indicated in cmd->Status.
3010  *
3011  * Context:
3012  *	Kernel context.
3013  */
3014 static void
3015 ql_get_fcache(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3016 {
3017 	uint32_t	bsize, boff, types, cpsize, hsize;
3018 	ql_fcache_t	*fptr;
3019 
3020 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3021 
3022 	CACHE_LOCK(ha);
3023 
3024 	if (ha->fcache == NULL) {
3025 		CACHE_UNLOCK(ha);
3026 		cmd->Status = EXT_STATUS_ERR;
3027 		EL(ha, "failed, adapter fcache not setup\n");
3028 		return;
3029 	}
3030 
3031 	if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
3032 		bsize = 100;
3033 	} else {
3034 		bsize = 400;
3035 	}
3036 
3037 	if (cmd->ResponseLen < bsize) {
3038 		CACHE_UNLOCK(ha);
3039 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
3040 		cmd->DetailStatus = bsize;
3041 		EL(ha, "failed, ResponseLen < %d, len passed=%xh\n",
3042 		    bsize, cmd->ResponseLen);
3043 		return;
3044 	}
3045 
3046 	boff = 0;
3047 	bsize = 0;
3048 	fptr = ha->fcache;
3049 
3050 	/*
3051 	 * For backwards compatibility, get one of each image type
3052 	 */
3053 	types = (FTYPE_BIOS | FTYPE_FCODE | FTYPE_EFI);
3054 	while ((fptr != NULL) && (fptr->buf != NULL) && (types != 0)) {
3055 		/* Get the next image */
3056 		if ((fptr = ql_get_fbuf(ha->fcache, types)) != NULL) {
3057 
3058 			cpsize = (fptr->buflen < 100 ? fptr->buflen : 100);
3059 
3060 			if (ddi_copyout(fptr->buf,
3061 			    (void *)(uintptr_t)(cmd->ResponseAdr + boff),
3062 			    cpsize, mode) != 0) {
3063 				CACHE_UNLOCK(ha);
3064 				EL(ha, "ddicopy failed, done\n");
3065 				cmd->Status = EXT_STATUS_COPY_ERR;
3066 				cmd->DetailStatus = 0;
3067 				return;
3068 			}
3069 			boff += 100;
3070 			bsize += cpsize;
3071 			types &= ~(fptr->type);
3072 		}
3073 	}
3074 
3075 	/*
3076 	 * Get the firmware image -- it needs to be last in the
3077 	 * buffer at offset 300 for backwards compatibility. Also for
3078 	 * backwards compatibility, the pci header is stripped off.
3079 	 */
3080 	if ((fptr = ql_get_fbuf(ha->fcache, FTYPE_FW)) != NULL) {
3081 
3082 		hsize = sizeof (pci_header_t) + sizeof (pci_data_t);
3083 		if (hsize > fptr->buflen) {
3084 			CACHE_UNLOCK(ha);
3085 			EL(ha, "header size (%xh) exceeds buflen (%xh)\n",
3086 			    hsize, fptr->buflen);
3087 			cmd->Status = EXT_STATUS_COPY_ERR;
3088 			cmd->DetailStatus = 0;
3089 			return;
3090 		}
3091 
3092 		cpsize = ((fptr->buflen - hsize) < 100 ?
3093 		    fptr->buflen - hsize : 100);
3094 
3095 		if (ddi_copyout(fptr->buf + hsize,
3096 		    (void *)(uintptr_t)(cmd->ResponseAdr + 300),
3097 		    cpsize, mode) != 0) {
3098 			CACHE_UNLOCK(ha);
3099 			EL(ha, "fw ddicopy failed, done\n");
3100 			cmd->Status = EXT_STATUS_COPY_ERR;
3101 			cmd->DetailStatus = 0;
3102 			return;
3103 		}
3104 		bsize += 100;
3105 	}
3106 
3107 	CACHE_UNLOCK(ha);
3108 	cmd->Status = EXT_STATUS_OK;
3109 	cmd->DetailStatus = bsize;
3110 
3111 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3112 }
3113 
3114 /*
3115  * ql_get_fcache_ex
3116  *	Dumps flash cache contents.
3117  *
3118  * Input:
3119  *	ha:	adapter state pointer.
3120  *	cmd:	EXT_IOCTL cmd struct pointer.
3121  *	mode:	flags.
3122  *
3123  * Returns:
3124  *	None, request status indicated in cmd->Status.
3125  *
3126  * Context:
3127  *	Kernel context.
3128  */
3129 static void
3130 ql_get_fcache_ex(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3131 {
3132 	uint32_t	bsize = 0;
3133 	uint32_t	boff = 0;
3134 	ql_fcache_t	*fptr;
3135 
3136 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3137 
3138 	CACHE_LOCK(ha);
3139 	if (ha->fcache == NULL) {
3140 		CACHE_UNLOCK(ha);
3141 		cmd->Status = EXT_STATUS_ERR;
3142 		EL(ha, "failed, adapter fcache not setup\n");
3143 		return;
3144 	}
3145 
3146 	/* Make sure user passed enough buffer space */
3147 	for (fptr = ha->fcache; fptr != NULL; fptr = fptr->next) {
3148 		bsize += FBUFSIZE;
3149 	}
3150 
3151 	if (cmd->ResponseLen < bsize) {
3152 		CACHE_UNLOCK(ha);
3153 		if (cmd->ResponseLen != 0) {
3154 			EL(ha, "failed, ResponseLen < %d, len passed=%xh\n",
3155 			    bsize, cmd->ResponseLen);
3156 		}
3157 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
3158 		cmd->DetailStatus = bsize;
3159 		return;
3160 	}
3161 
3162 	boff = 0;
3163 	fptr = ha->fcache;
3164 	while ((fptr != NULL) && (fptr->buf != NULL)) {
3165 		/* Get the next image */
3166 		if (ddi_copyout(fptr->buf,
3167 		    (void *)(uintptr_t)(cmd->ResponseAdr + boff),
3168 		    (fptr->buflen < FBUFSIZE ? fptr->buflen : FBUFSIZE),
3169 		    mode) != 0) {
3170 			CACHE_UNLOCK(ha);
3171 			EL(ha, "failed, ddicopy at %xh, done\n", boff);
3172 			cmd->Status = EXT_STATUS_COPY_ERR;
3173 			cmd->DetailStatus = 0;
3174 			return;
3175 		}
3176 		boff += FBUFSIZE;
3177 		fptr = fptr->next;
3178 	}
3179 
3180 	CACHE_UNLOCK(ha);
3181 	cmd->Status = EXT_STATUS_OK;
3182 	cmd->DetailStatus = bsize;
3183 
3184 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3185 }
3186 
3187 /*
3188  * ql_read_flash
3189  *	Get flash contents.
3190  *
3191  * Input:
3192  *	ha:	adapter state pointer.
3193  *	cmd:	EXT_IOCTL cmd struct pointer.
3194  *	mode:	flags.
3195  *
3196  * Returns:
3197  *	None, request status indicated in cmd->Status.
3198  *
3199  * Context:
3200  *	Kernel context.
3201  */
3202 static void
3203 ql_read_flash(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3204 {
3205 	ql_xioctl_t	*xp = ha->xioctl;
3206 
3207 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3208 
3209 	if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
3210 		EL(ha, "ql_stall_driver failed\n");
3211 		cmd->Status = EXT_STATUS_BUSY;
3212 		cmd->DetailStatus = xp->fdesc.flash_size;
3213 		cmd->ResponseLen = 0;
3214 		return;
3215 	}
3216 
3217 	if (ql_setup_fcache(ha) != QL_SUCCESS) {
3218 		cmd->Status = EXT_STATUS_ERR;
3219 		cmd->DetailStatus = xp->fdesc.flash_size;
3220 		EL(ha, "failed, ResponseLen=%xh, flash size=%xh\n",
3221 		    cmd->ResponseLen, xp->fdesc.flash_size);
3222 		cmd->ResponseLen = 0;
3223 	} else {
3224 		/* adjust read size to flash size */
3225 		if (cmd->ResponseLen > xp->fdesc.flash_size) {
3226 			EL(ha, "adjusting req=%xh, max=%xh\n",
3227 			    cmd->ResponseLen, xp->fdesc.flash_size);
3228 			cmd->ResponseLen = xp->fdesc.flash_size;
3229 		}
3230 
3231 		/* Get flash data. */
3232 		if (ql_flash_fcode_dump(ha,
3233 		    (void *)(uintptr_t)(cmd->ResponseAdr),
3234 		    (size_t)(cmd->ResponseLen), 0, mode) != 0) {
3235 			cmd->Status = EXT_STATUS_COPY_ERR;
3236 			cmd->ResponseLen = 0;
3237 			EL(ha, "failed,\n");
3238 		}
3239 	}
3240 
3241 	/* Resume I/O */
3242 	if (CFG_IST(ha, CFG_CTRL_242581)) {
3243 		ql_restart_driver(ha);
3244 	} else {
3245 		EL(ha, "isp_abort_needed for restart\n");
3246 		ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED,
3247 		    DRIVER_STALL);
3248 	}
3249 
3250 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3251 }
3252 
3253 /*
3254  * ql_write_flash
3255  *	Loads flash contents.
3256  *
3257  * Input:
3258  *	ha:	adapter state pointer.
3259  *	cmd:	EXT_IOCTL cmd struct pointer.
3260  *	mode:	flags.
3261  *
3262  * Returns:
3263  *	None, request status indicated in cmd->Status.
3264  *
3265  * Context:
3266  *	Kernel context.
3267  */
3268 static void
3269 ql_write_flash(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3270 {
3271 	ql_xioctl_t	*xp = ha->xioctl;
3272 
3273 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3274 
3275 	if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
3276 		EL(ha, "ql_stall_driver failed\n");
3277 		cmd->Status = EXT_STATUS_BUSY;
3278 		cmd->DetailStatus = xp->fdesc.flash_size;
3279 		cmd->ResponseLen = 0;
3280 		return;
3281 	}
3282 
3283 	if (ql_setup_fcache(ha) != QL_SUCCESS) {
3284 		cmd->Status = EXT_STATUS_ERR;
3285 		cmd->DetailStatus = xp->fdesc.flash_size;
3286 		EL(ha, "failed, RequestLen=%xh, size=%xh\n",
3287 		    cmd->RequestLen, xp->fdesc.flash_size);
3288 		cmd->ResponseLen = 0;
3289 	} else {
3290 		/* Load flash data. */
3291 		if (cmd->RequestLen > xp->fdesc.flash_size) {
3292 			cmd->Status = EXT_STATUS_ERR;
3293 			cmd->DetailStatus = xp->fdesc.flash_size;
3294 			EL(ha, "failed, RequestLen=%xh, flash size=%xh\n",
3295 			    cmd->RequestLen, xp->fdesc.flash_size);
3296 		} else if (ql_flash_fcode_load(ha,
3297 		    (void *)(uintptr_t)(cmd->RequestAdr),
3298 		    (size_t)(cmd->RequestLen), mode) != 0) {
3299 			cmd->Status = EXT_STATUS_COPY_ERR;
3300 			EL(ha, "failed,\n");
3301 		}
3302 	}
3303 
3304 	/* Resume I/O */
3305 	if (CFG_IST(ha, CFG_CTRL_242581)) {
3306 		ql_restart_driver(ha);
3307 	} else {
3308 		EL(ha, "isp_abort_needed for restart\n");
3309 		ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED,
3310 		    DRIVER_STALL);
3311 	}
3312 
3313 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3314 }
3315 
3316 /*
3317  * ql_diagnostic_loopback
3318  *	Performs EXT_CC_LOOPBACK Command
3319  *
3320  * Input:
3321  *	ha:	adapter state pointer.
3322  *	cmd:	Local EXT_IOCTL cmd struct pointer.
3323  *	mode:	flags.
3324  *
3325  * Returns:
3326  *	None, request status indicated in cmd->Status.
3327  *
3328  * Context:
3329  *	Kernel context.
3330  */
3331 static void
3332 ql_diagnostic_loopback(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3333 {
3334 	EXT_LOOPBACK_REQ	plbreq;
3335 	EXT_LOOPBACK_RSP	plbrsp;
3336 	ql_mbx_data_t		mr;
3337 	uint32_t		rval;
3338 	caddr_t			bp;
3339 
3340 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3341 
3342 	/* Get loop back request. */
3343 	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
3344 	    (void *)&plbreq, sizeof (EXT_LOOPBACK_REQ), mode) != 0) {
3345 		EL(ha, "failed, ddi_copyin\n");
3346 		cmd->Status = EXT_STATUS_COPY_ERR;
3347 		cmd->ResponseLen = 0;
3348 		return;
3349 	}
3350 
3351 	/* Check transfer length fits in buffer. */
3352 	if (plbreq.BufferLength < plbreq.TransferCount &&
3353 	    plbreq.TransferCount < MAILBOX_BUFFER_SIZE) {
3354 		EL(ha, "failed, BufferLength=%d, xfercnt=%d, "
3355 		    "mailbox_buffer_size=%d\n", plbreq.BufferLength,
3356 		    plbreq.TransferCount, MAILBOX_BUFFER_SIZE);
3357 		cmd->Status = EXT_STATUS_INVALID_PARAM;
3358 		cmd->ResponseLen = 0;
3359 		return;
3360 	}
3361 
3362 	/* Allocate command memory. */
3363 	bp = kmem_zalloc(plbreq.TransferCount, KM_SLEEP);
3364 	if (bp == NULL) {
3365 		EL(ha, "failed, kmem_zalloc\n");
3366 		cmd->Status = EXT_STATUS_NO_MEMORY;
3367 		cmd->ResponseLen = 0;
3368 		return;
3369 	}
3370 
3371 	/* Get loopback data. */
3372 	if (ql_get_buffer_data((caddr_t)(uintptr_t)plbreq.BufferAddress,
3373 	    bp, plbreq.TransferCount, mode) != plbreq.TransferCount) {
3374 		EL(ha, "failed, ddi_copyin-2\n");
3375 		kmem_free(bp, plbreq.TransferCount);
3376 		cmd->Status = EXT_STATUS_COPY_ERR;
3377 		cmd->ResponseLen = 0;
3378 		return;
3379 	}
3380 
3381 	if (DRIVER_SUSPENDED(ha) || ql_stall_driver(ha, 0) != QL_SUCCESS) {
3382 		EL(ha, "failed, LOOP_NOT_READY\n");
3383 		kmem_free(bp, plbreq.TransferCount);
3384 		cmd->Status = EXT_STATUS_BUSY;
3385 		cmd->ResponseLen = 0;
3386 		return;
3387 	}
3388 
3389 	/* Shutdown IP. */
3390 	if (ha->flags & IP_INITIALIZED) {
3391 		(void) ql_shutdown_ip(ha);
3392 	}
3393 
3394 	/* Determine topology so we can send the loopback or the echo. */
3395 	/* Echo is supported only on the 2300 and later adapters. */
3396 
3397 	if ((ha->topology & QL_F_PORT) && ha->device_id >= 0x2300) {
3398 		QL_PRINT_9(CE_CONT, "(%d): F_PORT topology -- using echo\n",
3399 		    ha->instance);
3400 		plbrsp.CommandSent = INT_DEF_LB_ECHO_CMD;
3401 		rval = ql_diag_echo(ha, 0, bp, plbreq.TransferCount, 0, &mr);
3402 	} else {
3403 		plbrsp.CommandSent = INT_DEF_LB_LOOPBACK_CMD;
3404 		rval = ql_diag_loopback(ha, 0, bp, plbreq.TransferCount,
3405 		    plbreq.Options, plbreq.IterationCount, &mr);
3406 	}
3407 
3408 	ql_restart_driver(ha);
3409 
3410 	/* Restart IP if it was shutdown. */
3411 	if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
3412 		(void) ql_initialize_ip(ha);
3413 		ql_isp_rcvbuf(ha);
3414 	}
3415 
3416 	if (rval != QL_SUCCESS) {
3417 		EL(ha, "failed, diagnostic_loopback_mbx=%xh\n", rval);
3418 		kmem_free(bp, plbreq.TransferCount);
3419 		cmd->Status = EXT_STATUS_MAILBOX;
3420 		cmd->DetailStatus = rval;
3421 		cmd->ResponseLen = 0;
3422 		return;
3423 	}
3424 
3425 	/* Return loopback data. */
3426 	if (ql_send_buffer_data(bp, (caddr_t)(uintptr_t)plbreq.BufferAddress,
3427 	    plbreq.TransferCount, mode) != plbreq.TransferCount) {
3428 		EL(ha, "failed, ddi_copyout\n");
3429 		kmem_free(bp, plbreq.TransferCount);
3430 		cmd->Status = EXT_STATUS_COPY_ERR;
3431 		cmd->ResponseLen = 0;
3432 		return;
3433 	}
3434 	kmem_free(bp, plbreq.TransferCount);
3435 
3436 	/* Return loopback results. */
3437 	plbrsp.BufferAddress = plbreq.BufferAddress;
3438 	plbrsp.BufferLength = plbreq.TransferCount;
3439 	plbrsp.CompletionStatus = mr.mb[0];
3440 
3441 	if (plbrsp.CommandSent == INT_DEF_LB_ECHO_CMD) {
3442 		plbrsp.CrcErrorCount = 0;
3443 		plbrsp.DisparityErrorCount = 0;
3444 		plbrsp.FrameLengthErrorCount = 0;
3445 		plbrsp.IterationCountLastError = 0;
3446 	} else {
3447 		plbrsp.CrcErrorCount = mr.mb[1];
3448 		plbrsp.DisparityErrorCount = mr.mb[2];
3449 		plbrsp.FrameLengthErrorCount = mr.mb[3];
3450 		plbrsp.IterationCountLastError = (mr.mb[19] << 16) | mr.mb[18];
3451 	}
3452 
3453 	rval = ddi_copyout((void *)&plbrsp,
3454 	    (void *)(uintptr_t)cmd->ResponseAdr,
3455 	    sizeof (EXT_LOOPBACK_RSP), mode);
3456 	if (rval != 0) {
3457 		EL(ha, "failed, ddi_copyout-2\n");
3458 		cmd->Status = EXT_STATUS_COPY_ERR;
3459 		cmd->ResponseLen = 0;
3460 		return;
3461 	}
3462 	cmd->ResponseLen = sizeof (EXT_LOOPBACK_RSP);
3463 
3464 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3465 }
3466 
3467 /*
3468  * ql_send_els_rnid
3469  *	IOCTL for extended link service RNID command.
3470  *
3471  * Input:
3472  *	ha:	adapter state pointer.
3473  *	cmd:	User space CT arguments pointer.
3474  *	mode:	flags.
3475  *
3476  * Returns:
3477  *	None, request status indicated in cmd->Status.
3478  *
3479  * Context:
3480  *	Kernel context.
3481  */
3482 static void
3483 ql_send_els_rnid(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3484 {
3485 	EXT_RNID_REQ	tmp_rnid;
3486 	port_id_t	tmp_fcid;
3487 	caddr_t		tmp_buf, bptr;
3488 	uint32_t	copy_len;
3489 	ql_tgt_t	*tq = NULL;
3490 	EXT_RNID_DATA	rnid_data;
3491 	uint32_t	loop_ready_wait = 10 * 60 * 10;
3492 	int		rval = 0;
3493 	uint32_t	local_hba = 0;
3494 
3495 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3496 
3497 	if (DRIVER_SUSPENDED(ha)) {
3498 		EL(ha, "failed, LOOP_NOT_READY\n");
3499 		cmd->Status = EXT_STATUS_BUSY;
3500 		cmd->ResponseLen = 0;
3501 		return;
3502 	}
3503 
3504 	if (cmd->RequestLen != sizeof (EXT_RNID_REQ)) {
3505 		/* parameter error */
3506 		EL(ha, "failed, RequestLen < EXT_RNID_REQ, Len=%xh\n",
3507 		    cmd->RequestLen);
3508 		cmd->Status = EXT_STATUS_INVALID_PARAM;
3509 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
3510 		cmd->ResponseLen = 0;
3511 		return;
3512 	}
3513 
3514 	if (ddi_copyin((void*)(uintptr_t)cmd->RequestAdr,
3515 	    &tmp_rnid, cmd->RequestLen, mode) != 0) {
3516 		EL(ha, "failed, ddi_copyin\n");
3517 		cmd->Status = EXT_STATUS_COPY_ERR;
3518 		cmd->ResponseLen = 0;
3519 		return;
3520 	}
3521 
3522 	/* Find loop ID of the device */
3523 	if (tmp_rnid.Addr.Type == EXT_DEF_TYPE_WWNN) {
3524 		bptr = CFG_IST(ha, CFG_CTRL_242581) ?
3525 		    (caddr_t)&ha->init_ctrl_blk.cb24.node_name :
3526 		    (caddr_t)&ha->init_ctrl_blk.cb.node_name;
3527 		if (bcmp((void *)bptr, (void *)tmp_rnid.Addr.FcAddr.WWNN,
3528 		    EXT_DEF_WWN_NAME_SIZE) == 0) {
3529 			local_hba = 1;
3530 		} else {
3531 			tq = ql_find_port(ha,
3532 			    (uint8_t *)tmp_rnid.Addr.FcAddr.WWNN, QLNT_NODE);
3533 		}
3534 	} else if (tmp_rnid.Addr.Type == EXT_DEF_TYPE_WWPN) {
3535 		bptr = CFG_IST(ha, CFG_CTRL_242581) ?
3536 		    (caddr_t)&ha->init_ctrl_blk.cb24.port_name :
3537 		    (caddr_t)&ha->init_ctrl_blk.cb.port_name;
3538 		if (bcmp((void *)bptr, (void *)tmp_rnid.Addr.FcAddr.WWPN,
3539 		    EXT_DEF_WWN_NAME_SIZE) == 0) {
3540 			local_hba = 1;
3541 		} else {
3542 			tq = ql_find_port(ha,
3543 			    (uint8_t *)tmp_rnid.Addr.FcAddr.WWPN, QLNT_PORT);
3544 		}
3545 	} else if (tmp_rnid.Addr.Type == EXT_DEF_TYPE_PORTID) {
3546 		/*
3547 		 * Copy caller's d_id to tmp space.
3548 		 */
3549 		bcopy(&tmp_rnid.Addr.FcAddr.Id[1], tmp_fcid.r.d_id,
3550 		    EXT_DEF_PORTID_SIZE_ACTUAL);
3551 		BIG_ENDIAN_24(&tmp_fcid.r.d_id[0]);
3552 
3553 		if (bcmp((void *)&ha->d_id, (void *)tmp_fcid.r.d_id,
3554 		    EXT_DEF_PORTID_SIZE_ACTUAL) == 0) {
3555 			local_hba = 1;
3556 		} else {
3557 			tq = ql_find_port(ha, (uint8_t *)tmp_fcid.r.d_id,
3558 			    QLNT_PID);
3559 		}
3560 	}
3561 
3562 	/* Allocate memory for command. */
3563 	tmp_buf = kmem_zalloc(SEND_RNID_RSP_SIZE, KM_SLEEP);
3564 	if (tmp_buf == NULL) {
3565 		EL(ha, "failed, kmem_zalloc\n");
3566 		cmd->Status = EXT_STATUS_NO_MEMORY;
3567 		cmd->ResponseLen = 0;
3568 		return;
3569 	}
3570 
3571 	if (local_hba) {
3572 		rval = ql_get_rnid_params(ha, SEND_RNID_RSP_SIZE, tmp_buf);
3573 		if (rval != QL_SUCCESS) {
3574 			EL(ha, "failed, get_rnid_params_mbx=%xh\n", rval);
3575 			kmem_free(tmp_buf, SEND_RNID_RSP_SIZE);
3576 			cmd->Status = EXT_STATUS_ERR;
3577 			cmd->ResponseLen = 0;
3578 			return;
3579 		}
3580 
3581 		/* Save gotten RNID data. */
3582 		bcopy(tmp_buf, &rnid_data, sizeof (EXT_RNID_DATA));
3583 
3584 		/* Now build the Send RNID response */
3585 		tmp_buf[0] = (char)(EXT_DEF_RNID_DFORMAT_TOPO_DISC);
3586 		tmp_buf[1] = (2 * EXT_DEF_WWN_NAME_SIZE);
3587 		tmp_buf[2] = 0;
3588 		tmp_buf[3] = sizeof (EXT_RNID_DATA);
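		/* Header: data format, common-ID length, reserved, specific-data length. */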
3589 
3590 		if (CFG_IST(ha, CFG_CTRL_242581)) {
3591 			bcopy(ha->init_ctrl_blk.cb24.port_name, &tmp_buf[4],
3592 			    EXT_DEF_WWN_NAME_SIZE);
3593 			bcopy(ha->init_ctrl_blk.cb24.node_name,
3594 			    &tmp_buf[4 + EXT_DEF_WWN_NAME_SIZE],
3595 			    EXT_DEF_WWN_NAME_SIZE);
3596 		} else {
3597 			bcopy(ha->init_ctrl_blk.cb.port_name, &tmp_buf[4],
3598 			    EXT_DEF_WWN_NAME_SIZE);
3599 			bcopy(ha->init_ctrl_blk.cb.node_name,
3600 			    &tmp_buf[4 + EXT_DEF_WWN_NAME_SIZE],
3601 			    EXT_DEF_WWN_NAME_SIZE);
3602 		}
3603 
3604 		bcopy((uint8_t *)&rnid_data,
3605 		    &tmp_buf[4 + 2 * EXT_DEF_WWN_NAME_SIZE],
3606 		    sizeof (EXT_RNID_DATA));
3607 	} else {
3608 		if (tq == NULL) {
3609 			/* no matching device */
3610 			EL(ha, "failed, device not found\n");
3611 			kmem_free(tmp_buf, SEND_RNID_RSP_SIZE);
3612 			cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
3613 			cmd->DetailStatus = EXT_DSTATUS_TARGET;
3614 			cmd->ResponseLen = 0;
3615 			return;
3616 		}
3617 
3618 		/* Send command */
3619 		rval = ql_send_rnid_els(ha, tq->loop_id,
3620 		    (uint8_t)tmp_rnid.DataFormat, SEND_RNID_RSP_SIZE, tmp_buf);
3621 		if (rval != QL_SUCCESS) {
3622 			EL(ha, "failed, send_rnid_mbx=%xh, id=%xh\n",
3623 			    rval, tq->loop_id);
3624 			while (LOOP_NOT_READY(ha)) {
3625 				ql_delay(ha, 100000);
				if (loop_ready_wait-- == 0) {
					EL(ha, "failed, loop not ready\n");
					kmem_free(tmp_buf, SEND_RNID_RSP_SIZE);
					cmd->Status = EXT_STATUS_ERR;
					cmd->ResponseLen = 0;
					return;
				}
3631 			}
3632 			rval = ql_send_rnid_els(ha, tq->loop_id,
3633 			    (uint8_t)tmp_rnid.DataFormat, SEND_RNID_RSP_SIZE,
3634 			    tmp_buf);
3635 			if (rval != QL_SUCCESS) {
3636 				/* error */
3637 				EL(ha, "failed, send_rnid_mbx=%xh, id=%xh\n",
3638 				    rval, tq->loop_id);
3639 				kmem_free(tmp_buf, SEND_RNID_RSP_SIZE);
3640 				cmd->Status = EXT_STATUS_ERR;
3641 				cmd->ResponseLen = 0;
3642 				return;
3643 			}
3644 		}
3645 	}
3646 
3647 	/* Copy the response */
3648 	copy_len = (cmd->ResponseLen > SEND_RNID_RSP_SIZE) ?
3649 	    SEND_RNID_RSP_SIZE : cmd->ResponseLen;
3650 
3651 	if (ql_send_buffer_data(tmp_buf, (caddr_t)(uintptr_t)cmd->ResponseAdr,
3652 	    copy_len, mode) != copy_len) {
3653 		cmd->Status = EXT_STATUS_COPY_ERR;
3654 		EL(ha, "failed, ddi_copyout\n");
3655 	} else {
3656 		cmd->ResponseLen = copy_len;
3657 		if (copy_len < SEND_RNID_RSP_SIZE) {
3658 			cmd->Status = EXT_STATUS_DATA_OVERRUN;
3659 			EL(ha, "failed, EXT_STATUS_DATA_OVERRUN\n");
3660 
3661 		} else if (cmd->ResponseLen > SEND_RNID_RSP_SIZE) {
3662 			cmd->Status = EXT_STATUS_DATA_UNDERRUN;
3663 			EL(ha, "failed, EXT_STATUS_DATA_UNDERRUN\n");
3664 		} else {
3665 			cmd->Status = EXT_STATUS_OK;
3666 			QL_PRINT_9(CE_CONT, "(%d): done\n",
3667 			    ha->instance);
3668 		}
3669 	}
3670 
3671 	kmem_free(tmp_buf, SEND_RNID_RSP_SIZE);
3672 }
3673 
3674 /*
3675  * ql_set_host_data
3676  *	Process IOCTL subcommand to set host/adapter related data.
3677  *
3678  * Input:
3679  *	ha:	adapter state pointer.
3680  *	cmd:	User space CT arguments pointer.
3681  *	mode:	flags.
3682  *
3683  * Returns:
3684  *	None, request status indicated in cmd->Status.
3685  *
3686  * Context:
3687  *	Kernel context.
3688  */
3689 static void
3690 ql_set_host_data(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3691 {
3692 	QL_PRINT_9(CE_CONT, "(%d): started, SubCode=%d\n", ha->instance,
3693 	    cmd->SubCode);
3694 
	/*
	 * Case off on the command subcode.
	 */
3698 	switch (cmd->SubCode) {
3699 	case EXT_SC_SET_RNID:
3700 		ql_set_rnid_parameters(ha, cmd, mode);
3701 		break;
3702 	case EXT_SC_RST_STATISTICS:
3703 		(void) ql_reset_statistics(ha, cmd);
3704 		break;
3705 	case EXT_SC_SET_BEACON_STATE:
3706 		ql_set_led_state(ha, cmd, mode);
3707 		break;
3708 	case EXT_SC_SET_PARMS:
3709 	case EXT_SC_SET_BUS_MODE:
3710 	case EXT_SC_SET_DR_DUMP_BUF:
3711 	case EXT_SC_SET_RISC_CODE:
3712 	case EXT_SC_SET_FLASH_RAM:
3713 	case EXT_SC_SET_LUN_BITMASK:
3714 	case EXT_SC_SET_RETRY_CNT:
3715 	case EXT_SC_SET_RTIN:
3716 	case EXT_SC_SET_FC_LUN_BITMASK:
3717 	case EXT_SC_ADD_TARGET_DEVICE:
3718 	case EXT_SC_SWAP_TARGET_DEVICE:
3719 	case EXT_SC_SET_SEL_TIMEOUT:
3720 	default:
3721 		/* function not supported. */
3722 		EL(ha, "failed, function not supported=%d\n", cmd->SubCode);
3723 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
3724 		break;
3725 	}
3726 
3727 	if (cmd->Status != EXT_STATUS_OK) {
3728 		EL(ha, "failed, Status=%d\n", cmd->Status);
3729 	} else {
3730 		/*EMPTY*/
3731 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3732 	}
3733 }
3734 
3735 /*
3736  * ql_get_host_data
3737  *	Performs EXT_CC_GET_DATA subcommands.
3738  *
3739  * Input:
3740  *	ha:	adapter state pointer.
3741  *	cmd:	Local EXT_IOCTL cmd struct pointer.
3742  *	mode:	flags.
3743  *
3744  * Returns:
3745  *	None, request status indicated in cmd->Status.
3746  *
3747  * Context:
3748  *	Kernel context.
3749  */
3750 static void
3751 ql_get_host_data(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3752 {
3753 	int	out_size = 0;
3754 
3755 	QL_PRINT_9(CE_CONT, "(%d): started, SubCode=%d\n", ha->instance,
3756 	    cmd->SubCode);
3757 
	/* Case off on the command subcode. */
3759 	switch (cmd->SubCode) {
3760 	case EXT_SC_GET_STATISTICS:
3761 		out_size = sizeof (EXT_HBA_PORT_STAT);
3762 		break;
3763 	case EXT_SC_GET_FC_STATISTICS:
3764 		out_size = sizeof (EXT_HBA_PORT_STAT);
3765 		break;
3766 	case EXT_SC_GET_PORT_SUMMARY:
3767 		out_size = sizeof (EXT_DEVICEDATA);
3768 		break;
3769 	case EXT_SC_GET_RNID:
3770 		out_size = sizeof (EXT_RNID_DATA);
3771 		break;
3772 	case EXT_SC_GET_TARGET_ID:
3773 		out_size = sizeof (EXT_DEST_ADDR);
3774 		break;
3775 	case EXT_SC_GET_BEACON_STATE:
3776 		out_size = sizeof (EXT_BEACON_CONTROL);
3777 		break;
3778 	case EXT_SC_GET_FC4_STATISTICS:
3779 		out_size = sizeof (EXT_HBA_FC4STATISTICS);
3780 		break;
3781 	case EXT_SC_GET_DCBX_PARAM:
3782 		out_size = EXT_DEF_DCBX_PARAM_BUF_SIZE;
3783 		break;
3784 	case EXT_SC_GET_SCSI_ADDR:
3785 	case EXT_SC_GET_ERR_DETECTIONS:
3786 	case EXT_SC_GET_BUS_MODE:
3787 	case EXT_SC_GET_DR_DUMP_BUF:
3788 	case EXT_SC_GET_RISC_CODE:
3789 	case EXT_SC_GET_FLASH_RAM:
3790 	case EXT_SC_GET_LINK_STATUS:
3791 	case EXT_SC_GET_LOOP_ID:
3792 	case EXT_SC_GET_LUN_BITMASK:
3793 	case EXT_SC_GET_PORT_DATABASE:
3794 	case EXT_SC_GET_PORT_DATABASE_MEM:
3795 	case EXT_SC_GET_POSITION_MAP:
3796 	case EXT_SC_GET_RETRY_CNT:
3797 	case EXT_SC_GET_RTIN:
3798 	case EXT_SC_GET_FC_LUN_BITMASK:
3799 	case EXT_SC_GET_SEL_TIMEOUT:
3800 	default:
3801 		/* function not supported. */
3802 		EL(ha, "failed, function not supported=%d\n", cmd->SubCode);
3803 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
3804 		cmd->ResponseLen = 0;
3805 		return;
3806 	}
3807 
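	/* Make sure the caller's response buffer can hold the reply. */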
3808 	if (cmd->ResponseLen < out_size) {
3809 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
3810 		cmd->DetailStatus = out_size;
3811 		EL(ha, "failed, ResponseLen=%xh, size=%xh\n",
3812 		    cmd->ResponseLen, out_size);
3813 		cmd->ResponseLen = 0;
3814 		return;
3815 	}
3816 
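	/* Dispatch the validated subcommand to its handler. */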
3817 	switch (cmd->SubCode) {
3818 	case EXT_SC_GET_RNID:
3819 		ql_get_rnid_parameters(ha, cmd, mode);
3820 		break;
3821 	case EXT_SC_GET_STATISTICS:
3822 		ql_get_statistics(ha, cmd, mode);
3823 		break;
3824 	case EXT_SC_GET_FC_STATISTICS:
3825 		ql_get_statistics_fc(ha, cmd, mode);
3826 		break;
3827 	case EXT_SC_GET_FC4_STATISTICS:
3828 		ql_get_statistics_fc4(ha, cmd, mode);
3829 		break;
3830 	case EXT_SC_GET_PORT_SUMMARY:
3831 		ql_get_port_summary(ha, cmd, mode);
3832 		break;
3833 	case EXT_SC_GET_TARGET_ID:
3834 		ql_get_target_id(ha, cmd, mode);
3835 		break;
3836 	case EXT_SC_GET_BEACON_STATE:
3837 		ql_get_led_state(ha, cmd, mode);
3838 		break;
3839 	case EXT_SC_GET_DCBX_PARAM:
3840 		ql_get_dcbx_parameters(ha, cmd, mode);
3841 		break;
3842 	}
3843 
3844 	if (cmd->Status != EXT_STATUS_OK) {
3845 		EL(ha, "failed, Status=%d\n", cmd->Status);
3846 	} else {
3847 		/*EMPTY*/
3848 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3849 	}
3850 }
3851 
3852 /* ******************************************************************** */
3853 /*			Helper Functions				*/
3854 /* ******************************************************************** */
3855 
3856 /*
3857  * ql_lun_count
 *	Get the number of LUNs on a target.
3859  *
3860  * Input:
3861  *	ha:	adapter state pointer.
 *	tq:	target queue pointer.
3863  *
3864  * Returns:
3865  *	Number of LUNs.
3866  *
3867  * Context:
3868  *	Kernel context.
3869  */
3870 static int
3871 ql_lun_count(ql_adapter_state_t *ha, ql_tgt_t *tq)
3872 {
3873 	int	cnt;
3874 
3875 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3876 
	/* Try REPORT LUNS first; fall back to an INQUIRY scan of LUNs. */
3878 	cnt = ql_report_lun(ha, tq);
3879 	if (cnt == 0) {
3880 		cnt = ql_inq_scan(ha, tq, ha->maximum_luns_per_target);
3881 	}
3882 
3883 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3884 
3885 	return (cnt);
3886 }
3887 
3888 /*
3889  * ql_report_lun
 *	Get the number of LUNs using the SCSI REPORT LUNS command.
3891  *
3892  * Input:
3893  *	ha:	adapter state pointer.
 *	tq:	target queue pointer.
3895  *
3896  * Returns:
3897  *	Number of LUNs.
3898  *
3899  * Context:
3900  *	Kernel context.
3901  */
3902 static int
3903 ql_report_lun(ql_adapter_state_t *ha, ql_tgt_t *tq)
3904 {
3905 	int			rval;
3906 	uint8_t			retries;
3907 	ql_mbx_iocb_t		*pkt;
3908 	ql_rpt_lun_lst_t	*rpt;
3909 	dma_mem_t		dma_mem;
3910 	uint32_t		pkt_size, cnt;
3911 	uint16_t		comp_status;
3912 	uint8_t			scsi_status_h, scsi_status_l, *reqs;
3913 
3914 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3915 
3916 	if (DRIVER_SUSPENDED(ha)) {
3917 		EL(ha, "failed, LOOP_NOT_READY\n");
3918 		return (0);
3919 	}
3920 
3921 	pkt_size = sizeof (ql_mbx_iocb_t) + sizeof (ql_rpt_lun_lst_t);
3922 	pkt = kmem_zalloc(pkt_size, KM_SLEEP);
3923 	if (pkt == NULL) {
3924 		EL(ha, "failed, kmem_zalloc\n");
3925 		return (0);
3926 	}
3927 	rpt = (ql_rpt_lun_lst_t *)((caddr_t)pkt + sizeof (ql_mbx_iocb_t));
3928 
3929 	/* Get DMA memory for the IOCB */
3930 	if (ql_get_dma_mem(ha, &dma_mem, sizeof (ql_rpt_lun_lst_t),
3931 	    LITTLE_ENDIAN_DMA, QL_DMA_RING_ALIGN) != QL_SUCCESS) {
3932 		cmn_err(CE_WARN, "%s(%d): DMA memory "
3933 		    "alloc failed", QL_NAME, ha->instance);
3934 		kmem_free(pkt, pkt_size);
3935 		return (0);
3936 	}
3937 
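	/* Retry the REPORT LUNS command up to four times. */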
3938 	for (retries = 0; retries < 4; retries++) {
3939 		if (CFG_IST(ha, CFG_CTRL_242581)) {
3940 			pkt->cmd24.entry_type = IOCB_CMD_TYPE_7;
3941 			pkt->cmd24.entry_count = 1;
3942 
3943 			/* Set N_port handle */
3944 			pkt->cmd24.n_port_hdl = (uint16_t)LE_16(tq->loop_id);
3945 
3946 			/* Set target ID */
3947 			pkt->cmd24.target_id[0] = tq->d_id.b.al_pa;
3948 			pkt->cmd24.target_id[1] = tq->d_id.b.area;
3949 			pkt->cmd24.target_id[2] = tq->d_id.b.domain;
3950 
3951 			/* Set ISP command timeout. */
3952 			pkt->cmd24.timeout = LE_16(15);
3953 
3954 			/* Load SCSI CDB */
3955 			pkt->cmd24.scsi_cdb[0] = SCMD_REPORT_LUNS;
3956 			pkt->cmd24.scsi_cdb[6] =
3957 			    MSB(MSW(sizeof (ql_rpt_lun_lst_t)));
3958 			pkt->cmd24.scsi_cdb[7] =
3959 			    LSB(MSW(sizeof (ql_rpt_lun_lst_t)));
3960 			pkt->cmd24.scsi_cdb[8] =
3961 			    MSB(LSW(sizeof (ql_rpt_lun_lst_t)));
3962 			pkt->cmd24.scsi_cdb[9] =
3963 			    LSB(LSW(sizeof (ql_rpt_lun_lst_t)));
3964 			for (cnt = 0; cnt < MAX_CMDSZ; cnt += 4) {
3965 				ql_chg_endian((uint8_t *)&pkt->cmd24.scsi_cdb
3966 				    + cnt, 4);
3967 			}
3968 
3969 			/* Set tag queue control flags */
3970 			pkt->cmd24.task = TA_STAG;
3971 
3972 			/* Set transfer direction. */
3973 			pkt->cmd24.control_flags = CF_RD;
3974 
3975 			/* Set data segment count. */
3976 			pkt->cmd24.dseg_count = LE_16(1);
3977 
3978 			/* Load total byte count. */
3979 			/* Load data descriptor. */
3980 			pkt->cmd24.dseg_0_address[0] = (uint32_t)
3981 			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
3982 			pkt->cmd24.dseg_0_address[1] = (uint32_t)
3983 			    LE_32(MSD(dma_mem.cookie.dmac_laddress));
3984 			pkt->cmd24.total_byte_count =
3985 			    LE_32(sizeof (ql_rpt_lun_lst_t));
3986 			pkt->cmd24.dseg_0_length =
3987 			    LE_32(sizeof (ql_rpt_lun_lst_t));
3988 		} else if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
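			/* Command type 3 IOCB: 64-bit data segment addresses. */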
3989 			pkt->cmd3.entry_type = IOCB_CMD_TYPE_3;
3990 			pkt->cmd3.entry_count = 1;
3991 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
3992 				pkt->cmd3.target_l = LSB(tq->loop_id);
3993 				pkt->cmd3.target_h = MSB(tq->loop_id);
3994 			} else {
3995 				pkt->cmd3.target_h = LSB(tq->loop_id);
3996 			}
3997 			pkt->cmd3.control_flags_l = CF_DATA_IN | CF_STAG;
3998 			pkt->cmd3.timeout = LE_16(15);
3999 			pkt->cmd3.dseg_count = LE_16(1);
4000 			pkt->cmd3.scsi_cdb[0] = SCMD_REPORT_LUNS;
4001 			pkt->cmd3.scsi_cdb[6] =
4002 			    MSB(MSW(sizeof (ql_rpt_lun_lst_t)));
4003 			pkt->cmd3.scsi_cdb[7] =
4004 			    LSB(MSW(sizeof (ql_rpt_lun_lst_t)));
4005 			pkt->cmd3.scsi_cdb[8] =
4006 			    MSB(LSW(sizeof (ql_rpt_lun_lst_t)));
4007 			pkt->cmd3.scsi_cdb[9] =
4008 			    LSB(LSW(sizeof (ql_rpt_lun_lst_t)));
4009 			pkt->cmd3.byte_count =
4010 			    LE_32(sizeof (ql_rpt_lun_lst_t));
4011 			pkt->cmd3.dseg_0_address[0] = (uint32_t)
4012 			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
4013 			pkt->cmd3.dseg_0_address[1] = (uint32_t)
4014 			    LE_32(MSD(dma_mem.cookie.dmac_laddress));
4015 			pkt->cmd3.dseg_0_length =
4016 			    LE_32(sizeof (ql_rpt_lun_lst_t));
4017 		} else {
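			/* Command type 2 IOCB: 32-bit data segment addresses. */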
4018 			pkt->cmd.entry_type = IOCB_CMD_TYPE_2;
4019 			pkt->cmd.entry_count = 1;
4020 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
4021 				pkt->cmd.target_l = LSB(tq->loop_id);
4022 				pkt->cmd.target_h = MSB(tq->loop_id);
4023 			} else {
4024 				pkt->cmd.target_h = LSB(tq->loop_id);
4025 			}
4026 			pkt->cmd.control_flags_l = CF_DATA_IN | CF_STAG;
4027 			pkt->cmd.timeout = LE_16(15);
4028 			pkt->cmd.dseg_count = LE_16(1);
4029 			pkt->cmd.scsi_cdb[0] = SCMD_REPORT_LUNS;
4030 			pkt->cmd.scsi_cdb[6] =
4031 			    MSB(MSW(sizeof (ql_rpt_lun_lst_t)));
4032 			pkt->cmd.scsi_cdb[7] =
4033 			    LSB(MSW(sizeof (ql_rpt_lun_lst_t)));
4034 			pkt->cmd.scsi_cdb[8] =
4035 			    MSB(LSW(sizeof (ql_rpt_lun_lst_t)));
4036 			pkt->cmd.scsi_cdb[9] =
4037 			    LSB(LSW(sizeof (ql_rpt_lun_lst_t)));
4038 			pkt->cmd.byte_count =
4039 			    LE_32(sizeof (ql_rpt_lun_lst_t));
4040 			pkt->cmd.dseg_0_address = (uint32_t)
4041 			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
4042 			pkt->cmd.dseg_0_length =
4043 			    LE_32(sizeof (ql_rpt_lun_lst_t));
4044 		}
4045 
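		/* Issue the IOCB; its status entry is returned in the same buffer. */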
4046 		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
4047 		    sizeof (ql_mbx_iocb_t));
4048 
		/* Sync the incoming DMA buffer. */
4050 		(void) ddi_dma_sync(dma_mem.dma_handle, 0, dma_mem.size,
4051 		    DDI_DMA_SYNC_FORKERNEL);
		/* Copy the incoming DMA data. */
4053 		ddi_rep_get8(dma_mem.acc_handle, (uint8_t *)rpt,
4054 		    (uint8_t *)dma_mem.bp, dma_mem.size, DDI_DEV_AUTOINCR);
4055 
4056 		if (CFG_IST(ha, CFG_CTRL_242581)) {
4057 			pkt->sts24.entry_status = (uint8_t)
4058 			    (pkt->sts24.entry_status & 0x3c);
4059 			comp_status = (uint16_t)LE_16(pkt->sts24.comp_status);
4060 			scsi_status_h = pkt->sts24.scsi_status_h;
4061 			scsi_status_l = pkt->sts24.scsi_status_l;
4062 			cnt = scsi_status_h & FCP_RSP_LEN_VALID ?
4063 			    LE_32(pkt->sts24.fcp_rsp_data_length) : 0;
4064 			reqs = &pkt->sts24.rsp_sense_data[cnt];
4065 		} else {
4066 			pkt->sts.entry_status = (uint8_t)
4067 			    (pkt->sts.entry_status & 0x7e);
4068 			comp_status = (uint16_t)LE_16(pkt->sts.comp_status);
4069 			scsi_status_h = pkt->sts.scsi_status_h;
4070 			scsi_status_l = pkt->sts.scsi_status_l;
4071 			reqs = &pkt->sts.req_sense_data[0];
4072 		}
4073 		if (rval == QL_SUCCESS && pkt->sts.entry_status != 0) {
4074 			EL(ha, "failed, entry_status=%xh, d_id=%xh\n",
4075 			    pkt->sts.entry_status, tq->d_id.b24);
4076 			rval = QL_FUNCTION_PARAMETER_ERROR;
4077 		}
4078 
4079 		if (rval != QL_SUCCESS || comp_status != CS_COMPLETE ||
4080 		    scsi_status_l & STATUS_CHECK) {
4081 			/* Device underrun, treat as OK. */
4082 			if (rval == QL_SUCCESS &&
4083 			    comp_status == CS_DATA_UNDERRUN &&
4084 			    scsi_status_h & FCP_RESID_UNDER) {
4085 				break;
4086 			}
4087 
4088 			EL(ha, "failed, issue_iocb=%xh, d_id=%xh, cs=%xh, "
4089 			    "ss_h=%xh, ss_l=%xh\n", rval, tq->d_id.b24,
4090 			    comp_status, scsi_status_h, scsi_status_l);
4091 
4092 			if (rval == QL_SUCCESS) {
4093 				if ((comp_status == CS_TIMEOUT) ||
4094 				    (comp_status == CS_PORT_UNAVAILABLE) ||
4095 				    (comp_status == CS_PORT_LOGGED_OUT)) {
4096 					rval = QL_FUNCTION_TIMEOUT;
4097 					break;
4098 				}
4099 				rval = QL_FUNCTION_FAILED;
4100 			} else if (rval == QL_ABORTED) {
4101 				break;
4102 			}
4103 
4104 			if (scsi_status_l & STATUS_CHECK) {
4105 				EL(ha, "STATUS_CHECK Sense Data\n%2xh%3xh"
4106 				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh"
4107 				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh\n", reqs[0],
4108 				    reqs[1], reqs[2], reqs[3], reqs[4],
4109 				    reqs[5], reqs[6], reqs[7], reqs[8],
4110 				    reqs[9], reqs[10], reqs[11], reqs[12],
4111 				    reqs[13], reqs[14], reqs[15], reqs[16],
4112 				    reqs[17]);
4113 			}
4114 		} else {
4115 			break;
4116 		}
4117 		bzero((caddr_t)pkt, pkt_size);
4118 	}
4119 
4120 	if (rval != QL_SUCCESS) {
4121 		EL(ha, "failed=%xh\n", rval);
4122 		rval = 0;
4123 	} else {
4124 		QL_PRINT_9(CE_CONT, "(%d): LUN list\n", ha->instance);
4125 		QL_DUMP_9(rpt, 8, rpt->hdr.len + 8);
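		/* hdr.len is the LUN list length in bytes; each entry is 8 bytes. */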
4126 		rval = (int)(BE_32(rpt->hdr.len) / 8);
4127 	}
4128 
4129 	kmem_free(pkt, pkt_size);
4130 	ql_free_dma_resource(ha, &dma_mem);
4131 
4132 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
4133 
4134 	return (rval);
4135 }
4136 
4137 /*
4138  * ql_inq_scan
 *	Get the number of LUNs using the SCSI INQUIRY command.
4140  *
4141  * Input:
4142  *	ha:		adapter state pointer.
4143  *	tq:		target queue pointer.
 *	count:		maximum number of LUNs to scan for.
4145  *
4146  * Returns:
4147  *	Number of LUNs.
4148  *
4149  * Context:
4150  *	Kernel context.
4151  */
4152 static int
4153 ql_inq_scan(ql_adapter_state_t *ha, ql_tgt_t *tq, int count)
4154 {
4155 	int		lun, cnt, rval;
4156 	ql_mbx_iocb_t	*pkt;
4157 	uint8_t		*inq;
4158 	uint32_t	pkt_size;
4159 
4160 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
4161 
4162 	pkt_size = sizeof (ql_mbx_iocb_t) + INQ_DATA_SIZE;
4163 	pkt = kmem_zalloc(pkt_size, KM_SLEEP);
4164 	if (pkt == NULL) {
4165 		EL(ha, "failed, kmem_zalloc\n");
4166 		return (0);
4167 	}
4168 	inq = (uint8_t *)((caddr_t)pkt + sizeof (ql_mbx_iocb_t));
4169 
4170 	cnt = 0;
4171 	for (lun = 0; lun < MAX_LUNS; lun++) {
4172 
4173 		if (DRIVER_SUSPENDED(ha)) {
4174 			rval = QL_LOOP_DOWN;
4175 			cnt = 0;
4176 			break;
4177 		}
4178 
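		/* Byte 0 of the INQUIRY data holds the peripheral device type. */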
4179 		rval = ql_inq(ha, tq, lun, pkt, INQ_DATA_SIZE);
4180 		if (rval == QL_SUCCESS) {
4181 			switch (*inq) {
4182 			case DTYPE_DIRECT:
4183 			case DTYPE_PROCESSOR:	/* Appliance. */
4184 			case DTYPE_WORM:
4185 			case DTYPE_RODIRECT:
4186 			case DTYPE_SCANNER:
4187 			case DTYPE_OPTICAL:
4188 			case DTYPE_CHANGER:
4189 			case DTYPE_ESI:
4190 				cnt++;
4191 				break;
4192 			case DTYPE_SEQUENTIAL:
4193 				cnt++;
4194 				tq->flags |= TQF_TAPE_DEVICE;
4195 				break;
4196 			default:
4197 				QL_PRINT_9(CE_CONT, "(%d): failed, "
4198 				    "unsupported device id=%xh, lun=%d, "
4199 				    "type=%xh\n", ha->instance, tq->loop_id,
4200 				    lun, *inq);
4201 				break;
4202 			}
4203 
4204 			if (*inq == DTYPE_ESI || cnt >= count) {
4205 				break;
4206 			}
4207 		} else if (rval == QL_ABORTED || rval == QL_FUNCTION_TIMEOUT) {
4208 			cnt = 0;
4209 			break;
4210 		}
4211 	}
4212 
4213 	kmem_free(pkt, pkt_size);
4214 
4215 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
4216 
4217 	return (cnt);
4218 }
4219 
4220 /*
4221  * ql_inq
4222  *	Issue inquiry command.
4223  *
4224  * Input:
4225  *	ha:		adapter state pointer.
4226  *	tq:		target queue pointer.
4227  *	lun:		LUN number.
4228  *	pkt:		command and buffer pointer.
4229  *	inq_len:	amount of inquiry data.
4230  *
4231  * Returns:
4232  *	ql local function return status code.
4233  *
4234  * Context:
4235  *	Kernel context.
4236  */
4237 static int
4238 ql_inq(ql_adapter_state_t *ha, ql_tgt_t *tq, int lun, ql_mbx_iocb_t *pkt,
4239     uint8_t inq_len)
4240 {
4241 	dma_mem_t	dma_mem;
4242 	int		rval, retries;
4243 	uint32_t	pkt_size, cnt;
4244 	uint16_t	comp_status;
4245 	uint8_t		scsi_status_h, scsi_status_l, *reqs;
4246 	caddr_t		inq_data;
4247 
4248 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
4249 
4250 	if (DRIVER_SUSPENDED(ha)) {
4251 		EL(ha, "failed, loop down\n");
4252 		return (QL_FUNCTION_TIMEOUT);
4253 	}
4254 
4255 	pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + inq_len);
4256 	bzero((caddr_t)pkt, pkt_size);
4257 
4258 	inq_data = (caddr_t)pkt + sizeof (ql_mbx_iocb_t);
4259 
4260 	/* Get DMA memory for the IOCB */
4261 	if (ql_get_dma_mem(ha, &dma_mem, inq_len,
4262 	    LITTLE_ENDIAN_DMA, QL_DMA_RING_ALIGN) != QL_SUCCESS) {
4263 		cmn_err(CE_WARN, "%s(%d): DMA memory "
4264 		    "alloc failed", QL_NAME, ha->instance);
4265 		return (0);
4266 	}
4267 
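	/* Retry the INQUIRY command up to four times. */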
4268 	for (retries = 0; retries < 4; retries++) {
4269 		if (CFG_IST(ha, CFG_CTRL_242581)) {
4270 			pkt->cmd24.entry_type = IOCB_CMD_TYPE_7;
4271 			pkt->cmd24.entry_count = 1;
4272 
4273 			/* Set LUN number */
4274 			pkt->cmd24.fcp_lun[2] = LSB(lun);
4275 			pkt->cmd24.fcp_lun[3] = MSB(lun);
4276 
4277 			/* Set N_port handle */
4278 			pkt->cmd24.n_port_hdl = (uint16_t)LE_16(tq->loop_id);
4279 
4280 			/* Set target ID */
4281 			pkt->cmd24.target_id[0] = tq->d_id.b.al_pa;
4282 			pkt->cmd24.target_id[1] = tq->d_id.b.area;
4283 			pkt->cmd24.target_id[2] = tq->d_id.b.domain;
4284 
4285 			/* Set ISP command timeout. */
4286 			pkt->cmd24.timeout = LE_16(15);
4287 
4288 			/* Load SCSI CDB */
4289 			pkt->cmd24.scsi_cdb[0] = SCMD_INQUIRY;
4290 			pkt->cmd24.scsi_cdb[4] = inq_len;
4291 			for (cnt = 0; cnt < MAX_CMDSZ; cnt += 4) {
4292 				ql_chg_endian((uint8_t *)&pkt->cmd24.scsi_cdb
4293 				    + cnt, 4);
4294 			}
4295 
4296 			/* Set tag queue control flags */
4297 			pkt->cmd24.task = TA_STAG;
4298 
4299 			/* Set transfer direction. */
4300 			pkt->cmd24.control_flags = CF_RD;
4301 
4302 			/* Set data segment count. */
4303 			pkt->cmd24.dseg_count = LE_16(1);
4304 
4305 			/* Load total byte count. */
4306 			pkt->cmd24.total_byte_count = LE_32(inq_len);
4307 
4308 			/* Load data descriptor. */
4309 			pkt->cmd24.dseg_0_address[0] = (uint32_t)
4310 			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
4311 			pkt->cmd24.dseg_0_address[1] = (uint32_t)
4312 			    LE_32(MSD(dma_mem.cookie.dmac_laddress));
4313 			pkt->cmd24.dseg_0_length = LE_32(inq_len);
4314 		} else if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
4315 			pkt->cmd3.entry_type = IOCB_CMD_TYPE_3;
4316 			cnt = CMD_TYPE_3_DATA_SEGMENTS;
4317 
4318 			pkt->cmd3.entry_count = 1;
4319 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
4320 				pkt->cmd3.target_l = LSB(tq->loop_id);
4321 				pkt->cmd3.target_h = MSB(tq->loop_id);
4322 			} else {
4323 				pkt->cmd3.target_h = LSB(tq->loop_id);
4324 			}
4325 			pkt->cmd3.lun_l = LSB(lun);
4326 			pkt->cmd3.lun_h = MSB(lun);
4327 			pkt->cmd3.control_flags_l = CF_DATA_IN | CF_STAG;
4328 			pkt->cmd3.timeout = LE_16(15);
4329 			pkt->cmd3.scsi_cdb[0] = SCMD_INQUIRY;
4330 			pkt->cmd3.scsi_cdb[4] = inq_len;
4331 			pkt->cmd3.dseg_count = LE_16(1);
4332 			pkt->cmd3.byte_count = LE_32(inq_len);
4333 			pkt->cmd3.dseg_0_address[0] = (uint32_t)
4334 			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
4335 			pkt->cmd3.dseg_0_address[1] = (uint32_t)
4336 			    LE_32(MSD(dma_mem.cookie.dmac_laddress));
4337 			pkt->cmd3.dseg_0_length = LE_32(inq_len);
4338 		} else {
4339 			pkt->cmd.entry_type = IOCB_CMD_TYPE_2;
4340 			cnt = CMD_TYPE_2_DATA_SEGMENTS;
4341 
4342 			pkt->cmd.entry_count = 1;
4343 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
4344 				pkt->cmd.target_l = LSB(tq->loop_id);
4345 				pkt->cmd.target_h = MSB(tq->loop_id);
4346 			} else {
4347 				pkt->cmd.target_h = LSB(tq->loop_id);
4348 			}
4349 			pkt->cmd.lun_l = LSB(lun);
4350 			pkt->cmd.lun_h = MSB(lun);
4351 			pkt->cmd.control_flags_l = CF_DATA_IN | CF_STAG;
4352 			pkt->cmd.timeout = LE_16(15);
4353 			pkt->cmd.scsi_cdb[0] = SCMD_INQUIRY;
4354 			pkt->cmd.scsi_cdb[4] = inq_len;
4355 			pkt->cmd.dseg_count = LE_16(1);
4356 			pkt->cmd.byte_count = LE_32(inq_len);
4357 			pkt->cmd.dseg_0_address = (uint32_t)
4358 			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
4359 			pkt->cmd.dseg_0_length = LE_32(inq_len);
4360 		}
4361 
4362 /*		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, pkt_size); */
4363 		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
4364 		    sizeof (ql_mbx_iocb_t));
4365 
		/* Sync the incoming IOCB DMA buffer. */
4367 		(void) ddi_dma_sync(dma_mem.dma_handle, 0, dma_mem.size,
4368 		    DDI_DMA_SYNC_FORKERNEL);
		/* Copy the incoming DMA data. */
4370 		ddi_rep_get8(dma_mem.acc_handle, (uint8_t *)inq_data,
4371 		    (uint8_t *)dma_mem.bp, dma_mem.size, DDI_DEV_AUTOINCR);
4372 
4373 		if (CFG_IST(ha, CFG_CTRL_242581)) {
4374 			pkt->sts24.entry_status = (uint8_t)
4375 			    (pkt->sts24.entry_status & 0x3c);
4376 			comp_status = (uint16_t)LE_16(pkt->sts24.comp_status);
4377 			scsi_status_h = pkt->sts24.scsi_status_h;
4378 			scsi_status_l = pkt->sts24.scsi_status_l;
4379 			cnt = scsi_status_h & FCP_RSP_LEN_VALID ?
4380 			    LE_32(pkt->sts24.fcp_rsp_data_length) : 0;
4381 			reqs = &pkt->sts24.rsp_sense_data[cnt];
4382 		} else {
4383 			pkt->sts.entry_status = (uint8_t)
4384 			    (pkt->sts.entry_status & 0x7e);
4385 			comp_status = (uint16_t)LE_16(pkt->sts.comp_status);
4386 			scsi_status_h = pkt->sts.scsi_status_h;
4387 			scsi_status_l = pkt->sts.scsi_status_l;
4388 			reqs = &pkt->sts.req_sense_data[0];
4389 		}
4390 		if (rval == QL_SUCCESS && pkt->sts.entry_status != 0) {
4391 			EL(ha, "failed, entry_status=%xh, d_id=%xh\n",
4392 			    pkt->sts.entry_status, tq->d_id.b24);
4393 			rval = QL_FUNCTION_PARAMETER_ERROR;
4394 		}
4395 
4396 		if (rval != QL_SUCCESS || comp_status != CS_COMPLETE ||
4397 		    scsi_status_l & STATUS_CHECK) {
4398 			EL(ha, "failed, issue_iocb=%xh, d_id=%xh, cs=%xh, "
4399 			    "ss_h=%xh, ss_l=%xh\n", rval, tq->d_id.b24,
4400 			    comp_status, scsi_status_h, scsi_status_l);
4401 
4402 			if (rval == QL_SUCCESS) {
4403 				if ((comp_status == CS_TIMEOUT) ||
4404 				    (comp_status == CS_PORT_UNAVAILABLE) ||
4405 				    (comp_status == CS_PORT_LOGGED_OUT)) {
4406 					rval = QL_FUNCTION_TIMEOUT;
4407 					break;
4408 				}
4409 				rval = QL_FUNCTION_FAILED;
4410 			}
4411 
4412 			if (scsi_status_l & STATUS_CHECK) {
4413 				EL(ha, "STATUS_CHECK Sense Data\n%2xh%3xh"
4414 				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh"
4415 				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh\n", reqs[0],
4416 				    reqs[1], reqs[2], reqs[3], reqs[4],
4417 				    reqs[5], reqs[6], reqs[7], reqs[8],
4418 				    reqs[9], reqs[10], reqs[11], reqs[12],
4419 				    reqs[13], reqs[14], reqs[15], reqs[16],
4420 				    reqs[17]);
4421 			}
4422 		} else {
4423 			break;
4424 		}
4425 	}
4426 	ql_free_dma_resource(ha, &dma_mem);
4427 
4428 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
4429 
4430 	return (rval);
4431 }
4432 
4433 /*
4434  * ql_get_buffer_data
 *	Copies data from user space to a kernel buffer.
 *
 * Input:
 *	src:	User source buffer address.
 *	dst:	Kernel destination buffer address.
 *	size:	Amount of data.
 *	mode:	flags.
 *
 * Returns:
 *	Number of bytes transferred.
4445  *
4446  * Context:
4447  *	Kernel context.
4448  */
4449 static uint32_t
4450 ql_get_buffer_data(caddr_t src, caddr_t dst, uint32_t size, int mode)
4451 {
4452 	uint32_t	cnt;
4453 
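	/* Copy a byte at a time so a partial count can be returned on fault. */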
4454 	for (cnt = 0; cnt < size; cnt++) {
4455 		if (ddi_copyin(src++, dst++, 1, mode) != 0) {
4456 			QL_PRINT_2(CE_CONT, "failed, ddi_copyin\n");
4457 			break;
4458 		}
4459 	}
4460 
4461 	return (cnt);
4462 }
4463 
4464 /*
4465  * ql_send_buffer_data
 *	Copies data from a kernel buffer to user space.
 *
 * Input:
 *	src:	Kernel source buffer address.
 *	dst:	User destination buffer address.
 *	size:	Amount of data.
 *	mode:	flags.
 *
 * Returns:
 *	Number of bytes transferred.
4476  *
4477  * Context:
4478  *	Kernel context.
4479  */
4480 static uint32_t
4481 ql_send_buffer_data(caddr_t src, caddr_t dst, uint32_t size, int mode)
4482 {
4483 	uint32_t	cnt;
4484 
4485 	for (cnt = 0; cnt < size; cnt++) {
4486 		if (ddi_copyout(src++, dst++, 1, mode) != 0) {
			QL_PRINT_2(CE_CONT, "failed, ddi_copyout\n");
4488 			break;
4489 		}
4490 	}
4491 
4492 	return (cnt);
4493 }
4494 
4495 /*
4496  * ql_find_port
4497  *	Locates device queue.
4498  *
4499  * Input:
4500  *	ha:	adapter state pointer.
4501  *	name:	device port name.
4502  *
4503  * Returns:
4504  *	Returns target queue pointer.
4505  *
4506  * Context:
4507  *	Kernel context.
4508  */
4509 static ql_tgt_t *
4510 ql_find_port(ql_adapter_state_t *ha, uint8_t *name, uint16_t type)
4511 {
4512 	ql_link_t	*link;
4513 	ql_tgt_t	*tq;
4514 	uint16_t	index;
4515 
4516 	/* Scan port list for requested target */
4517 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
4518 		for (link = ha->dev[index].first; link != NULL;
4519 		    link = link->next) {
4520 			tq = link->base_address;
4521 
4522 			switch (type) {
4523 			case QLNT_LOOP_ID:
4524 				if (bcmp(name, &tq->loop_id,
4525 				    sizeof (uint16_t)) == 0) {
4526 					return (tq);
4527 				}
4528 				break;
4529 			case QLNT_PORT:
4530 				if (bcmp(name, tq->port_name, 8) == 0) {
4531 					return (tq);
4532 				}
4533 				break;
4534 			case QLNT_NODE:
4535 				if (bcmp(name, tq->node_name, 8) == 0) {
4536 					return (tq);
4537 				}
4538 				break;
4539 			case QLNT_PID:
4540 				if (bcmp(name, tq->d_id.r.d_id,
4541 				    sizeof (tq->d_id.r.d_id)) == 0) {
4542 					return (tq);
4543 				}
4544 				break;
4545 			default:
				EL(ha, "failed, invalid type=%d\n", type);
4547 				return (NULL);
4548 			}
4549 		}
4550 	}
4551 
4552 	return (NULL);
4553 }
4554 
4555 /*
4556  * ql_24xx_flash_desc
4557  *	Get flash descriptor table.
4558  *
4559  * Input:
4560  *	ha:		adapter state pointer.
4561  *
4562  * Returns:
4563  *	ql local function return status code.
4564  *
4565  * Context:
4566  *	Kernel context.
4567  */
4568 static int
4569 ql_24xx_flash_desc(ql_adapter_state_t *ha)
4570 {
4571 	uint32_t	cnt;
4572 	uint16_t	chksum, *bp, data;
4573 	int		rval;
4574 	flash_desc_t	*fdesc;
4575 	ql_xioctl_t	*xp = ha->xioctl;
4576 
4577 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
4578 
4579 	if (ha->flash_desc_addr == 0) {
4580 		EL(ha, "desc ptr=0\n");
4581 		return (QL_FUNCTION_FAILED);
4582 	}
4583 
4584 	if ((fdesc = kmem_zalloc(sizeof (flash_desc_t), KM_SLEEP)) == NULL) {
4585 		EL(ha, "kmem_zalloc=null\n");
4586 		return (QL_MEMORY_ALLOC_FAILED);
4587 	}
4588 	rval = ql_dump_fcode(ha, (uint8_t *)fdesc, sizeof (flash_desc_t),
4589 	    ha->flash_desc_addr << 2);
4590 	if (rval != QL_SUCCESS) {
4591 		EL(ha, "read status=%xh\n", rval);
4592 		kmem_free(fdesc, sizeof (flash_desc_t));
4593 		return (rval);
4594 	}
4595 
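	/* A valid descriptor table checksums to zero over its 16-bit words. */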
4596 	chksum = 0;
4597 	bp = (uint16_t *)fdesc;
4598 	for (cnt = 0; cnt < (sizeof (flash_desc_t)) / 2; cnt++) {
4599 		data = *bp++;
4600 		LITTLE_ENDIAN_16(&data);
4601 		chksum += data;
4602 	}
4603 
4604 	LITTLE_ENDIAN_32(&fdesc->flash_valid);
4605 	LITTLE_ENDIAN_16(&fdesc->flash_version);
4606 	LITTLE_ENDIAN_16(&fdesc->flash_len);
4607 	LITTLE_ENDIAN_16(&fdesc->flash_checksum);
4608 	LITTLE_ENDIAN_16(&fdesc->flash_manuf);
4609 	LITTLE_ENDIAN_16(&fdesc->flash_id);
4610 	LITTLE_ENDIAN_32(&fdesc->block_size);
4611 	LITTLE_ENDIAN_32(&fdesc->alt_block_size);
4612 	LITTLE_ENDIAN_32(&fdesc->flash_size);
4613 	LITTLE_ENDIAN_32(&fdesc->write_enable_data);
4614 	LITTLE_ENDIAN_32(&fdesc->read_timeout);
4615 
4616 	/* flash size in desc table is in 1024 bytes */
4617 	fdesc->flash_size = fdesc->flash_size * 0x400;
4618 
4619 	if (chksum != 0 || fdesc->flash_valid != FLASH_DESC_VAILD ||
4620 	    fdesc->flash_version != FLASH_DESC_VERSION) {
4621 		EL(ha, "invalid descriptor table\n");
4622 		kmem_free(fdesc, sizeof (flash_desc_t));
4623 		return (QL_FUNCTION_FAILED);
4624 	}
4625 
4626 	bcopy(fdesc, &xp->fdesc, sizeof (flash_desc_t));
4627 	kmem_free(fdesc, sizeof (flash_desc_t));
4628 
4629 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
4630 
4631 	return (QL_SUCCESS);
4632 }
4633 
4634 /*
4635  * ql_setup_flash
4636  *	Gets the manufacturer and id number of the flash chip, and
4637  *	sets up the size parameter.
4638  *
4639  * Input:
4640  *	ha:	adapter state pointer.
4641  *
4642  * Returns:
4643  *	int:	ql local function return status code.
4644  *
4645  * Context:
4646  *	Kernel context.
4647  */
4648 static int
4649 ql_setup_flash(ql_adapter_state_t *ha)
4650 {
4651 	ql_xioctl_t	*xp = ha->xioctl;
4652 	int		rval = QL_SUCCESS;
4653 
4654 	if (xp->fdesc.flash_size != 0) {
4655 		return (rval);
4656 	}
4657 
4658 	if (CFG_IST(ha, CFG_CTRL_2200) && !ha->subven_id) {
4659 		return (QL_FUNCTION_FAILED);
4660 	}
4661 
4662 	if (CFG_IST(ha, CFG_CTRL_2581)) {
		/*
		 * Temporarily set ha->xioctl->fdesc.flash_size to a default
		 * flash size for this chip so that ql_dump_fcode() does
		 * not fail.
		 */
4667 		ha->xioctl->fdesc.flash_size = CFG_IST(ha, CFG_CTRL_25XX) ?
4668 		    0x200000 : 0x400000;
4669 		if (ql_24xx_flash_desc(ha) == QL_SUCCESS) {
4670 			EL(ha, "flash desc table ok, exit\n");
4671 			return (rval);
4672 		}
4673 		(void) ql_24xx_flash_id(ha);
4674 
4675 	} else if (CFG_IST(ha, CFG_CTRL_242581)) {
4676 		(void) ql_24xx_flash_id(ha);
4677 	} else {
4678 		ql_flash_enable(ha);
4679 
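		/* JEDEC autoselect (read ID) sequence; get the manufacturer ID. */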
4680 		ql_write_flash_byte(ha, 0x5555, 0xaa);
4681 		ql_write_flash_byte(ha, 0x2aaa, 0x55);
4682 		ql_write_flash_byte(ha, 0x5555, 0x90);
4683 		xp->fdesc.flash_manuf = (uint8_t)ql_read_flash_byte(ha, 0x0000);
4684 
4685 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
4686 			ql_write_flash_byte(ha, 0xaaaa, 0xaa);
4687 			ql_write_flash_byte(ha, 0x5555, 0x55);
4688 			ql_write_flash_byte(ha, 0xaaaa, 0x90);
4689 			xp->fdesc.flash_id = (uint16_t)
4690 			    ql_read_flash_byte(ha, 0x0002);
4691 		} else {
4692 			ql_write_flash_byte(ha, 0x5555, 0xaa);
4693 			ql_write_flash_byte(ha, 0x2aaa, 0x55);
4694 			ql_write_flash_byte(ha, 0x5555, 0x90);
4695 			xp->fdesc.flash_id = (uint16_t)
4696 			    ql_read_flash_byte(ha, 0x0001);
4697 		}
4698 
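		/* Return the flash to read array mode. */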
4699 		ql_write_flash_byte(ha, 0x5555, 0xaa);
4700 		ql_write_flash_byte(ha, 0x2aaa, 0x55);
4701 		ql_write_flash_byte(ha, 0x5555, 0xf0);
4702 
4703 		ql_flash_disable(ha);
4704 	}
4705 
4706 	/* Default flash descriptor table. */
4707 	xp->fdesc.write_statusreg_cmd = 1;
4708 	xp->fdesc.write_enable_bits = 0;
4709 	xp->fdesc.unprotect_sector_cmd = 0;
4710 	xp->fdesc.protect_sector_cmd = 0;
4711 	xp->fdesc.write_disable_bits = 0x9c;
4712 	xp->fdesc.block_size = 0x10000;
4713 	xp->fdesc.erase_cmd = 0xd8;
4714 
4715 	switch (xp->fdesc.flash_manuf) {
4716 	case AMD_FLASH:
4717 		switch (xp->fdesc.flash_id) {
4718 		case SPAN_FLASHID_2048K:
4719 			xp->fdesc.flash_size = 0x200000;
4720 			break;
4721 		case AMD_FLASHID_1024K:
4722 			xp->fdesc.flash_size = 0x100000;
4723 			break;
4724 		case AMD_FLASHID_512K:
4725 		case AMD_FLASHID_512Kt:
4726 		case AMD_FLASHID_512Kb:
4727 			if (CFG_IST(ha, CFG_SBUS_CARD)) {
4728 				xp->fdesc.flash_size = QL_SBUS_FCODE_SIZE;
4729 			} else {
4730 				xp->fdesc.flash_size = 0x80000;
4731 			}
4732 			break;
4733 		case AMD_FLASHID_128K:
4734 			xp->fdesc.flash_size = 0x20000;
4735 			break;
4736 		default:
4737 			rval = QL_FUNCTION_FAILED;
4738 			break;
4739 		}
4740 		break;
4741 	case ST_FLASH:
4742 		switch (xp->fdesc.flash_id) {
4743 		case ST_FLASHID_128K:
4744 			xp->fdesc.flash_size = 0x20000;
4745 			break;
4746 		case ST_FLASHID_512K:
4747 			xp->fdesc.flash_size = 0x80000;
4748 			break;
4749 		case ST_FLASHID_M25PXX:
4750 			if (xp->fdesc.flash_len == 0x14) {
4751 				xp->fdesc.flash_size = 0x100000;
4752 			} else if (xp->fdesc.flash_len == 0x15) {
4753 				xp->fdesc.flash_size = 0x200000;
4754 			} else {
4755 				rval = QL_FUNCTION_FAILED;
4756 			}
4757 			break;
4758 		default:
4759 			rval = QL_FUNCTION_FAILED;
4760 			break;
4761 		}
4762 		break;
4763 	case SST_FLASH:
4764 		switch (xp->fdesc.flash_id) {
4765 		case SST_FLASHID_128K:
4766 			xp->fdesc.flash_size = 0x20000;
4767 			break;
4768 		case SST_FLASHID_1024K_A:
4769 			xp->fdesc.flash_size = 0x100000;
4770 			xp->fdesc.block_size = 0x8000;
4771 			xp->fdesc.erase_cmd = 0x52;
4772 			break;
4773 		case SST_FLASHID_1024K:
4774 		case SST_FLASHID_1024K_B:
4775 			xp->fdesc.flash_size = 0x100000;
4776 			break;
4777 		case SST_FLASHID_2048K:
4778 			xp->fdesc.flash_size = 0x200000;
4779 			break;
4780 		default:
4781 			rval = QL_FUNCTION_FAILED;
4782 			break;
4783 		}
4784 		break;
4785 	case MXIC_FLASH:
4786 		switch (xp->fdesc.flash_id) {
4787 		case MXIC_FLASHID_512K:
4788 			xp->fdesc.flash_size = 0x80000;
4789 			break;
4790 		case MXIC_FLASHID_1024K:
4791 			xp->fdesc.flash_size = 0x100000;
4792 			break;
4793 		case MXIC_FLASHID_25LXX:
4794 			if (xp->fdesc.flash_len == 0x14) {
4795 				xp->fdesc.flash_size = 0x100000;
4796 			} else if (xp->fdesc.flash_len == 0x15) {
4797 				xp->fdesc.flash_size = 0x200000;
4798 			} else {
4799 				rval = QL_FUNCTION_FAILED;
4800 			}
4801 			break;
4802 		default:
4803 			rval = QL_FUNCTION_FAILED;
4804 			break;
4805 		}
4806 		break;
4807 	case ATMEL_FLASH:
4808 		switch (xp->fdesc.flash_id) {
4809 		case ATMEL_FLASHID_1024K:
4810 			xp->fdesc.flash_size = 0x100000;
4811 			xp->fdesc.write_disable_bits = 0xbc;
4812 			xp->fdesc.unprotect_sector_cmd = 0x39;
4813 			xp->fdesc.protect_sector_cmd = 0x36;
4814 			break;
4815 		default:
4816 			rval = QL_FUNCTION_FAILED;
4817 			break;
4818 		}
4819 		break;
4820 	case WINBOND_FLASH:
4821 		switch (xp->fdesc.flash_id) {
4822 		case WINBOND_FLASHID:
4823 			if (xp->fdesc.flash_len == 0x15) {
4824 				xp->fdesc.flash_size = 0x200000;
4825 			} else if (xp->fdesc.flash_len == 0x16) {
4826 				xp->fdesc.flash_size = 0x400000;
4827 			} else if (xp->fdesc.flash_len == 0x17) {
4828 				xp->fdesc.flash_size = 0x800000;
4829 			} else {
4830 				rval = QL_FUNCTION_FAILED;
4831 			}
4832 			break;
4833 		default:
4834 			rval = QL_FUNCTION_FAILED;
4835 			break;
4836 		}
4837 		break;
4838 	case INTEL_FLASH:
4839 		switch (xp->fdesc.flash_id) {
4840 		case INTEL_FLASHID:
4841 			if (xp->fdesc.flash_len == 0x11) {
4842 				xp->fdesc.flash_size = 0x200000;
4843 			} else if (xp->fdesc.flash_len == 0x12) {
4844 				xp->fdesc.flash_size = 0x400000;
4845 			} else if (xp->fdesc.flash_len == 0x13) {
4846 				xp->fdesc.flash_size = 0x800000;
4847 			} else {
4848 				rval = QL_FUNCTION_FAILED;
4849 			}
4850 			break;
4851 		default:
4852 			rval = QL_FUNCTION_FAILED;
4853 			break;
4854 		}
4855 		break;
4856 	default:
4857 		rval = QL_FUNCTION_FAILED;
4858 		break;
4859 	}
4860 
4861 	/* Try flash table later. */
4862 	if (rval != QL_SUCCESS && CFG_IST(ha, CFG_CTRL_242581)) {
4863 		EL(ha, "no default id\n");
4864 		return (QL_SUCCESS);
4865 	}
4866 
	/*
	 * Hack for non-standard 2312 and 6312 boards. The hardware should
	 * use either the original 128k flash chip or something larger; for
	 * driver purposes, treat it as a 128k flash chip.
	 */
4872 	if ((ha->device_id == 0x2312 || ha->device_id == 0x6312 ||
4873 	    ha->device_id == 0x6322) && (xp->fdesc.flash_size > 0x20000) &&
4874 	    (CFG_IST(ha, CFG_SBUS_CARD) ==  0)) {
4875 		EL(ha, "chip exceeds max size: %xh, using 128k\n",
4876 		    xp->fdesc.flash_size);
4877 		xp->fdesc.flash_size = 0x20000;
4878 	}
4879 
4880 	if (rval == QL_SUCCESS) {
4881 		EL(ha, "man_id=%xh, flash_id=%xh, size=%xh\n",
4882 		    xp->fdesc.flash_manuf, xp->fdesc.flash_id,
4883 		    xp->fdesc.flash_size);
4884 	} else {
4885 		EL(ha, "unsupported mfr / type: man_id=%xh, flash_id=%xh\n",
4886 		    xp->fdesc.flash_manuf, xp->fdesc.flash_id);
4887 	}
4888 
4889 	return (rval);
4890 }
4891 
4892 /*
4893  * ql_flash_fcode_load
4894  *	Loads fcode data into flash from application.
4895  *
4896  * Input:
4897  *	ha:	adapter state pointer.
4898  *	bp:	user buffer address.
 *	bsize:	user buffer size.
4900  *	mode:	flags
4901  *
4902  * Returns:
 *	0 = success, errno value = failure.
 *
4904  * Context:
4905  *	Kernel context.
4906  */
4907 static int
4908 ql_flash_fcode_load(ql_adapter_state_t *ha, void *bp, uint32_t bsize,
4909     int mode)
4910 {
4911 	uint8_t		*bfp;
4912 	ql_xioctl_t	*xp = ha->xioctl;
4913 	int		rval = 0;
4914 
4915 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
4916 
4917 	if (bsize > xp->fdesc.flash_size) {
4918 		EL(ha, "failed, bufsize: %xh, flash size: %xh\n", bsize,
4919 		    xp->fdesc.flash_size);
4920 		return (ENOMEM);
4921 	}
4922 
4923 	if ((bfp = (uint8_t *)kmem_zalloc(bsize, KM_SLEEP)) == NULL) {
4924 		EL(ha, "failed, kmem_zalloc\n");
4925 		rval = ENOMEM;
4926 	} else  {
4927 		if (ddi_copyin(bp, bfp, bsize, mode) != 0) {
4928 			EL(ha, "failed, ddi_copyin\n");
4929 			rval = EFAULT;
4930 		} else if (ql_load_fcode(ha, bfp, bsize, 0) != QL_SUCCESS) {
4931 			EL(ha, "failed, load_fcode\n");
4932 			rval = EFAULT;
4933 		} else {
4934 			/* Reset caches on all adapter instances. */
4935 			ql_update_flash_caches(ha);
4936 			rval = 0;
4937 		}
4938 		kmem_free(bfp, bsize);
4939 	}
4940 
4941 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
4942 
4943 	return (rval);
4944 }
4945 
4946 /*
 *	Loads fcode into flash.
4948  *	Loads fcode in to flash.
4949  *
4950  * Input:
4951  *	ha:	adapter state pointer.
4952  *	dp:	data pointer.
4953  *	size:	data length.
4954  *	addr:	flash byte address.
4955  *
4956  * Returns:
4957  *	ql local function return status code.
4958  *
4959  * Context:
4960  *	Kernel context.
4961  */
4962 int
4963 ql_load_fcode(ql_adapter_state_t *ha, uint8_t *dp, uint32_t size, uint32_t addr)
4964 {
4965 	uint32_t	cnt;
4966 	int		rval;
4967 
4968 	if (CFG_IST(ha, CFG_CTRL_242581)) {
4969 		return (ql_24xx_load_flash(ha, dp, size, addr));
4970 	}
4971 
4972 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
4973 
4974 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
4975 		/*
4976 		 * sbus has an additional check to make
4977 		 * sure they don't brick the HBA.
4978 		 */
4979 		if (dp[0] != 0xf1) {
4980 			EL(ha, "failed, incorrect fcode for sbus\n");
4981 			return (QL_FUNCTION_PARAMETER_ERROR);
4982 		}
4983 	}
4984 
4985 	GLOBAL_HW_LOCK();
4986 
4987 	/* Enable Flash Read/Write. */
4988 	ql_flash_enable(ha);
4989 
4990 	/* Erase flash prior to write. */
4991 	rval = ql_erase_flash(ha, 0);
4992 
4993 	if (rval == QL_SUCCESS) {
4994 		/* Write fcode data to flash. */
4995 		for (cnt = 0; cnt < (uint32_t)size; cnt++) {
4996 			/* Allow other system activity. */
4997 			if (cnt % 0x1000 == 0) {
4998 				drv_usecwait(1);
4999 			}
5000 			rval = ql_program_flash_address(ha, addr++, *dp++);
5001 			if (rval != QL_SUCCESS)
5002 				break;
5003 		}
5004 	}
5005 
5006 	ql_flash_disable(ha);
5007 
5008 	GLOBAL_HW_UNLOCK();
5009 
5010 	if (rval != QL_SUCCESS) {
5011 		EL(ha, "failed, rval=%xh\n", rval);
5012 	} else {
5013 		/*EMPTY*/
5014 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5015 	}
5016 	return (rval);
5017 }
5018 
5019 /*
5020  * ql_flash_fcode_dump
5021  *	Dumps FLASH to application.
5022  *
5023  * Input:
5024  *	ha:	adapter state pointer.
5025  *	bp:	user buffer address.
5026  *	bsize:	user buffer size
5027  *	faddr:	flash byte address
5028  *	mode:	flags
5029  *
5030  * Returns:
 *	0 = success, errno value = failure.
 *
5032  * Context:
5033  *	Kernel context.
5034  */
5035 static int
5036 ql_flash_fcode_dump(ql_adapter_state_t *ha, void *bp, uint32_t bsize,
5037     uint32_t faddr, int mode)
5038 {
5039 	uint8_t		*bfp;
5040 	int		rval;
5041 	ql_xioctl_t	*xp = ha->xioctl;
5042 
5043 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5044 
5045 	/* adjust max read size to flash size */
5046 	if (bsize > xp->fdesc.flash_size) {
5047 		EL(ha, "adjusting req=%xh, max=%xh\n", bsize,
5048 		    xp->fdesc.flash_size);
5049 		bsize = xp->fdesc.flash_size;
5050 	}
5051 
5052 	if ((bfp = (uint8_t *)kmem_zalloc(bsize, KM_SLEEP)) == NULL) {
5053 		EL(ha, "failed, kmem_zalloc\n");
5054 		rval = ENOMEM;
5055 	} else {
5056 		/* Dump Flash fcode. */
5057 		rval = ql_dump_fcode(ha, bfp, bsize, faddr);
5058 
5059 		if (rval != QL_SUCCESS) {
5060 			EL(ha, "failed, dump_fcode = %x\n", rval);
5061 			rval = EFAULT;
5062 		} else if (ddi_copyout(bfp, bp, bsize, mode) != 0) {
5063 			EL(ha, "failed, ddi_copyout\n");
5064 			rval = EFAULT;
5065 		} else {
5066 			rval = 0;
5067 		}
5068 		kmem_free(bfp, bsize);
5069 	}
5070 
5071 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5072 
5073 	return (rval);
5074 }
5075 
5076 /*
5077  * ql_dump_fcode
5078  *	Dumps fcode from flash.
5079  *
5080  * Input:
5081  *	ha:		adapter state pointer.
5082  *	dp:		data pointer.
5083  *	size:		data length in bytes.
5084  *	startpos:	starting position in flash (byte address).
5085  *
5086  * Returns:
5087  *	ql local function return status code.
5088  *
5089  * Context:
5090  *	Kernel context.
5091  *
5092  */
5093 int
5094 ql_dump_fcode(ql_adapter_state_t *ha, uint8_t *dp, uint32_t size,
5095     uint32_t startpos)
5096 {
5097 	uint32_t	cnt, data, addr;
5098 	uint8_t		bp[4];
5099 	int		rval = QL_SUCCESS;
5100 
5101 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5102 
5103 	/* make sure startpos+size doesn't exceed flash */
5104 	if (size + startpos > ha->xioctl->fdesc.flash_size) {
5105 		EL(ha, "exceeded flash range, sz=%xh, stp=%xh, flsz=%xh\n",
5106 		    size, startpos, ha->xioctl->fdesc.flash_size);
5107 		return (QL_FUNCTION_PARAMETER_ERROR);
5108 	}
5109 
5110 	if (CFG_IST(ha, CFG_CTRL_242581)) {
5111 		/* check start addr is 32 bit aligned for 24xx */
5112 		if ((startpos & 0x3) != 0) {
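			/* Peel leading unaligned bytes from the containing 32-bit word. */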
5113 			rval = ql_24xx_read_flash(ha,
5114 			    ha->flash_data_addr | startpos >> 2, &data);
5115 			if (rval != QL_SUCCESS) {
5116 				EL(ha, "failed2, rval = %xh\n", rval);
5117 				return (rval);
5118 			}
5119 			bp[0] = LSB(LSW(data));
5120 			bp[1] = MSB(LSW(data));
5121 			bp[2] = LSB(MSW(data));
5122 			bp[3] = MSB(MSW(data));
5123 			while (size && startpos & 0x3) {
5124 				*dp++ = bp[startpos & 0x3];
5125 				startpos++;
5126 				size--;
5127 			}
5128 			if (size == 0) {
5129 				QL_PRINT_9(CE_CONT, "(%d): done2\n",
5130 				    ha->instance);
5131 				return (rval);
5132 			}
5133 		}
5134 
5135 		/* adjust 24xx start addr for 32 bit words */
5136 		addr = startpos / 4 | ha->flash_data_addr;
5137 	}
5138 
5139 	GLOBAL_HW_LOCK();
5140 
5141 	/* Enable Flash Read/Write. */
5142 	if (CFG_IST(ha, CFG_CTRL_242581) == 0) {
5143 		ql_flash_enable(ha);
5144 	}
5145 
5146 	/* Read fcode data from flash. */
5147 	while (size) {
5148 		/* Allow other system activity. */
5149 		if (size % 0x1000 == 0) {
5150 			ql_delay(ha, 100000);
5151 		}
5152 		if (CFG_IST(ha, CFG_CTRL_242581)) {
5153 			rval = ql_24xx_read_flash(ha, addr++, &data);
5154 			if (rval != QL_SUCCESS) {
5155 				break;
5156 			}
5157 			bp[0] = LSB(LSW(data));
5158 			bp[1] = MSB(LSW(data));
5159 			bp[2] = LSB(MSW(data));
5160 			bp[3] = MSB(MSW(data));
5161 			for (cnt = 0; size && cnt < 4; size--) {
5162 				*dp++ = bp[cnt++];
5163 			}
5164 		} else {
5165 			*dp++ = (uint8_t)ql_read_flash_byte(ha, startpos++);
5166 			size--;
5167 		}
5168 	}
5169 
5170 	if (CFG_IST(ha, CFG_CTRL_242581) == 0) {
5171 		ql_flash_disable(ha);
5172 	}
5173 
5174 	GLOBAL_HW_UNLOCK();
5175 
5176 	if (rval != QL_SUCCESS) {
5177 		EL(ha, "failed, rval = %xh\n", rval);
5178 	} else {
5179 		/*EMPTY*/
5180 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5181 	}
5182 	return (rval);
5183 }
5184 
5185 /*
5186  * ql_program_flash_address
5187  *	Program flash address.
5188  *
5189  * Input:
5190  *	ha:	adapter state pointer.
5191  *	addr:	flash byte address.
5192  *	data:	data to be written to flash.
5193  *
5194  * Returns:
5195  *	ql local function return status code.
5196  *
5197  * Context:
5198  *	Kernel context.
5199  */
5200 static int
5201 ql_program_flash_address(ql_adapter_state_t *ha, uint32_t addr,
5202     uint8_t data)
5203 {
5204 	int	rval;
5205 
5206 	/* Write Program Command Sequence */
5207 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
5208 		ql_write_flash_byte(ha, 0x5555, 0xa0);
5209 		ql_write_flash_byte(ha, addr, data);
5210 	} else {
5211 		ql_write_flash_byte(ha, 0x5555, 0xaa);
5212 		ql_write_flash_byte(ha, 0x2aaa, 0x55);
5213 		ql_write_flash_byte(ha, 0x5555, 0xa0);
5214 		ql_write_flash_byte(ha, addr, data);
5215 	}
5216 
5217 	/* Wait for write to complete. */
5218 	rval = ql_poll_flash(ha, addr, data);
5219 
5220 	if (rval != QL_SUCCESS) {
5221 		EL(ha, "failed, rval=%xh\n", rval);
5222 	}
5223 	return (rval);
5224 }
5225 
5226 /*
5227  * ql_set_rnid_parameters
5228  *	Set RNID parameters.
5229  *
5230  * Input:
5231  *	ha:	adapter state pointer.
5232  *	cmd:	User space CT arguments pointer.
5233  *	mode:	flags.
5234  */
5235 static void
5236 ql_set_rnid_parameters(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5237 {
5238 	EXT_SET_RNID_REQ	tmp_set;
5239 	EXT_RNID_DATA		*tmp_buf;
5240 	int			rval = 0;
5241 
5242 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5243 
5244 	if (DRIVER_SUSPENDED(ha)) {
5245 		EL(ha, "failed, LOOP_NOT_READY\n");
5246 		cmd->Status = EXT_STATUS_BUSY;
5247 		cmd->ResponseLen = 0;
5248 		return;
5249 	}
5250 
5251 	cmd->ResponseLen = 0; /* NO response to caller. */
5252 	if (cmd->RequestLen != sizeof (EXT_SET_RNID_REQ)) {
5253 		/* parameter error */
5254 		EL(ha, "failed, RequestLen < EXT_SET_RNID_REQ, Len=%xh\n",
5255 		    cmd->RequestLen);
5256 		cmd->Status = EXT_STATUS_INVALID_PARAM;
5257 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
5258 		cmd->ResponseLen = 0;
5259 		return;
5260 	}
5261 
5262 	rval = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, &tmp_set,
5263 	    cmd->RequestLen, mode);
5264 	if (rval != 0) {
5265 		EL(ha, "failed, ddi_copyin\n");
5266 		cmd->Status = EXT_STATUS_COPY_ERR;
5267 		cmd->ResponseLen = 0;
5268 		return;
5269 	}
5270 
5271 	/* Allocate memory for command. */
5272 	tmp_buf = kmem_zalloc(sizeof (EXT_RNID_DATA), KM_SLEEP);
5273 	if (tmp_buf == NULL) {
5274 		EL(ha, "failed, kmem_zalloc\n");
5275 		cmd->Status = EXT_STATUS_NO_MEMORY;
5276 		cmd->ResponseLen = 0;
5277 		return;
5278 	}
5279 
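	/* Get the current RNID parameters so fields not being set are preserved. */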
5280 	rval = ql_get_rnid_params(ha, sizeof (EXT_RNID_DATA),
5281 	    (caddr_t)tmp_buf);
5282 	if (rval != QL_SUCCESS) {
5283 		/* error */
5284 		EL(ha, "failed, get_rnid_params_mbx=%xh\n", rval);
5285 		kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
5286 		cmd->Status = EXT_STATUS_ERR;
5287 		cmd->ResponseLen = 0;
5288 		return;
5289 	}
5290 
5291 	/* Now set the requested params. */
5292 	bcopy(tmp_set.IPVersion, tmp_buf->IPVersion, 2);
5293 	bcopy(tmp_set.UDPPortNumber, tmp_buf->UDPPortNumber, 2);
5294 	bcopy(tmp_set.IPAddress, tmp_buf->IPAddress, 16);
5295 
5296 	rval = ql_set_rnid_params(ha, sizeof (EXT_RNID_DATA),
5297 	    (caddr_t)tmp_buf);
5298 	if (rval != QL_SUCCESS) {
5299 		/* error */
5300 		EL(ha, "failed, set_rnid_params_mbx=%xh\n", rval);
5301 		cmd->Status = EXT_STATUS_ERR;
5302 		cmd->ResponseLen = 0;
5303 	}
5304 
5305 	kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
5306 
5307 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5308 }
5309 
5310 /*
5311  * ql_get_rnid_parameters
5312  *	Get RNID parameters.
5313  *
5314  * Input:
5315  *	ha:	adapter state pointer.
5316  *	cmd:	User space CT arguments pointer.
5317  *	mode:	flags.
5318  */
5319 static void
5320 ql_get_rnid_parameters(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5321 {
5322 	EXT_RNID_DATA	*tmp_buf;
5323 	uint32_t	rval;
5324 
5325 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5326 
5327 	if (DRIVER_SUSPENDED(ha)) {
5328 		EL(ha, "failed, LOOP_NOT_READY\n");
5329 		cmd->Status = EXT_STATUS_BUSY;
5330 		cmd->ResponseLen = 0;
5331 		return;
5332 	}
5333 
5334 	/* Allocate memory for command. */
5335 	tmp_buf = kmem_zalloc(sizeof (EXT_RNID_DATA), KM_SLEEP);
5336 	if (tmp_buf == NULL) {
5337 		EL(ha, "failed, kmem_zalloc\n");
5338 		cmd->Status = EXT_STATUS_NO_MEMORY;
5339 		cmd->ResponseLen = 0;
5340 		return;
5341 	}
5342 
5343 	/* Send command */
5344 	rval = ql_get_rnid_params(ha, sizeof (EXT_RNID_DATA),
5345 	    (caddr_t)tmp_buf);
5346 	if (rval != QL_SUCCESS) {
5347 		/* error */
5348 		EL(ha, "failed, get_rnid_params_mbx=%xh\n", rval);
5349 		kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
5350 		cmd->Status = EXT_STATUS_ERR;
5351 		cmd->ResponseLen = 0;
5352 		return;
5353 	}
5354 
5355 	/* Copy the response */
5356 	if (ql_send_buffer_data((caddr_t)tmp_buf,
5357 	    (caddr_t)(uintptr_t)cmd->ResponseAdr,
5358 	    sizeof (EXT_RNID_DATA), mode) != sizeof (EXT_RNID_DATA)) {
5359 		EL(ha, "failed, ddi_copyout\n");
5360 		cmd->Status = EXT_STATUS_COPY_ERR;
5361 		cmd->ResponseLen = 0;
5362 	} else {
5363 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5364 		cmd->ResponseLen = sizeof (EXT_RNID_DATA);
5365 	}
5366 
5367 	kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
5368 }
5369 
5370 /*
5371  * ql_reset_statistics
 *	Performs the EXT_SC_RST_STATISTICS subcommand of EXT_CC_SET_DATA.
5373  *
5374  * Input:
5375  *	ha:	adapter state pointer.
5376  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5377  *
5378  * Returns:
 *	ql local function return status code, request status indicated
 *	in cmd->Status.
5380  *
5381  * Context:
5382  *	Kernel context.
5383  */
5384 static int
5385 ql_reset_statistics(ql_adapter_state_t *ha, EXT_IOCTL *cmd)
5386 {
5387 	ql_xioctl_t		*xp = ha->xioctl;
5388 	int			rval = 0;
5389 
5390 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5391 
5392 	if (DRIVER_SUSPENDED(ha)) {
5393 		EL(ha, "failed, LOOP_NOT_READY\n");
5394 		cmd->Status = EXT_STATUS_BUSY;
5395 		cmd->ResponseLen = 0;
5396 		return (QL_FUNCTION_SUSPENDED);
5397 	}
5398 
5399 	rval = ql_reset_link_status(ha);
5400 	if (rval != QL_SUCCESS) {
5401 		EL(ha, "failed, reset_link_status_mbx=%xh\n", rval);
5402 		cmd->Status = EXT_STATUS_MAILBOX;
5403 		cmd->DetailStatus = rval;
5404 		cmd->ResponseLen = 0;
5405 	}
5406 
5407 	TASK_DAEMON_LOCK(ha);
5408 	xp->IosRequested = 0;
5409 	xp->BytesRequested = 0;
5410 	xp->IOInputRequests = 0;
5411 	xp->IOOutputRequests = 0;
5412 	xp->IOControlRequests = 0;
5413 	xp->IOInputMByteCnt = 0;
5414 	xp->IOOutputMByteCnt = 0;
5415 	xp->IOOutputByteCnt = 0;
5416 	xp->IOInputByteCnt = 0;
5417 	TASK_DAEMON_UNLOCK(ha);
5418 
5419 	INTR_LOCK(ha);
5420 	xp->ControllerErrorCount = 0;
5421 	xp->DeviceErrorCount = 0;
5422 	xp->TotalLipResets = 0;
5423 	xp->TotalInterrupts = 0;
5424 	INTR_UNLOCK(ha);
5425 
5426 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5427 
5428 	return (rval);
5429 }
5430 
5431 /*
5432  * ql_get_statistics
 *	Performs the EXT_SC_GET_STATISTICS subcommand of EXT_CC_GET_DATA.
5434  *
5435  * Input:
5436  *	ha:	adapter state pointer.
5437  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5438  *	mode:	flags.
5439  *
5440  * Returns:
5441  *	None, request status indicated in cmd->Status.
5442  *
5443  * Context:
5444  *	Kernel context.
5445  */
5446 static void
5447 ql_get_statistics(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5448 {
5449 	EXT_HBA_PORT_STAT	ps = {0};
5450 	ql_link_stats_t		*ls;
5451 	int			rval;
5452 	ql_xioctl_t		*xp = ha->xioctl;
5453 	int			retry = 10;
5454 
5455 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5456 
5457 	while (ha->task_daemon_flags &
5458 	    (ABORT_ISP_ACTIVE | LOOP_RESYNC_ACTIVE | DRIVER_STALL)) {
5459 		ql_delay(ha, 10000000);	/* 10 second delay */
5460 
5461 		retry--;
5462 
5463 		if (retry == 0) { /* effectively 100 seconds */
5464 			EL(ha, "failed, LOOP_NOT_READY\n");
5465 			cmd->Status = EXT_STATUS_BUSY;
5466 			cmd->ResponseLen = 0;
5467 			return;
5468 		}
5469 	}
5470 
5471 	/* Allocate memory for command. */
5472 	ls = kmem_zalloc(sizeof (ql_link_stats_t), KM_SLEEP);
5473 	if (ls == NULL) {
5474 		EL(ha, "failed, kmem_zalloc\n");
5475 		cmd->Status = EXT_STATUS_NO_MEMORY;
5476 		cmd->ResponseLen = 0;
5477 		return;
5478 	}
5479 
	/*
	 * These appear to be intended as port statistics; the loop ID or
	 * port ID should be in cmd->Instance.
	 */
5484 	rval = ql_get_status_counts(ha, (uint16_t)
5485 	    (ha->task_daemon_flags & LOOP_DOWN ? 0xFF : ha->loop_id),
5486 	    sizeof (ql_link_stats_t), (caddr_t)ls, 0);
5487 	if (rval != QL_SUCCESS) {
5488 		EL(ha, "failed, get_link_status=%xh, id=%xh\n", rval,
5489 		    ha->loop_id);
5490 		cmd->Status = EXT_STATUS_MAILBOX;
5491 		cmd->DetailStatus = rval;
5492 		cmd->ResponseLen = 0;
5493 	} else {
5494 		ps.ControllerErrorCount = xp->ControllerErrorCount;
5495 		ps.DeviceErrorCount = xp->DeviceErrorCount;
5496 		ps.IoCount = (uint32_t)(xp->IOInputRequests +
5497 		    xp->IOOutputRequests + xp->IOControlRequests);
5498 		ps.MBytesCount = (uint32_t)(xp->IOInputMByteCnt +
5499 		    xp->IOOutputMByteCnt);
5500 		ps.LipResetCount = xp->TotalLipResets;
5501 		ps.InterruptCount = xp->TotalInterrupts;
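		/* Link error counters from the firmware are little-endian. */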
5502 		ps.LinkFailureCount = LE_32(ls->link_fail_cnt);
5503 		ps.LossOfSyncCount = LE_32(ls->sync_loss_cnt);
5504 		ps.LossOfSignalsCount = LE_32(ls->signal_loss_cnt);
5505 		ps.PrimitiveSeqProtocolErrorCount = LE_32(ls->prot_err_cnt);
5506 		ps.InvalidTransmissionWordCount = LE_32(ls->inv_xmit_cnt);
5507 		ps.InvalidCRCCount = LE_32(ls->inv_crc_cnt);
5508 
5509 		rval = ddi_copyout((void *)&ps,
5510 		    (void *)(uintptr_t)cmd->ResponseAdr,
5511 		    sizeof (EXT_HBA_PORT_STAT), mode);
5512 		if (rval != 0) {
5513 			EL(ha, "failed, ddi_copyout\n");
5514 			cmd->Status = EXT_STATUS_COPY_ERR;
5515 			cmd->ResponseLen = 0;
5516 		} else {
5517 			cmd->ResponseLen = sizeof (EXT_HBA_PORT_STAT);
5518 		}
5519 	}
5520 
5521 	kmem_free(ls, sizeof (ql_link_stats_t));
5522 
5523 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5524 }
5525 
5526 /*
5527  * ql_get_statistics_fc
5528  *	Performs EXT_SC_GET_FC_STATISTICS subcommand of EXT_CC_GET_DATA.
5529  *
5530  * Input:
5531  *	ha:	adapter state pointer.
5532  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5533  *	mode:	flags.
5534  *
5535  * Returns:
5536  *	None, request status indicated in cmd->Status.
5537  *
5538  * Context:
5539  *	Kernel context.
5540  */
5541 static void
5542 ql_get_statistics_fc(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5543 {
5544 	EXT_HBA_PORT_STAT	ps = {0};
5545 	ql_link_stats_t		*ls;
5546 	int			rval;
5547 	uint16_t		qlnt;
5548 	EXT_DEST_ADDR		pextdestaddr;
5549 	uint8_t			*name;
5550 	ql_tgt_t		*tq = NULL;
5551 	int			retry = 10;
5552 
5553 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5554 
5555 	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
5556 	    (void *)&pextdestaddr, sizeof (EXT_DEST_ADDR), mode) != 0) {
5557 		EL(ha, "failed, ddi_copyin\n");
5558 		cmd->Status = EXT_STATUS_COPY_ERR;
5559 		cmd->ResponseLen = 0;
5560 		return;
5561 	}
5562 
5563 	qlnt = QLNT_PORT;
5564 	name = pextdestaddr.DestAddr.WWPN;
5565 
5566 	QL_PRINT_9(CE_CONT, "(%d): wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
5567 	    ha->instance, name[0], name[1], name[2], name[3], name[4],
5568 	    name[5], name[6], name[7]);
5569 
5570 	tq = ql_find_port(ha, name, qlnt);
5571 
5572 	if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
5573 		EL(ha, "failed, fc_port not found\n");
5574 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
5575 		cmd->ResponseLen = 0;
5576 		return;
5577 	}
5578 
5579 	while (ha->task_daemon_flags &
5580 	    (ABORT_ISP_ACTIVE | LOOP_RESYNC_ACTIVE | DRIVER_STALL)) {
5581 		ql_delay(ha, 10000000);	/* 10 second delay */
5582 
5583 		retry--;
5584 
5585 		if (retry == 0) { /* effectively 100 seconds */
5586 			EL(ha, "failed, LOOP_NOT_READY\n");
5587 			cmd->Status = EXT_STATUS_BUSY;
5588 			cmd->ResponseLen = 0;
5589 			return;
5590 		}
5591 	}
5592 
5593 	/* Allocate memory for link statistics. */
5594 	ls = kmem_zalloc(sizeof (ql_link_stats_t), KM_SLEEP);
5595 	if (ls == NULL) {
5596 		EL(ha, "failed, kmem_zalloc\n");
5597 		cmd->Status = EXT_STATUS_NO_MEMORY;
5598 		cmd->ResponseLen = 0;
5599 		return;
5600 	}
5601 
5602 	rval = ql_get_link_status(ha, tq->loop_id, sizeof (ql_link_stats_t),
5603 	    (caddr_t)ls, 0);
5604 	if (rval != QL_SUCCESS) {
5605 		EL(ha, "failed, get_link_status=%xh, d_id=%xh\n", rval,
5606 		    tq->d_id.b24);
5607 		cmd->Status = EXT_STATUS_MAILBOX;
5608 		cmd->DetailStatus = rval;
5609 		cmd->ResponseLen = 0;
5610 	} else {
5611 		ps.LinkFailureCount = LE_32(ls->link_fail_cnt);
5612 		ps.LossOfSyncCount = LE_32(ls->sync_loss_cnt);
5613 		ps.LossOfSignalsCount = LE_32(ls->signal_loss_cnt);
5614 		ps.PrimitiveSeqProtocolErrorCount = LE_32(ls->prot_err_cnt);
5615 		ps.InvalidTransmissionWordCount = LE_32(ls->inv_xmit_cnt);
5616 		ps.InvalidCRCCount = LE_32(ls->inv_crc_cnt);
5617 
5618 		rval = ddi_copyout((void *)&ps,
5619 		    (void *)(uintptr_t)cmd->ResponseAdr,
5620 		    sizeof (EXT_HBA_PORT_STAT), mode);
5621 
5622 		if (rval != 0) {
5623 			EL(ha, "failed, ddi_copyout\n");
5624 			cmd->Status = EXT_STATUS_COPY_ERR;
5625 			cmd->ResponseLen = 0;
5626 		} else {
5627 			cmd->ResponseLen = sizeof (EXT_HBA_PORT_STAT);
5628 		}
5629 	}
5630 
5631 	kmem_free(ls, sizeof (ql_link_stats_t));
5632 
5633 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5634 }
5635 
5636 /*
5637  * ql_get_statistics_fc4
5638  *	Performs EXT_SC_GET_FC_STATISTICS subcommand of EXT_CC_GET_DATA.
5639  *
5640  * Input:
5641  *	ha:	adapter state pointer.
5642  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5643  *	mode:	flags.
5644  *
5645  * Returns:
5646  *	None, request status indicated in cmd->Status.
5647  *
5648  * Context:
5649  *	Kernel context.
5650  */
5651 static void
5652 ql_get_statistics_fc4(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5653 {
5654 	uint32_t		rval;
5655 	EXT_HBA_FC4STATISTICS	fc4stats = {0};
5656 	ql_xioctl_t		*xp = ha->xioctl;
5657 
5658 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5659 
5660 	fc4stats.InputRequests = xp->IOInputRequests;
5661 	fc4stats.OutputRequests = xp->IOOutputRequests;
5662 	fc4stats.ControlRequests = xp->IOControlRequests;
5663 	fc4stats.InputMegabytes = xp->IOInputMByteCnt;
5664 	fc4stats.OutputMegabytes = xp->IOOutputMByteCnt;
5665 
5666 	rval = ddi_copyout((void *)&fc4stats,
5667 	    (void *)(uintptr_t)cmd->ResponseAdr,
5668 	    sizeof (EXT_HBA_FC4STATISTICS), mode);
5669 
5670 	if (rval != 0) {
5671 		EL(ha, "failed, ddi_copyout\n");
5672 		cmd->Status = EXT_STATUS_COPY_ERR;
5673 		cmd->ResponseLen = 0;
5674 	} else {
5675 		cmd->ResponseLen = sizeof (EXT_HBA_FC4STATISTICS);
5676 	}
5677 
5678 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5679 }
5680 
5681 /*
5682  * ql_set_led_state
5683  *	Performs EXT_SET_BEACON_STATE subcommand of EXT_CC_SET_DATA.
5684  *
5685  * Input:
5686  *	ha:	adapter state pointer.
5687  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5688  *	mode:	flags.
5689  *
5690  * Returns:
5691  *	None, request status indicated in cmd->Status.
5692  *
5693  * Context:
5694  *	Kernel context.
5695  */
5696 static void
5697 ql_set_led_state(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5698 {
5699 	EXT_BEACON_CONTROL	bstate;
5700 	uint32_t		rval;
5701 	ql_xioctl_t		*xp = ha->xioctl;
5702 
5703 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5704 
5705 	if (cmd->RequestLen < sizeof (EXT_BEACON_CONTROL)) {
5706 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
5707 		cmd->DetailStatus = sizeof (EXT_BEACON_CONTROL);
5708 		EL(ha, "done - failed, RequestLen < EXT_BEACON_CONTROL,"
5709 		    " Len=%xh\n", cmd->RequestLen);
5710 		cmd->ResponseLen = 0;
5711 		return;
5712 	}
5713 
5714 	if (ha->device_id < 0x2300) {
5715 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
5716 		cmd->DetailStatus = 0;
5717 		EL(ha, "done - failed, Invalid function for HBA model\n");
5718 		cmd->ResponseLen = 0;
5719 		return;
5720 	}
5721 
5722 	rval = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, &bstate,
5723 	    cmd->RequestLen, mode);
5724 
5725 	if (rval != 0) {
5726 		cmd->Status = EXT_STATUS_COPY_ERR;
5727 		EL(ha, "done -  failed, ddi_copyin\n");
5728 		return;
5729 	}
5730 
5731 	switch (bstate.State) {
5732 	case EXT_DEF_GRN_BLINK_OFF:	/* turn beacon off */
5733 		if (xp->ledstate.BeaconState == BEACON_OFF) {
5734 			/* not quite an error -- LED state is already off */
5735 			cmd->Status = EXT_STATUS_OK;
5736 			EL(ha, "LED off request -- LED is already off\n");
5737 			break;
5738 		}
5739 
5740 		xp->ledstate.BeaconState = BEACON_OFF;
5741 		xp->ledstate.LEDflags = LED_ALL_OFF;
5742 
5743 		if ((rval = ql_wrapup_led(ha)) != QL_SUCCESS) {
5744 			cmd->Status = EXT_STATUS_MAILBOX;
5745 		} else {
5746 			cmd->Status = EXT_STATUS_OK;
5747 		}
5748 		break;
5749 
5750 	case EXT_DEF_GRN_BLINK_ON:	/* turn beacon on */
5751 		if (xp->ledstate.BeaconState == BEACON_ON) {
5752 			/* not quite an error -- LED state is already on */
5753 			cmd->Status = EXT_STATUS_OK;
5754 			EL(ha, "LED on request  - LED is already on\n");
5755 			break;
5756 		}
5757 
5758 		if ((rval = ql_setup_led(ha)) != QL_SUCCESS) {
5759 			cmd->Status = EXT_STATUS_MAILBOX;
5760 			break;
5761 		}
5762 
5763 		if (CFG_IST(ha, CFG_CTRL_242581)) {
5764 			xp->ledstate.LEDflags = LED_YELLOW_24 | LED_AMBER_24;
5765 		} else {
5766 			xp->ledstate.LEDflags = LED_GREEN;
5767 		}
5768 		xp->ledstate.BeaconState = BEACON_ON;
5769 
5770 		cmd->Status = EXT_STATUS_OK;
5771 		break;
5772 	default:
5773 		cmd->Status = EXT_STATUS_ERR;
5774 		EL(ha, "failed, unknown state request %xh\n", bstate.State);
5775 		break;
5776 	}
5777 
5778 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5779 }
5780 
5781 /*
5782  * ql_get_led_state
5783  *	Performs EXT_GET_BEACON_STATE subcommand of EXT_CC_GET_DATA.
5784  *
5785  * Input:
5786  *	ha:	adapter state pointer.
5787  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5788  *	mode:	flags.
5789  *
5790  * Returns:
5791  *	None, request status indicated in cmd->Status.
5792  *
5793  * Context:
5794  *	Kernel context.
5795  */
5796 static void
5797 ql_get_led_state(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5798 {
5799 	EXT_BEACON_CONTROL	bstate = {0};
5800 	uint32_t		rval;
5801 	ql_xioctl_t		*xp = ha->xioctl;
5802 
5803 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5804 
5805 	if (cmd->ResponseLen < sizeof (EXT_BEACON_CONTROL)) {
5806 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
5807 		cmd->DetailStatus = sizeof (EXT_BEACON_CONTROL);
5808 		EL(ha, "done - failed, ResponseLen < EXT_BEACON_CONTROL,"
5809 		    "Len=%xh\n", cmd->ResponseLen);
5810 		cmd->ResponseLen = 0;
5811 		return;
5812 	}
5813 
5814 	if (ha->device_id < 0x2300) {
5815 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
5816 		cmd->DetailStatus = 0;
5817 		EL(ha, "done - failed, Invalid function for HBA model\n");
5818 		cmd->ResponseLen = 0;
5819 		return;
5820 	}
5821 
5822 	if (ha->task_daemon_flags & ABORT_ISP_ACTIVE) {
5823 		cmd->Status = EXT_STATUS_BUSY;
5824 		EL(ha, "done -  failed, isp abort active\n");
5825 		cmd->ResponseLen = 0;
5826 		return;
5827 	}
5828 
5829 	/* inform the user of the current beacon state (off or on) */
5830 	bstate.State = xp->ledstate.BeaconState;
5831 
5832 	rval = ddi_copyout((void *)&bstate,
5833 	    (void *)(uintptr_t)cmd->ResponseAdr,
5834 	    sizeof (EXT_BEACON_CONTROL), mode);
5835 
5836 	if (rval != 0) {
5837 		EL(ha, "failed, ddi_copyout\n");
5838 		cmd->Status = EXT_STATUS_COPY_ERR;
5839 		cmd->ResponseLen = 0;
5840 	} else {
5841 		cmd->Status = EXT_STATUS_OK;
5842 		cmd->ResponseLen = sizeof (EXT_BEACON_CONTROL);
5843 	}
5844 
5845 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5846 }
5847 
5848 /*
5849  * ql_blink_led
5850  *	Determine the next state of the LED and drive it
5851  *
5852  * Input:
5853  *	ha:	adapter state pointer.
5854  *
5855  * Context:
5856  *	Interrupt context.
5857  */
5858 void
5859 ql_blink_led(ql_adapter_state_t *ha)
5860 {
5861 	uint32_t		nextstate;
5862 	ql_xioctl_t		*xp = ha->xioctl;
5863 
5864 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5865 
5866 	if (xp->ledstate.BeaconState == BEACON_ON) {
5867 		/* determine the next led state */
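		/*
		 * (LEDflags & ~gpiod) yields the configured LED bits that
		 * are currently off; driving those toggles the beacon.
		 */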
5868 		if (CFG_IST(ha, CFG_CTRL_242581)) {
5869 			nextstate = (xp->ledstate.LEDflags) &
5870 			    (~(RD32_IO_REG(ha, gpiod)));
5871 		} else {
5872 			nextstate = (xp->ledstate.LEDflags) &
5873 			    (~(RD16_IO_REG(ha, gpiod)));
5874 		}
5875 
5876 		/* turn the led on or off */
5877 		ql_drive_led(ha, nextstate);
5878 	}
5879 
5880 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5881 }
5882 
5883 /*
5884  * ql_drive_led
5885  *	Drive the LEDs as determined by LEDflags
5886  *
5887  * Input:
5888  *	ha:		adapter state pointer.
5889  *	LEDflags:	LED flags
5890  *
5891  * Context:
5892  *	Kernel/Interrupt context.
5893  */
5894 static void
5895 ql_drive_led(ql_adapter_state_t *ha, uint32_t LEDflags)
5896 {
5897 
5898 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5899 
5900 	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
5901 
5902 		uint16_t	gpio_enable, gpio_data;
5903 
5904 		/* setup to send new data */
5905 		gpio_enable = (uint16_t)RD16_IO_REG(ha, gpioe);
5906 		gpio_enable = (uint16_t)(gpio_enable | LED_MASK);
5907 		WRT16_IO_REG(ha, gpioe, gpio_enable);
5908 
5909 		/* read current data and clear out old led data */
5910 		gpio_data = (uint16_t)RD16_IO_REG(ha, gpiod);
5911 		gpio_data = (uint16_t)(gpio_data & ~LED_MASK);
5912 
5913 		/* set in the new led data. */
5914 		gpio_data = (uint16_t)(gpio_data | LEDflags);
5915 
5916 		/* write out the new led data */
5917 		WRT16_IO_REG(ha, gpiod, gpio_data);
5918 
5919 	} else if (CFG_IST(ha, CFG_CTRL_242581)) {
5920 
5921 		uint32_t	gpio_data;
5922 
5923 		/* setup to send new data */
5924 		gpio_data = RD32_IO_REG(ha, gpiod);
5925 		gpio_data |= LED_MASK_UPDATE_24;
5926 		WRT32_IO_REG(ha, gpiod, gpio_data);
5927 
5928 		/* read current data and clear out old led data */
5929 		gpio_data = RD32_IO_REG(ha, gpiod);
5930 		gpio_data &= ~LED_MASK_COLORS_24;
5931 
5932 		/* set in the new led data */
5933 		gpio_data |= LEDflags;
5934 
5935 		/* write out the new led data */
5936 		WRT32_IO_REG(ha, gpiod, gpio_data);
5937 
5938 	} else {
5939 		EL(ha, "unsupported HBA: %xh", ha->device_id);
5940 	}
5941 
5942 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5943 }
5944 
5945 /*
5946  * ql_setup_led
5947  *	Setup LED for driver control
5948  *
5949  * Input:
5950  *	ha:	adapter state pointer.
5951  *
5952  * Context:
5953  *	Kernel/Interrupt context.
5954  */
5955 static uint32_t
5956 ql_setup_led(ql_adapter_state_t *ha)
5957 {
5958 	uint32_t	rval;
5959 	ql_mbx_data_t	mr;
5960 
5961 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5962 
5963 	/* decouple the LED control from the fw */
5964 	rval = ql_get_firmware_option(ha, &mr);
5965 	if (rval != QL_SUCCESS) {
5966 		EL(ha, "failed, get_firmware_option=%xh\n", rval);
5967 		return (rval);
5968 	}
5969 
5970 	/* set the appropriate options */
5971 	mr.mb[1] = (uint16_t)(mr.mb[1] | FO1_DISABLE_GPIO);
5972 
5973 	/* send it back to the firmware */
5974 	rval = ql_set_firmware_option(ha, &mr);
5975 	if (rval != QL_SUCCESS) {
5976 		EL(ha, "failed, set_firmware_option=%xh\n", rval);
5977 		return (rval);
5978 	}
5979 
5980 	/* initially, turn the LEDs off */
5981 	ql_drive_led(ha, LED_ALL_OFF);
5982 
5983 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5984 
5985 	return (rval);
5986 }
5987 
5988 /*
5989  * ql_wrapup_led
5990  *	Return LED control to the firmware
5991  *
5992  * Input:
5993  *	ha:	adapter state pointer.
5994  *
5995  * Context:
5996  *	Kernel/Interrupt context.
5997  */
5998 static uint32_t
5999 ql_wrapup_led(ql_adapter_state_t *ha)
6000 {
6001 	uint32_t	rval;
6002 	ql_mbx_data_t	mr;
6003 
6004 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
6005 
6006 	/* Turn all LEDs off */
6007 	ql_drive_led(ha, LED_ALL_OFF);
6008 
6009 	if (CFG_IST(ha, CFG_CTRL_242581)) {
6010 
6011 		uint32_t	gpio_data;
6012 
6013 		/* disable the LED update mask */
6014 		gpio_data = RD32_IO_REG(ha, gpiod);
6015 		gpio_data &= ~LED_MASK_UPDATE_24;
6016 
6017 		/* write out the data */
6018 		WRT32_IO_REG(ha, gpiod, gpio_data);
6019 	}
6020 
6021 	/* give LED control back to the f/w */
6022 	rval = ql_get_firmware_option(ha, &mr);
6023 	if (rval != QL_SUCCESS) {
6024 		EL(ha, "failed, get_firmware_option=%xh\n", rval);
6025 		return (rval);
6026 	}
6027 
6028 	mr.mb[1] = (uint16_t)(mr.mb[1] & ~FO1_DISABLE_GPIO);
6029 
6030 	rval = ql_set_firmware_option(ha, &mr);
6031 	if (rval != QL_SUCCESS) {
6032 		EL(ha, "failed, set_firmware_option=%xh\n", rval);
6033 		return (rval);
6034 	}
6035 
6036 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
6037 
6038 	return (rval);
6039 }
6040 
6041 /*
6042  * ql_get_port_summary
6043  *	Performs EXT_SC_GET_PORT_SUMMARY subcommand of EXT_CC_GET_DATA.
6044  *
6045  *	The EXT_IOCTL->RequestAdr points to a single
6046  *	UINT32 which identifies the device type.
6047  *
6048  * Input:
6049  *	ha:	adapter state pointer.
6050  *	cmd:	Local EXT_IOCTL cmd struct pointer.
6051  *	mode:	flags.
6052  *
6053  * Returns:
6054  *	None, request status indicated in cmd->Status.
6055  *
6056  * Context:
6057  *	Kernel context.
6058  */
6059 static void
6060 ql_get_port_summary(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
6061 {
6062 	EXT_DEVICEDATA		dd = {0};
6063 	EXT_DEVICEDATA		*uddp;
6064 	ql_link_t		*link;
6065 	ql_tgt_t		*tq;
6066 	uint32_t		rlen, dev_type, index;
6067 	int			rval = 0;
6068 	EXT_DEVICEDATAENTRY	*uddep, *ddep;
6069 
6070 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
6071 
6072 	ddep = &dd.EntryList[0];
6073 
6074 	/*
6075 	 * Get the type of device the requestor is looking for.
6076 	 *
6077 	 * We ignore this for now.
6078 	 */
6079 	rval = ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
6080 	    (void *)&dev_type, sizeof (dev_type), mode);
6081 	if (rval != 0) {
6082 		cmd->Status = EXT_STATUS_COPY_ERR;
6083 		cmd->ResponseLen = 0;
6084 		EL(ha, "failed, ddi_copyin\n");
6085 		return;
6086 	}
6087 	/*
6088 	 * Count the number of entries to be returned. Count devices
6089 	 * that are offline, but have been persistently bound.
6090 	 */
6091 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
6092 		for (link = ha->dev[index].first; link != NULL;
6093 		    link = link->next) {
6094 			tq = link->base_address;
6095 			if (tq->flags & TQF_INITIATOR_DEVICE ||
6096 			    !VALID_TARGET_ID(ha, tq->loop_id)) {
6097 				continue;	/* Skip this one */
6098 			}
6099 			dd.TotalDevices++;
6100 		}
6101 	}
6102 	/*
6103 	 * Compute the number of entries that can be returned
6104 	 * based upon the size of caller's response buffer.
6105 	 */
6106 	dd.ReturnListEntryCount = 0;
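	/*
	 * EXT_DEVICEDATA already embeds one EXT_DEVICEDATAENTRY, hence
	 * the (TotalDevices - 1) term below.
	 */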
6107 	if (dd.TotalDevices == 0) {
6108 		rlen = sizeof (EXT_DEVICEDATA) - sizeof (EXT_DEVICEDATAENTRY);
6109 	} else {
6110 		rlen = (uint32_t)(sizeof (EXT_DEVICEDATA) +
6111 		    (sizeof (EXT_DEVICEDATAENTRY) * (dd.TotalDevices - 1)));
6112 	}
6113 	if (rlen > cmd->ResponseLen) {
6114 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
6115 		cmd->DetailStatus = rlen;
6116 		EL(ha, "failed, rlen > ResponseLen, rlen=%d, Len=%d\n",
6117 		    rlen, cmd->ResponseLen);
6118 		cmd->ResponseLen = 0;
6119 		return;
6120 	}
6121 	cmd->ResponseLen = 0;
6122 	uddp = (EXT_DEVICEDATA *)(uintptr_t)cmd->ResponseAdr;
6123 	uddep = &uddp->EntryList[0];
6124 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
6125 		for (link = ha->dev[index].first; link != NULL;
6126 		    link = link->next) {
6127 			tq = link->base_address;
6128 			if (tq->flags & TQF_INITIATOR_DEVICE ||
6129 			    !VALID_TARGET_ID(ha, tq->loop_id)) {
6130 				continue;	/* Skip this one */
6131 			}
6132 
6133 			bzero((void *)ddep, sizeof (EXT_DEVICEDATAENTRY));
6134 
6135 			bcopy(tq->node_name, ddep->NodeWWN, 8);
6136 			bcopy(tq->port_name, ddep->PortWWN, 8);
6137 
6138 			ddep->PortID[0] = tq->d_id.b.domain;
6139 			ddep->PortID[1] = tq->d_id.b.area;
6140 			ddep->PortID[2] = tq->d_id.b.al_pa;
6141 
6142 			bcopy(tq->port_name,
6143 			    (caddr_t)&ddep->TargetAddress.Target, 8);
6144 
6145 			ddep->DeviceFlags = tq->flags;
6146 			ddep->LoopID = tq->loop_id;
6147 			QL_PRINT_9(CE_CONT, "(%d): Tgt=%lld, loop=%xh, "
6148 			    "wwnn=%02x%02x%02x%02x%02x%02x%02x%02x, "
6149 			    "wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
6150 			    ha->instance, ddep->TargetAddress.Target,
6151 			    ddep->LoopID, ddep->NodeWWN[0], ddep->NodeWWN[1],
6152 			    ddep->NodeWWN[2], ddep->NodeWWN[3],
6153 			    ddep->NodeWWN[4], ddep->NodeWWN[5],
6154 			    ddep->NodeWWN[6], ddep->NodeWWN[7],
6155 			    ddep->PortWWN[0], ddep->PortWWN[1],
6156 			    ddep->PortWWN[2], ddep->PortWWN[3],
6157 			    ddep->PortWWN[4], ddep->PortWWN[5],
6158 			    ddep->PortWWN[6], ddep->PortWWN[7]);
6159 			rval = ddi_copyout((void *)ddep, (void *)uddep,
6160 			    sizeof (EXT_DEVICEDATAENTRY), mode);
6161 
6162 			if (rval != 0) {
6163 				cmd->Status = EXT_STATUS_COPY_ERR;
6164 				cmd->ResponseLen = 0;
6165 				EL(ha, "failed, ddi_copyout\n");
6166 				break;
6167 			}
6168 			dd.ReturnListEntryCount++;
6169 			uddep++;
6170 			cmd->ResponseLen += (uint32_t)
6171 			    sizeof (EXT_DEVICEDATAENTRY);
6172 		}
6173 	}
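	/*
	 * Copy out the device-count header only; the entries were
	 * copied to the user buffer individually above.
	 */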
6174 	rval = ddi_copyout((void *)&dd, (void *)uddp,
6175 	    sizeof (EXT_DEVICEDATA) - sizeof (EXT_DEVICEDATAENTRY), mode);
6176 
6177 	if (rval != 0) {
6178 		cmd->Status = EXT_STATUS_COPY_ERR;
6179 		cmd->ResponseLen = 0;
6180 		EL(ha, "failed, ddi_copyout-2\n");
6181 	} else {
6182 		cmd->ResponseLen += (uint32_t)sizeof (EXT_DEVICEDATAENTRY);
6183 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
6184 	}
6185 }
6186 
6187 /*
6188  * ql_get_target_id
6189  *	Performs EXT_SC_GET_TARGET_ID subcommand of EXT_CC_GET_DATA.
6190  *
6191  * Input:
6192  *	ha:	adapter state pointer.
6193  *	cmd:	Local EXT_IOCTL cmd struct pointer.
6194  *	mode:	flags.
6195  *
6196  * Returns:
6197  *	None, request status indicated in cmd->Status.
6198  *
6199  * Context:
6200  *	Kernel context.
6201  */
6202 static void
6203 ql_get_target_id(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
6204 {
6205 	uint32_t		rval;
6206 	uint16_t		qlnt;
6207 	EXT_DEST_ADDR		extdestaddr = {0};
6208 	uint8_t			*name;
6209 	uint8_t			wwpn[EXT_DEF_WWN_NAME_SIZE];
6210 	ql_tgt_t		*tq;
6211 
6212 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
6213 
6214 	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
6215 	    (void*)wwpn, sizeof (EXT_DEST_ADDR), mode) != 0) {
6216 		EL(ha, "failed, ddi_copyin\n");
6217 		cmd->Status = EXT_STATUS_COPY_ERR;
6218 		cmd->ResponseLen = 0;
6219 		return;
6220 	}
6221 
6222 	qlnt = QLNT_PORT;
6223 	name = wwpn;
6224 	QL_PRINT_9(CE_CONT, "(%d): wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
6225 	    ha->instance, name[0], name[1], name[2], name[3], name[4],
6226 	    name[5], name[6], name[7]);
6227 
6228 	tq = ql_find_port(ha, name, qlnt);
6229 	if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
6230 		EL(ha, "failed, fc_port not found\n");
6231 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
6232 		cmd->ResponseLen = 0;
6233 		return;
6234 	}
6235 
6236 	bcopy(tq->port_name, (caddr_t)&extdestaddr.DestAddr.ScsiAddr.Target, 8);
6237 
6238 	rval = ddi_copyout((void *)&extdestaddr,
6239 	    (void *)(uintptr_t)cmd->ResponseAdr, sizeof (EXT_DEST_ADDR), mode);
6240 	if (rval != 0) {
6241 		EL(ha, "failed, ddi_copyout\n");
6242 		cmd->Status = EXT_STATUS_COPY_ERR;
6243 		cmd->ResponseLen = 0;
6244 	}
6245 
6246 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
6247 }
6248 
6249 /*
6250  * ql_setup_fcache
6251  *	Populates selected flash sections into the cache
6252  *
6253  * Input:
6254  *	ha = adapter state pointer.
6255  *
6256  * Returns:
6257  *	ql local function return status code.
6258  *
6259  * Context:
6260  *	Kernel context.
6261  *
6262  * Note:
6263  *	Driver must be in stalled state prior to entering, or
6264  *	add code to this function prior to calling ql_setup_flash().
6265  */
6266 int
6267 ql_setup_fcache(ql_adapter_state_t *ha)
6268 {
6269 	int		rval;
6270 	uint32_t	freadpos = 0;
6271 	uint32_t	fw_done = 0;
6272 	ql_fcache_t	*head = NULL;
6273 	ql_fcache_t	*tail = NULL;
6274 	ql_fcache_t	*ftmp;
6275 
6276 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6277 
6278 	CACHE_LOCK(ha);
6279 
6280 	/* If we already have populated it, rtn */
6281 	if (ha->fcache != NULL) {
6282 		CACHE_UNLOCK(ha);
6283 		EL(ha, "buffer already populated\n");
6284 		return (QL_SUCCESS);
6285 	}
6286 
6287 	ql_flash_nvram_defaults(ha);
6288 
6289 	if ((rval = ql_setup_flash(ha)) != QL_SUCCESS) {
6290 		CACHE_UNLOCK(ha);
6291 		EL(ha, "unable to setup flash; rval=%xh\n", rval);
6292 		return (rval);
6293 	}
6294 
6295 	while (freadpos != 0xffffffff) {
6296 
6297 		/* Allocate & populate this node */
6298 
6299 		if ((ftmp = ql_setup_fnode(ha)) == NULL) {
6300 			EL(ha, "node alloc failed\n");
6301 			rval = QL_FUNCTION_FAILED;
6302 			break;
6303 		}
6304 
6305 		/* link in the new node */
6306 		if (head == NULL) {
6307 			head = tail = ftmp;
6308 		} else {
6309 			tail->next = ftmp;
6310 			tail = ftmp;
6311 		}
6312 
6313 		/* Do the firmware node first for 24xx/25xx's */
6314 		if (fw_done == 0) {
6315 			if (CFG_IST(ha, CFG_CTRL_242581)) {
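				/*
				 * flash_fw_addr is a word address;
				 * convert it to a byte offset.
				 */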
6316 				freadpos = ha->flash_fw_addr << 2;
6317 			}
6318 			fw_done = 1;
6319 		}
6320 
6321 		if ((rval = ql_dump_fcode(ha, ftmp->buf, FBUFSIZE,
6322 		    freadpos)) != QL_SUCCESS) {
6323 			EL(ha, "failed, 24xx dump_fcode"
6324 			    " pos=%xh rval=%xh\n", freadpos, rval);
6325 			rval = QL_FUNCTION_FAILED;
6326 			break;
6327 		}
6328 
6329 		/* checkout the pci data / format */
6330 		if (ql_check_pci(ha, ftmp, &freadpos)) {
6331 			EL(ha, "flash header incorrect\n");
6332 			rval = QL_FUNCTION_FAILED;
6333 			break;
6334 		}
6335 	}
6336 
6337 	if (rval != QL_SUCCESS) {
6338 		/* release all resources we have */
6339 		ftmp = head;
6340 		while (ftmp != NULL) {
6341 			tail = ftmp->next;
6342 			kmem_free(ftmp->buf, FBUFSIZE);
6343 			kmem_free(ftmp, sizeof (ql_fcache_t));
6344 			ftmp = tail;
6345 		}
6346 
6347 		EL(ha, "failed, done\n");
6348 	} else {
6349 		ha->fcache = head;
6350 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6351 	}
6352 	CACHE_UNLOCK(ha);
6353 
6354 	return (rval);
6355 }
6356 
6357 /*
6358  * ql_update_fcache
6359  *	Re-populates updated flash into the fcache. If
6360  *	fcache does not exist (e.g., flash was empty/invalid on
6361  *	boot), this routine will create and populate it.
6362  *
6363  * Input:
6364  *	ha	= adapter state pointer.
6365  *	bfp	= Pointer to flash buffer.
6366  *	bsize	= Size of flash buffer.
6367  *
6368  * Returns:
6369  *
6370  * Context:
6371  *	Kernel context.
6372  */
6373 void
6374 ql_update_fcache(ql_adapter_state_t *ha, uint8_t *bfp, uint32_t bsize)
6375 {
6376 	int		rval = QL_SUCCESS;
6377 	uint32_t	freadpos = 0;
6378 	uint32_t	fw_done = 0;
6379 	ql_fcache_t	*head = NULL;
6380 	ql_fcache_t	*tail = NULL;
6381 	ql_fcache_t	*ftmp;
6382 
6383 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6384 
6385 	while (freadpos != 0xffffffff) {
6386 
6387 		/* Allocate & populate this node */
6388 
6389 		if ((ftmp = ql_setup_fnode(ha)) == NULL) {
6390 			EL(ha, "node alloc failed\n");
6391 			rval = QL_FUNCTION_FAILED;
6392 			break;
6393 		}
6394 
6395 		/* link in the new node */
6396 		if (head == NULL) {
6397 			head = tail = ftmp;
6398 		} else {
6399 			tail->next = ftmp;
6400 			tail = ftmp;
6401 		}
6402 
6403 		/* Do the firmware node first for 24xx's */
6404 		if (fw_done == 0) {
6405 			if (CFG_IST(ha, CFG_CTRL_242581)) {
6406 				freadpos = ha->flash_fw_addr << 2;
6407 			}
6408 			fw_done = 1;
6409 		}
6410 
6411 		/* read in first FBUFSIZE bytes of this flash section */
6412 		if (freadpos+FBUFSIZE > bsize) {
6413 			EL(ha, "passed buffer too small; fr=%xh, bsize=%xh\n",
6414 			    freadpos, bsize);
6415 			rval = QL_FUNCTION_FAILED;
6416 			break;
6417 		}
6418 		bcopy(bfp+freadpos, ftmp->buf, FBUFSIZE);
6419 
6420 		/* checkout the pci data / format */
6421 		if (ql_check_pci(ha, ftmp, &freadpos)) {
6422 			EL(ha, "flash header incorrect\n");
6423 			rval = QL_FUNCTION_FAILED;
6424 			break;
6425 		}
6426 	}
6427 
6428 	if (rval != QL_SUCCESS) {
6429 		/*
6430 		 * release all resources we have
6431 		 */
6432 		ql_fcache_rel(head);
6433 		EL(ha, "failed, done\n");
6434 	} else {
6435 		/*
6436 		 * Release previous fcache resources and update with new
6437 		 */
6438 		CACHE_LOCK(ha);
6439 		ql_fcache_rel(ha->fcache);
6440 		ha->fcache = head;
6441 		CACHE_UNLOCK(ha);
6442 
6443 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6444 	}
6445 }
6446 
6447 /*
6448  * ql_setup_fnode
6449  *	Allocates fcache node
6450  *
6451  * Input:
6452  *	ha = adapter state pointer.
6453  *
6454  * Returns:
6455  *	Pointer to allocated fcache node (NULL = failed).
6456  *
6457  * Context:
6458  *	Kernel context.
6459  *
6460  * Note:
6461  *	Driver must be in stalled state prior to entering, or
6462  *	add code to this function prior to calling ql_setup_flash().
6463  */
6464 static ql_fcache_t *
6465 ql_setup_fnode(ql_adapter_state_t *ha)
6466 {
6467 	ql_fcache_t	*fnode = NULL;
6468 
6469 	if ((fnode = (ql_fcache_t *)(kmem_zalloc(sizeof (ql_fcache_t),
6470 	    KM_SLEEP))) == NULL) {
6471 		EL(ha, "fnode alloc failed\n");
6472 		fnode = NULL;
6473 	} else if ((fnode->buf = (uint8_t *)(kmem_zalloc(FBUFSIZE,
6474 	    KM_SLEEP))) == NULL) {
6475 		EL(ha, "buf alloc failed\n");
6476 		kmem_free(fnode, sizeof (ql_fcache_t));
6477 		fnode = NULL;
6478 	} else {
6479 		fnode->buflen = FBUFSIZE;
6480 	}
6481 
6482 	return (fnode);
6483 }
6484 
6485 /*
6486  * ql_fcache_rel
6487  *	Releases the fcache resources
6488  *
6489  * Input:
6490  *	ha	= adapter state pointer.
6491  *	head	= Pointer to fcache linked list
6492  *
6493  * Returns:
6494  *
6495  * Context:
6496  *	Kernel context.
6497  *
6498  */
6499 void
6500 ql_fcache_rel(ql_fcache_t *head)
6501 {
6502 	ql_fcache_t	*ftmp = head;
6503 	ql_fcache_t	*tail;
6504 
6505 	/* release all resources we have */
6506 	while (ftmp != NULL) {
6507 		tail = ftmp->next;
6508 		kmem_free(ftmp->buf, FBUFSIZE);
6509 		kmem_free(ftmp, sizeof (ql_fcache_t));
6510 		ftmp = tail;
6511 	}
6512 }
6513 
6514 /*
6515  * ql_update_flash_caches
6516  *	Updates driver flash caches
6517  *
6518  * Input:
6519  *	ha:	adapter state pointer.
6520  *
6521  * Context:
6522  *	Kernel context.
6523  */
6524 static void
6525 ql_update_flash_caches(ql_adapter_state_t *ha)
6526 {
6527 	uint32_t		len;
6528 	ql_link_t		*link;
6529 	ql_adapter_state_t	*ha2;
6530 
6531 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6532 
6533 	/* Get base path length. */
6534 	for (len = (uint32_t)strlen(ha->devpath); len; len--) {
6535 		if (ha->devpath[len] == ',' ||
6536 		    ha->devpath[len] == '@') {
6537 			break;
6538 		}
6539 	}
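	/*
	 * Adapter instances whose devpath matches up to this separator
	 * are functions of the same physical adapter.
	 */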
6540 
6541 	/* Reset fcache on all adapter instances. */
6542 	for (link = ql_hba.first; link != NULL; link = link->next) {
6543 		ha2 = link->base_address;
6544 
6545 		if (strncmp(ha->devpath, ha2->devpath, len) != 0) {
6546 			continue;
6547 		}
6548 
6549 		CACHE_LOCK(ha2);
6550 		ql_fcache_rel(ha2->fcache);
6551 		ha2->fcache = NULL;
6552 
6553 		if (CFG_IST(ha, CFG_CTRL_242581)) {
6554 			if (ha2->vcache != NULL) {
6555 				kmem_free(ha2->vcache, QL_24XX_VPD_SIZE);
6556 				ha2->vcache = NULL;
6557 			}
6558 		}
6559 		CACHE_UNLOCK(ha2);
6560 
6561 		(void) ql_setup_fcache(ha2);
6562 	}
6563 
6564 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6565 }
6566 
6567 /*
6568  * ql_get_fbuf
6569  *	Search the fcache list for the type specified
6570  *
6571  * Input:
6572  *	fptr	= Pointer to fcache linked list
6573  *	ftype	= Type of image to be returned.
6574  *
6575  * Returns:
6576  *	Pointer to ql_fcache_t.
6577  *	NULL means not found.
6578  *
6579  * Context:
6580  *	Kernel context.
6581  *
6582  *
6583  */
6584 ql_fcache_t *
6585 ql_get_fbuf(ql_fcache_t *fptr, uint32_t ftype)
6586 {
6587 	while (fptr != NULL) {
6588 		/* does this image meet criteria? */
6589 		if (ftype & fptr->type) {
6590 			break;
6591 		}
6592 		fptr = fptr->next;
6593 	}
6594 	return (fptr);
6595 }
6596 
6597 /*
6598  * ql_check_pci
6599  *
6600  *	checks the passed buffer for a valid pci signature and
6601  *	expected (and in range) pci length values.
6602  *
6603  *	For firmware type, a pci header is added since the image in
6604  *	the flash does not have one (!!!).
6605  *
6606  *	On successful pci check, nextpos adjusted to next pci header.
6607  *
6608  * Returns:
6609  *	-1 --> last pci image
6610  *	0 --> pci header valid
6611  *	1 --> pci header invalid.
6612  *
6613  * Context:
6614  *	Kernel context.
6615  */
6616 static int
6617 ql_check_pci(ql_adapter_state_t *ha, ql_fcache_t *fcache, uint32_t *nextpos)
6618 {
6619 	pci_header_t	*pcih;
6620 	pci_data_t	*pcid;
6621 	uint32_t	doff;
6622 	uint8_t		*pciinfo;
6623 
6624 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
6625 
6626 	if (fcache != NULL) {
6627 		pciinfo = fcache->buf;
6628 	} else {
6629 		EL(ha, "failed, null fcache ptr passed\n");
6630 		return (1);
6631 	}
6632 
6633 	if (pciinfo == NULL) {
6634 		EL(ha, "failed, null pciinfo ptr passed\n");
6635 		return (1);
6636 	}
6637 
6638 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
6639 		caddr_t	bufp;
6640 		uint_t	len;
6641 
6642 		if (pciinfo[0] != SBUS_CODE_FCODE) {
6643 			EL(ha, "failed, unable to detect sbus fcode\n");
6644 			return (1);
6645 		}
6646 		fcache->type = FTYPE_FCODE;
6647 
6648 		/*LINTED [Solaris DDI_DEV_T_ANY Lint error]*/
6649 		if (ddi_getlongprop(DDI_DEV_T_ANY, ha->dip,
6650 		    PROP_LEN_AND_VAL_ALLOC | DDI_PROP_DONTPASS |
6651 		    DDI_PROP_CANSLEEP, "version", (caddr_t)&bufp,
6652 		    (int *)&len) == DDI_PROP_SUCCESS) {
6653 
6654 			(void) snprintf(fcache->verstr,
6655 			    FCHBA_OPTION_ROM_VERSION_LEN, "%s", bufp);
6656 			kmem_free(bufp, len);
6657 		}
6658 
6659 		*nextpos = 0xffffffff;
6660 
6661 		QL_PRINT_9(CE_CONT, "(%d): CFG_SBUS_CARD, done\n",
6662 		    ha->instance);
6663 
6664 		return (0);
6665 	}
6666 
6667 	if (*nextpos == ha->flash_fw_addr << 2) {
6668 
6669 		pci_header_t	fwh = {0};
6670 		pci_data_t	fwd = {0};
6671 		uint8_t		*buf, *bufp;
6672 
6673 		/*
6674 		 * Build a pci header for the firmware module
6675 		 */
6676 		if ((buf = (uint8_t *)(kmem_zalloc(FBUFSIZE, KM_SLEEP))) ==
6677 		    NULL) {
6678 			EL(ha, "failed, unable to allocate buffer\n");
6679 			return (1);
6680 		}
6681 
6682 		fwh.signature[0] = PCI_HEADER0;
6683 		fwh.signature[1] = PCI_HEADER1;
6684 		fwh.dataoffset[0] = LSB(sizeof (pci_header_t));
6685 		fwh.dataoffset[1] = MSB(sizeof (pci_header_t));
6686 
6687 		fwd.signature[0] = 'P';
6688 		fwd.signature[1] = 'C';
6689 		fwd.signature[2] = 'I';
6690 		fwd.signature[3] = 'R';
6691 		fwd.codetype = PCI_CODE_FW;
6692 		fwd.pcidatalen[0] = LSB(sizeof (pci_data_t));
6693 		fwd.pcidatalen[1] = MSB(sizeof (pci_data_t));
6694 
6695 		bufp = buf;
6696 		bcopy(&fwh, bufp, sizeof (pci_header_t));
6697 		bufp += sizeof (pci_header_t);
6698 		bcopy(&fwd, bufp, sizeof (pci_data_t));
6699 		bufp += sizeof (pci_data_t);
6700 
6701 		bcopy(fcache->buf, bufp, (FBUFSIZE - sizeof (pci_header_t) -
6702 		    sizeof (pci_data_t)));
6703 		bcopy(buf, fcache->buf, FBUFSIZE);
6704 
6705 		fcache->type = FTYPE_FW;
6706 
6707 		(void) snprintf(fcache->verstr, FCHBA_OPTION_ROM_VERSION_LEN,
6708 		    "%d.%02d.%02d", fcache->buf[19], fcache->buf[23],
6709 		    fcache->buf[27]);
6710 
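		/*
		 * Resume the option-ROM image scan at flash offset 0,
		 * or at 0x200000 on 81xx parts.
		 */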
6711 		*nextpos = CFG_IST(ha, CFG_CTRL_81XX) ? 0x200000 : 0;
6712 		kmem_free(buf, FBUFSIZE);
6713 
6714 		QL_PRINT_9(CE_CONT, "(%d): FTYPE_FW, done\n", ha->instance);
6715 
6716 		return (0);
6717 	}
6718 
6719 	/* get to the pci header image length */
6720 	pcih = (pci_header_t *)pciinfo;
6721 
6722 	doff = pcih->dataoffset[0] | (pcih->dataoffset[1] << 8);
6723 
6724 	/* some header section sanity check */
6725 	if (pcih->signature[0] != PCI_HEADER0 ||
6726 	    pcih->signature[1] != PCI_HEADER1 || doff > 50) {
6727 		EL(ha, "buffer format error: s0=%xh, s1=%xh, off=%xh\n",
6728 		    pcih->signature[0], pcih->signature[1], doff);
6729 		return (1);
6730 	}
6731 
6732 	pcid = (pci_data_t *)(pciinfo + doff);
6733 
6734 	/* a slight sanity data section check */
6735 	if (pcid->signature[0] != 'P' || pcid->signature[1] != 'C' ||
6736 	    pcid->signature[2] != 'I' || pcid->signature[3] != 'R') {
6737 		EL(ha, "failed, data sig mismatch!\n");
6738 		return (1);
6739 	}
6740 
6741 	if (pcid->indicator == PCI_IND_LAST_IMAGE) {
6742 		EL(ha, "last image\n");
6743 		if (CFG_IST(ha, CFG_CTRL_242581)) {
6744 			ql_flash_layout_table(ha, *nextpos +
6745 			    (pcid->imagelength[0] | (pcid->imagelength[1] <<
6746 			    8)) * PCI_SECTOR_SIZE);
6747 			ql_24xx_flash_desc(ha);
6748 		}
6749 		*nextpos = 0xffffffff;
6750 	} else {
6751 		/* adjust the next flash read start position */
6752 		*nextpos += (pcid->imagelength[0] |
6753 		    (pcid->imagelength[1] << 8)) * PCI_SECTOR_SIZE;
6754 	}
6755 
6756 	switch (pcid->codetype) {
6757 	case PCI_CODE_X86PC:
6758 		fcache->type = FTYPE_BIOS;
6759 		break;
6760 	case PCI_CODE_FCODE:
6761 		fcache->type = FTYPE_FCODE;
6762 		break;
6763 	case PCI_CODE_EFI:
6764 		fcache->type = FTYPE_EFI;
6765 		break;
6766 	case PCI_CODE_HPPA:
6767 		fcache->type = FTYPE_HPPA;
6768 		break;
6769 	default:
6770 		fcache->type = FTYPE_UNKNOWN;
6771 		break;
6772 	}
6773 
6774 	(void) snprintf(fcache->verstr, FCHBA_OPTION_ROM_VERSION_LEN,
6775 	    "%d.%d", pcid->revisionlevel[1], pcid->revisionlevel[0]);
6776 
6777 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
6778 
6779 	return (0);
6780 }
6781 
6782 /*
6783  * ql_flash_layout_table
6784  *	Obtains flash addresses from table
6785  *
6786  * Input:
6787  *	ha:		adapter state pointer.
6788  *	flt_paddr:	flash layout pointer address.
6789  *
6790  * Context:
6791  *	Kernel context.
6792  */
6793 static void
6794 ql_flash_layout_table(ql_adapter_state_t *ha, uint32_t flt_paddr)
6795 {
6796 	ql_flt_ptr_t	*fptr;
6797 	ql_flt_hdr_t	*fhdr;
6798 	ql_flt_region_t	*frgn;
6799 	uint8_t		*bp;
6800 	int		rval;
6801 	uint32_t	len, faddr, cnt;
6802 	uint16_t	chksum, w16;
6803 
6804 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
6805 
6806 	/* Process flash layout table header */
6807 	if ((bp = kmem_zalloc(FLASH_LAYOUT_TABLE_SIZE, KM_SLEEP)) == NULL) {
6808 		EL(ha, "kmem_zalloc=null\n");
6809 		return;
6810 	}
6811 
6812 	/* Process pointer to flash layout table */
6813 	if ((rval = ql_dump_fcode(ha, bp, sizeof (ql_flt_ptr_t), flt_paddr)) !=
6814 	    QL_SUCCESS) {
6815 		EL(ha, "fptr dump_flash pos=%xh, status=%xh\n", flt_paddr,
6816 		    rval);
6817 		kmem_free(bp, FLASH_LAYOUT_TABLE_SIZE);
6818 		return;
6819 	}
6820 	fptr = (ql_flt_ptr_t *)bp;
6821 
6822 	/* Verify pointer to flash layout table. */
6823 	for (chksum = 0, cnt = 0; cnt < sizeof (ql_flt_ptr_t); cnt += 2) {
6824 		w16 = (uint16_t)CHAR_TO_SHORT(bp[cnt], bp[cnt + 1]);
6825 		chksum += w16;
6826 	}
6827 	if (chksum != 0 || fptr->sig[0] != 'Q' || fptr->sig[1] != 'F' ||
6828 	    fptr->sig[2] != 'L' || fptr->sig[3] != 'T') {
6829 		EL(ha, "ptr chksum=%xh, sig=%c%c%c%c\n", chksum, fptr->sig[0],
6830 		    fptr->sig[1], fptr->sig[2], fptr->sig[3]);
6831 		kmem_free(bp, FLASH_LAYOUT_TABLE_SIZE);
6832 		return;
6833 	}
6834 	faddr = CHAR_TO_LONG(fptr->addr[0], fptr->addr[1], fptr->addr[2],
6835 	    fptr->addr[3]);
6836 
6837 	/* Process flash layout table. */
6838 	if ((rval = ql_dump_fcode(ha, bp, FLASH_LAYOUT_TABLE_SIZE, faddr)) !=
6839 	    QL_SUCCESS) {
6840 		EL(ha, "fhdr dump_flash pos=%xh, status=%xh\n", faddr, rval);
6841 		kmem_free(bp, FLASH_LAYOUT_TABLE_SIZE);
6842 		return;
6843 	}
6844 	fhdr = (ql_flt_hdr_t *)bp;
6845 
6846 	/* Verify flash layout table. */
6847 	len = (uint16_t)(CHAR_TO_SHORT(fhdr->len[0], fhdr->len[1]) +
6848 	    sizeof (ql_flt_hdr_t));
6849 	if (len > FLASH_LAYOUT_TABLE_SIZE) {
6850 		chksum = 0xffff;
6851 	} else {
6852 		for (chksum = 0, cnt = 0; cnt < len; cnt += 2) {
6853 			w16 = (uint16_t)CHAR_TO_SHORT(bp[cnt], bp[cnt + 1]);
6854 			chksum += w16;
6855 		}
6856 	}
6857 	w16 = CHAR_TO_SHORT(fhdr->version[0], fhdr->version[1]);
6858 	if (chksum != 0 || w16 != 1) {
6859 		EL(ha, "table chksum=%xh, version=%d\n", chksum, w16);
6860 		kmem_free(bp, FLASH_LAYOUT_TABLE_SIZE);
6861 		return;
6862 	}
6863 
6864 	/* Process flash layout table regions */
6865 	for (frgn = (ql_flt_region_t *)(bp + sizeof (ql_flt_hdr_t));
6866 	    (caddr_t)frgn < (caddr_t)(bp + FLASH_LAYOUT_TABLE_SIZE); frgn++) {
6867 		faddr = CHAR_TO_LONG(frgn->beg_addr[0], frgn->beg_addr[1],
6868 		    frgn->beg_addr[2], frgn->beg_addr[3]);
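		/* Convert the region byte address to a word address. */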
6869 		faddr >>= 2;
6870 
6871 		switch (frgn->region) {
6872 		case FLASH_FW_REGION:
6873 			ha->flash_fw_addr = faddr;
6874 			QL_PRINT_9(CE_CONT, "(%d): flash_fw_addr=%xh\n",
6875 			    ha->instance, faddr);
6876 			break;
6877 		case FLASH_GOLDEN_FW_REGION:
6878 			ha->flash_golden_fw_addr = faddr;
6879 			QL_PRINT_9(CE_CONT, "(%d): flash_golden_fw_addr=%xh\n",
6880 			    ha->instance, faddr);
6881 			break;
6882 		case FLASH_VPD_0_REGION:
6883 			if (!(ha->flags & FUNCTION_1)) {
6884 				ha->flash_vpd_addr = faddr;
6885 				QL_PRINT_9(CE_CONT, "(%d): flash_vpd_addr=%xh"
6886 				    "\n", ha->instance, faddr);
6887 			}
6888 			break;
6889 		case FLASH_NVRAM_0_REGION:
6890 			if (!(ha->flags & FUNCTION_1)) {
6891 				ha->flash_nvram_addr = faddr;
6892 				QL_PRINT_9(CE_CONT, "(%d): flash_nvram_addr="
6893 				    "%xh\n", ha->instance, faddr);
6894 			}
6895 			break;
6896 		case FLASH_VPD_1_REGION:
6897 			if (ha->flags & FUNCTION_1) {
6898 				ha->flash_vpd_addr = faddr;
6899 				QL_PRINT_9(CE_CONT, "(%d): flash_vpd_addr=%xh"
6900 				    "\n", ha->instance, faddr);
6901 			}
6902 			break;
6903 		case FLASH_NVRAM_1_REGION:
6904 			if (ha->flags & FUNCTION_1) {
6905 				ha->flash_nvram_addr = faddr;
6906 				QL_PRINT_9(CE_CONT, "(%d): flash_nvram_addr="
6907 				    "%xh\n", ha->instance, faddr);
6908 			}
6909 			break;
6910 		case FLASH_DESC_TABLE_REGION:
6911 			ha->flash_desc_addr = faddr;
6912 			QL_PRINT_9(CE_CONT, "(%d): flash_desc_addr=%xh\n",
6913 			    ha->instance, faddr);
6914 			break;
6915 		case FLASH_ERROR_LOG_0_REGION:
6916 			if (!(ha->flags & FUNCTION_1)) {
6917 				ha->flash_errlog_start = faddr;
6918 				QL_PRINT_9(CE_CONT, "(%d): flash_errlog_addr="
6919 				    "%xh\n", ha->instance, faddr);
6920 			}
6921 			break;
6922 		case FLASH_ERROR_LOG_1_REGION:
6923 			if (ha->flags & FUNCTION_1) {
6924 				ha->flash_errlog_start = faddr;
6925 				QL_PRINT_9(CE_CONT, "(%d): flash_errlog_addr="
6926 				    "%xh\n", ha->instance, faddr);
6927 			}
6928 			break;
6929 		default:
6930 			break;
6931 		}
6932 	}
6933 	kmem_free(bp, FLASH_LAYOUT_TABLE_SIZE);
6934 
6935 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
6936 }
6937 
6938 /*
6939  * ql_flash_nvram_defaults
6940  *	Flash default addresses.
6941  *
6942  * Input:
6943  *	ha:		adapter state pointer.
6944  *
6945  * Returns:
6946  *	ql local function return status code.
6947  *
6948  * Context:
6949  *	Kernel context.
6950  */
6951 static void
6952 ql_flash_nvram_defaults(ql_adapter_state_t *ha)
6953 {
6954 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
6955 
6956 	if (ha->flags & FUNCTION_1) {
6957 		if (CFG_IST(ha, CFG_CTRL_2300)) {
6958 			ha->flash_nvram_addr = NVRAM_2300_FUNC1_ADDR;
6959 			ha->flash_fw_addr = FLASH_2300_FIRMWARE_ADDR;
6960 		} else if (CFG_IST(ha, CFG_CTRL_2422)) {
6961 			ha->flash_data_addr = FLASH_24_25_DATA_ADDR;
6962 			ha->flash_nvram_addr = NVRAM_2400_FUNC1_ADDR;
6963 			ha->flash_vpd_addr = VPD_2400_FUNC1_ADDR;
6964 			ha->flash_errlog_start = FLASH_2400_ERRLOG_START_ADDR_1;
6965 			ha->flash_desc_addr = FLASH_2400_DESCRIPTOR_TABLE;
6966 			ha->flash_fw_addr = FLASH_2400_FIRMWARE_ADDR;
6967 		} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
6968 			ha->flash_data_addr = FLASH_24_25_DATA_ADDR;
6969 			ha->flash_nvram_addr = NVRAM_2500_FUNC1_ADDR;
6970 			ha->flash_vpd_addr = VPD_2500_FUNC1_ADDR;
6971 			ha->flash_errlog_start = FLASH_2500_ERRLOG_START_ADDR_1;
6972 			ha->flash_desc_addr = FLASH_2500_DESCRIPTOR_TABLE;
6973 			ha->flash_fw_addr = FLASH_2500_FIRMWARE_ADDR;
6974 		} else if (CFG_IST(ha, CFG_CTRL_81XX)) {
6975 			ha->flash_data_addr = FLASH_8100_DATA_ADDR;
6976 			ha->flash_nvram_addr = NVRAM_8100_FUNC1_ADDR;
6977 			ha->flash_vpd_addr = VPD_8100_FUNC1_ADDR;
6978 			ha->flash_errlog_start = FLASH_8100_ERRLOG_START_ADDR_1;
6979 			ha->flash_desc_addr = FLASH_8100_DESCRIPTOR_TABLE;
6980 			ha->flash_fw_addr = FLASH_8100_FIRMWARE_ADDR;
6981 		}
6982 	} else {
6983 		if (CFG_IST(ha, CFG_CTRL_2200)) {
6984 			ha->flash_nvram_addr = NVRAM_2200_FUNC0_ADDR;
6985 			ha->flash_fw_addr = FLASH_2200_FIRMWARE_ADDR;
6986 		} else if (CFG_IST(ha, CFG_CTRL_2300)) {
6987 			ha->flash_nvram_addr = NVRAM_2300_FUNC0_ADDR;
6988 			ha->flash_fw_addr = FLASH_2300_FIRMWARE_ADDR;
6989 		} else if (CFG_IST(ha, CFG_CTRL_2422)) {
6990 			ha->flash_data_addr = FLASH_24_25_DATA_ADDR;
6991 			ha->flash_nvram_addr = NVRAM_2400_FUNC0_ADDR;
6992 			ha->flash_vpd_addr = VPD_2400_FUNC0_ADDR;
6993 			ha->flash_errlog_start = FLASH_2400_ERRLOG_START_ADDR_0;
6994 			ha->flash_desc_addr = FLASH_2400_DESCRIPTOR_TABLE;
6995 			ha->flash_fw_addr = FLASH_2400_FIRMWARE_ADDR;
6996 		} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
6997 			ha->flash_data_addr = FLASH_24_25_DATA_ADDR;
6998 			ha->flash_nvram_addr = NVRAM_2500_FUNC0_ADDR;
6999 			ha->flash_vpd_addr = VPD_2500_FUNC0_ADDR;
7000 			ha->flash_errlog_start = FLASH_2500_ERRLOG_START_ADDR_0;
7001 			ha->flash_desc_addr = FLASH_2500_DESCRIPTOR_TABLE;
7002 			ha->flash_fw_addr = FLASH_2500_FIRMWARE_ADDR;
7003 		} else if (CFG_IST(ha, CFG_CTRL_81XX)) {
7004 			ha->flash_data_addr = FLASH_8100_DATA_ADDR;
7005 			ha->flash_nvram_addr = NVRAM_8100_FUNC0_ADDR;
7006 			ha->flash_vpd_addr = VPD_8100_FUNC0_ADDR;
7007 			ha->flash_errlog_start = FLASH_8100_ERRLOG_START_ADDR_0;
7008 			ha->flash_desc_addr = FLASH_8100_DESCRIPTOR_TABLE;
7009 			ha->flash_fw_addr = FLASH_8100_FIRMWARE_ADDR;
7010 		}
7011 	}
7012 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7013 }
7014 
7015 /*
7016  * ql_get_sfp
7017  *	Returns sfp data to sdmapi caller
7018  *
7019  * Input:
7020  *	ha:	adapter state pointer.
7021  *	cmd:	Local EXT_IOCTL cmd struct pointer.
7022  *	mode:	flags.
7023  *
7024  * Returns:
7025  *	None, request status indicated in cmd->Status.
7026  *
7027  * Context:
7028  *	Kernel context.
7029  */
7030 static void
7031 ql_get_sfp(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7032 {
7033 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7034 
7035 	if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
7036 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7037 		EL(ha, "failed, invalid request for HBA\n");
7038 		return;
7039 	}
7040 
7041 	if (cmd->ResponseLen < QL_24XX_SFP_SIZE) {
7042 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
7043 		cmd->DetailStatus = QL_24XX_SFP_SIZE;
7044 		EL(ha, "failed, ResponseLen < SFP len, len passed=%xh\n",
7045 		    cmd->ResponseLen);
7046 		return;
7047 	}
7048 
7049 	/* Dump SFP data in user buffer */
7050 	if ((ql_dump_sfp(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
7051 	    mode)) != 0) {
7052 		cmd->Status = EXT_STATUS_COPY_ERR;
7053 		EL(ha, "failed, copy error\n");
7054 	} else {
7055 		cmd->Status = EXT_STATUS_OK;
7056 	}
7057 
7058 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7059 }
7060 
7061 /*
7062  * ql_dump_sfp
7063  *	Dumps SFP.
7064  *
7065  * Input:
7066  *	ha:	adapter state pointer.
7067  *	bp:	buffer address.
7068  *	mode:	flags
7069  *
7070  * Returns:
7071  *
7072  * Context:
7073  *	Kernel context.
7074  */
7075 static int
7076 ql_dump_sfp(ql_adapter_state_t *ha, void *bp, int mode)
7077 {
7078 	dma_mem_t	mem;
7079 	uint32_t	cnt;
7080 	int		rval2, rval = 0;
7081 	uint32_t	dxfer;
7082 
7083 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7084 
7085 	/* Get memory for SFP. */
7086 
7087 	if ((rval2 = ql_get_dma_mem(ha, &mem, 64, LITTLE_ENDIAN_DMA,
7088 	    QL_DMA_DATA_ALIGN)) != QL_SUCCESS) {
7089 		EL(ha, "failed, ql_get_dma_mem=%xh\n", rval2);
7090 		return (ENOMEM);
7091 	}
7092 
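	/*
	 * SFP bytes 0-255 come from I2C device address 0xA0,
	 * the remainder from 0xA2.
	 */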
7093 	for (cnt = 0; cnt < QL_24XX_SFP_SIZE; cnt += mem.size) {
7094 		rval2 = ql_read_sfp(ha, &mem,
7095 		    (uint16_t)(cnt < 256 ? 0xA0 : 0xA2),
7096 		    (uint16_t)(cnt & 0xff));
7097 		if (rval2 != QL_SUCCESS) {
7098 			EL(ha, "failed, read_sfp=%xh\n", rval2);
7099 			rval = EFAULT;
7100 			break;
7101 		}
7102 
7103 		/* copy the data back */
7104 		if ((dxfer = ql_send_buffer_data(mem.bp, bp, mem.size,
7105 		    mode)) != mem.size) {
7106 			/* ddi copy error */
7107 			EL(ha, "failed, ddi copy; byte cnt = %xh", dxfer);
7108 			rval = EFAULT;
7109 			break;
7110 		}
7111 
7112 		/* adjust the buffer pointer */
7113 		bp = (caddr_t)bp + mem.size;
7114 	}
7115 
7116 	ql_free_phys(ha, &mem);
7117 
7118 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7119 
7120 	return (rval);
7121 }
7122 
7123 /*
7124  * ql_port_param
7125  *	Retrieves or sets the firmware port speed settings
7126  *
7127  * Input:
7128  *	ha:	adapter state pointer.
7129  *	cmd:	Local EXT_IOCTL cmd struct pointer.
7130  *	mode:	flags.
7131  *
7132  * Returns:
7133  *	None, request status indicated in cmd->Status.
7134  *
7135  * Context:
7136  *	Kernel context.
7137  *
7138  */
7139 static void
7140 ql_port_param(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7141 {
7142 	uint8_t			*name;
7143 	ql_tgt_t		*tq;
7144 	EXT_PORT_PARAM		port_param = {0};
7145 	uint32_t		rval = QL_SUCCESS;
7146 	uint32_t		idma_rate;
7147 
7148 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7149 
7150 	if (CFG_IST(ha, CFG_CTRL_242581) == 0) {
7151 		EL(ha, "invalid request for this HBA\n");
7152 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7153 		cmd->ResponseLen = 0;
7154 		return;
7155 	}
7156 
7157 	if (LOOP_NOT_READY(ha)) {
7158 		EL(ha, "failed, loop not ready\n");
7159 		cmd->Status = EXT_STATUS_DEVICE_OFFLINE;
7160 		cmd->ResponseLen = 0;
7161 		return;
7162 	}
7163 
7164 	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
7165 	    (void*)&port_param, sizeof (EXT_PORT_PARAM), mode) != 0) {
7166 		EL(ha, "failed, ddi_copyin\n");
7167 		cmd->Status = EXT_STATUS_COPY_ERR;
7168 		cmd->ResponseLen = 0;
7169 		return;
7170 	}
7171 
7172 	if (port_param.FCScsiAddr.DestType != EXT_DEF_DESTTYPE_WWPN) {
7173 		EL(ha, "Unsupported dest lookup type: %xh\n",
7174 		    port_param.FCScsiAddr.DestType);
7175 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
7176 		cmd->ResponseLen = 0;
7177 		return;
7178 	}
7179 
7180 	name = port_param.FCScsiAddr.DestAddr.WWPN;
7181 
7182 	QL_PRINT_9(CE_CONT, "(%d): wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
7183 	    ha->instance, name[0], name[1], name[2], name[3], name[4],
7184 	    name[5], name[6], name[7]);
7185 
7186 	tq = ql_find_port(ha, name, (uint16_t)QLNT_PORT);
7187 	if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
7188 		EL(ha, "failed, fc_port not found\n");
7189 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
7190 		cmd->ResponseLen = 0;
7191 		return;
7192 	}
7193 
7194 	cmd->Status = EXT_STATUS_OK;
7195 	cmd->DetailStatus = EXT_STATUS_OK;
7196 
7197 	switch (port_param.Mode) {
7198 	case EXT_IIDMA_MODE_GET:
7199 		/*
7200 		 * Report the firmware's port rate for the wwpn
7201 		 */
7202 		rval = ql_iidma_rate(ha, tq->loop_id, &idma_rate,
7203 		    port_param.Mode);
7204 
7205 		if (rval != QL_SUCCESS) {
7206 			EL(ha, "iidma get failed: %xh\n", rval);
7207 			cmd->Status = EXT_STATUS_MAILBOX;
7208 			cmd->DetailStatus = rval;
7209 			cmd->ResponseLen = 0;
7210 		} else {
7211 			switch (idma_rate) {
7212 			case IIDMA_RATE_1GB:
7213 				port_param.Speed =
7214 				    EXT_DEF_PORTSPEED_1GBIT;
7215 				break;
7216 			case IIDMA_RATE_2GB:
7217 				port_param.Speed =
7218 				    EXT_DEF_PORTSPEED_2GBIT;
7219 				break;
7220 			case IIDMA_RATE_4GB:
7221 				port_param.Speed =
7222 				    EXT_DEF_PORTSPEED_4GBIT;
7223 				break;
7224 			case IIDMA_RATE_8GB:
7225 				port_param.Speed =
7226 				    EXT_DEF_PORTSPEED_8GBIT;
7227 				break;
7228 			case IIDMA_RATE_10GB:
7229 				port_param.Speed =
7230 				    EXT_DEF_PORTSPEED_10GBIT;
7231 				break;
7232 			default:
7233 				port_param.Speed =
7234 				    EXT_DEF_PORTSPEED_UNKNOWN;
7235 				EL(ha, "failed, Port speed rate=%xh\n",
7236 				    idma_rate);
7237 				break;
7238 			}
7239 
7240 			/* Copy back the data */
7241 			rval = ddi_copyout((void *)&port_param,
7242 			    (void *)(uintptr_t)cmd->ResponseAdr,
7243 			    sizeof (EXT_PORT_PARAM), mode);
7244 
7245 			if (rval != 0) {
7246 				cmd->Status = EXT_STATUS_COPY_ERR;
7247 				cmd->ResponseLen = 0;
7248 				EL(ha, "failed, ddi_copyout\n");
7249 			} else {
7250 				cmd->ResponseLen = (uint32_t)
7251 				    sizeof (EXT_PORT_PARAM);
7252 			}
7253 		}
7254 		break;
7255 
7256 	case EXT_IIDMA_MODE_SET:
7257 		/*
7258 		 * Set the firmware's port rate for the wwpn
7259 		 */
7260 		switch (port_param.Speed) {
7261 		case EXT_DEF_PORTSPEED_1GBIT:
7262 			idma_rate = IIDMA_RATE_1GB;
7263 			break;
7264 		case EXT_DEF_PORTSPEED_2GBIT:
7265 			idma_rate = IIDMA_RATE_2GB;
7266 			break;
7267 		case EXT_DEF_PORTSPEED_4GBIT:
7268 			idma_rate = IIDMA_RATE_4GB;
7269 			break;
7270 		case EXT_DEF_PORTSPEED_8GBIT:
7271 			idma_rate = IIDMA_RATE_8GB;
7272 			break;
7273 		case EXT_DEF_PORTSPEED_10GBIT:
7274 			idma_rate = IIDMA_RATE_10GB;
7275 			break;
7276 		default:
7277 			EL(ha, "invalid set iidma rate: %x\n",
7278 			    port_param.Speed);
7279 			cmd->Status = EXT_STATUS_INVALID_PARAM;
7280 			cmd->ResponseLen = 0;
7281 			rval = QL_PARAMETER_ERROR;
7282 			break;
7283 		}
7284 
7285 		if (rval == QL_SUCCESS) {
7286 			rval = ql_iidma_rate(ha, tq->loop_id, &idma_rate,
7287 			    port_param.Mode);
7288 			if (rval != QL_SUCCESS) {
7289 				EL(ha, "iidma set failed: %xh\n", rval);
7290 				cmd->Status = EXT_STATUS_MAILBOX;
7291 				cmd->DetailStatus = rval;
7292 				cmd->ResponseLen = 0;
7293 			}
7294 		}
7295 		break;
7296 	default:
7297 		EL(ha, "invalid mode specified: %x\n", port_param.Mode);
7298 		cmd->Status = EXT_STATUS_INVALID_PARAM;
7299 		cmd->ResponseLen = 0;
7300 		cmd->DetailStatus = 0;
7301 		break;
7302 	}
7303 
7304 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7305 }
7306 
7307 /*
7308  * ql_get_fwexttrace
7309  *	Dumps f/w extended trace buffer
7310  *
7311  * Input:
7312  *	ha:	adapter state pointer.
7313  *	bp:	buffer address.
7314  *	mode:	flags
7315  *
7316  * Returns:
7317  *
7318  * Context:
7319  *	Kernel context.
7320  */
7321 /* ARGSUSED */
7322 static void
7323 ql_get_fwexttrace(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7324 {
7325 	int	rval;
7326 	caddr_t	payload;
7327 
7328 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7329 
7330 	if (CFG_IST(ha, CFG_CTRL_242581) == 0) {
7331 		EL(ha, "invalid request for this HBA\n");
7332 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7333 		cmd->ResponseLen = 0;
7334 		return;
7335 	}
7336 
7337 	if ((CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) == 0) ||
7338 	    (ha->fwexttracebuf.bp == NULL)) {
7339 		EL(ha, "f/w extended trace is not enabled\n");
7340 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7341 		cmd->ResponseLen = 0;
7342 		return;
7343 	}
7344 
7345 	if (cmd->ResponseLen < FWEXTSIZE) {
7346 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
7347 		cmd->DetailStatus = FWEXTSIZE;
7348 		EL(ha, "failed, ResponseLen (%xh) < %xh (FWEXTSIZE)\n",
7349 		    cmd->ResponseLen, FWEXTSIZE);
7350 		cmd->ResponseLen = 0;
7351 		return;
7352 	}
7353 
7354 	/* Time Stamp */
7355 	rval = ql_fw_etrace(ha, &ha->fwexttracebuf, FTO_INSERT_TIME_STAMP);
7356 	if (rval != QL_SUCCESS) {
7357 		EL(ha, "f/w extended trace insert"
7358 		    "time stamp failed: %xh\n", rval);
7359 		cmd->Status = EXT_STATUS_ERR;
7360 		cmd->ResponseLen = 0;
7361 		return;
7362 	}
7363 
7364 	/* Disable Tracing */
7365 	rval = ql_fw_etrace(ha, &ha->fwexttracebuf, FTO_EXT_TRACE_DISABLE);
7366 	if (rval != QL_SUCCESS) {
7367 		EL(ha, "f/w extended trace disable failed: %xh\n", rval);
7368 		cmd->Status = EXT_STATUS_ERR;
7369 		cmd->ResponseLen = 0;
7370 		return;
7371 	}
7372 
7373 	/* Allocate payload buffer */
7374 	payload = kmem_zalloc(FWEXTSIZE, KM_SLEEP);
7375 	if (payload == NULL) {
7376 		EL(ha, "failed, kmem_zalloc\n");
7377 		cmd->Status = EXT_STATUS_NO_MEMORY;
7378 		cmd->ResponseLen = 0;
7379 		return;
7380 	}
7381 
7382 	/* Sync DMA buffer. */
7383 	(void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
7384 	    FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);
7385 
7386 	/* Copy trace buffer data. */
7387 	ddi_rep_get8(ha->fwexttracebuf.acc_handle, (uint8_t *)payload,
7388 	    (uint8_t *)ha->fwexttracebuf.bp, FWEXTSIZE,
7389 	    DDI_DEV_AUTOINCR);
7390 
7391 	/* Send payload to application. */
7392 	if (ql_send_buffer_data(payload, (caddr_t)(uintptr_t)cmd->ResponseAdr,
7393 	    cmd->ResponseLen, mode) != cmd->ResponseLen) {
7394 		EL(ha, "failed, send_buffer_data\n");
7395 		cmd->Status = EXT_STATUS_COPY_ERR;
7396 		cmd->ResponseLen = 0;
7397 	} else {
7398 		cmd->Status = EXT_STATUS_OK;
7399 	}
7400 
7401 	kmem_free(payload, FWEXTSIZE);
7402 
7403 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7404 }
7405 
7406 /*
7407  * ql_get_fwfcetrace
7408  *	Dumps f/w fibre channel event trace buffer
7409  *
7410  * Input:
7411  *	ha:	adapter state pointer.
7412  *	bp:	buffer address.
7413  *	mode:	flags
7414  *
7415  * Returns:
7416  *
7417  * Context:
7418  *	Kernel context.
7419  */
7420 /* ARGSUSED */
7421 static void
7422 ql_get_fwfcetrace(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7423 {
7424 	int	rval;
7425 	caddr_t	payload;
7426 
7427 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7428 
7429 	if (CFG_IST(ha, CFG_CTRL_242581) == 0) {
7430 		EL(ha, "invalid request for this HBA\n");
7431 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7432 		cmd->ResponseLen = 0;
7433 		return;
7434 	}
7435 
7436 	if ((CFG_IST(ha, CFG_ENABLE_FWFCETRACE) == 0) ||
7437 	    (ha->fwfcetracebuf.bp == NULL)) {
7438 		EL(ha, "f/w FCE trace is not enabled\n");
7439 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7440 		cmd->ResponseLen = 0;
7441 		return;
7442 	}
7443 
7444 	if (cmd->ResponseLen < FWFCESIZE) {
7445 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
7446 		cmd->DetailStatus = FWFCESIZE;
7447 		EL(ha, "failed, ResponseLen (%xh) < %xh (FWFCESIZE)\n",
7448 		    cmd->ResponseLen, FWFCESIZE);
7449 		cmd->ResponseLen = 0;
7450 		return;
7451 	}
7452 
7453 	/* Disable Tracing */
7454 	rval = ql_fw_etrace(ha, &ha->fwfcetracebuf, FTO_FCE_TRACE_DISABLE);
7455 	if (rval != QL_SUCCESS) {
7456 		EL(ha, "f/w FCE trace disable failed: %xh\n", rval);
7457 		cmd->Status = EXT_STATUS_ERR;
7458 		cmd->ResponseLen = 0;
7459 		return;
7460 	}
7461 
7462 	/* Allocate payload buffer */
7463 	payload = kmem_zalloc(FWFCESIZE, KM_SLEEP);
7464 	if (payload == NULL) {
7465 		EL(ha, "failed, kmem_zalloc\n");
7466 		cmd->Status = EXT_STATUS_NO_MEMORY;
7467 		cmd->ResponseLen = 0;
7468 		return;
7469 	}
7470 
7471 	/* Sync DMA buffer. */
7472 	(void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
7473 	    FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);
7474 
7475 	/* Copy trace buffer data. */
7476 	ddi_rep_get8(ha->fwfcetracebuf.acc_handle, (uint8_t *)payload,
7477 	    (uint8_t *)ha->fwfcetracebuf.bp, FWFCESIZE,
7478 	    DDI_DEV_AUTOINCR);
7479 
7480 	/* Send payload to application. */
7481 	if (ql_send_buffer_data(payload, (caddr_t)(uintptr_t)cmd->ResponseAdr,
7482 	    cmd->ResponseLen, mode) != cmd->ResponseLen) {
7483 		EL(ha, "failed, send_buffer_data\n");
7484 		cmd->Status = EXT_STATUS_COPY_ERR;
7485 		cmd->ResponseLen = 0;
7486 	} else {
7487 		cmd->Status = EXT_STATUS_OK;
7488 	}
7489 
7490 	kmem_free(payload, FWFCESIZE);
7491 
7492 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7493 }
7494 
7495 /*
7496  * ql_get_pci_data
7497  *	Retrieves pci config space data
7498  *
7499  * Input:
7500  *	ha:	adapter state pointer.
7501  *	cmd:	Local EXT_IOCTL cmd struct pointer.
7502  *	mode:	flags.
7503  *
7504  * Returns:
7505  *	None, request status indicated in cmd->Status.
7506  *
7507  * Context:
7508  *	Kernel context.
7509  *
7510  */
7511 static void
7512 ql_get_pci_data(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7513 {
7514 	uint8_t		cap_ptr;
7515 	uint8_t		cap_id;
7516 	uint32_t	buf_size = 256;
7517 
7518 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7519 
7520 	/*
7521 	 * First check the "Capabilities List" bit of the status register.
7522 	 */
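	/*
	 * Conventional PCI config space is 256 bytes; when a PCI Express
	 * capability is present the adapter exposes the 4KB extended
	 * config space, so the dump size is raised to 4096 bytes below.
	 */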
7523 	if (ql_pci_config_get16(ha, PCI_CONF_STAT) & PCI_STAT_CAP) {
7524 		/*
7525 		 * Now get the capability pointer
7526 		 */
7527 		cap_ptr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR);
7528 		while (cap_ptr != PCI_CAP_NEXT_PTR_NULL) {
7529 			/*
7530 			 * Check for the pcie capability.
7531 			 */
7532 			cap_id = (uint8_t)ql_pci_config_get8(ha, cap_ptr);
7533 			if (cap_id == PCI_CAP_ID_PCI_E) {
7534 				buf_size = 4096;
7535 				break;
7536 			}
7537 			cap_ptr = (uint8_t)ql_pci_config_get8(ha,
7538 			    (cap_ptr + PCI_CAP_NEXT_PTR));
7539 		}
7540 	}
7541 
7542 	if (cmd->ResponseLen < buf_size) {
7543 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
7544 		cmd->DetailStatus = buf_size;
7545 		EL(ha, "failed ResponseLen < buf_size, len passed=%xh\n",
7546 		    cmd->ResponseLen);
7547 		return;
7548 	}
7549 
7550 	/* Dump PCI config data. */
7551 	if ((ql_pci_dump(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
7552 	    buf_size, mode)) != 0) {
7553 		cmd->Status = EXT_STATUS_COPY_ERR;
7554 		cmd->DetailStatus = 0;
7555 		EL(ha, "failed, copy err pci_dump\n");
7556 	} else {
7557 		cmd->Status = EXT_STATUS_OK;
7558 		cmd->DetailStatus = buf_size;
7559 	}
7560 
7561 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7562 }
7563 
7564 /*
7565  * ql_pci_dump
7566  *	Dumps PCI config data to application buffer.
7567  *
7568  * Input:
7569  *	ha = adapter state pointer.
7570  *	bp = user buffer address.
 *	pci_size = number of bytes of config space to dump.
 *	mode = flags.
7571  *
7572  * Returns:
 *	0 = success, ENOMEM or EFAULT on failure.
7573  *
7574  * Context:
7575  *	Kernel context.
7576  */
7577 int
7578 ql_pci_dump(ql_adapter_state_t *ha, uint32_t *bp, uint32_t pci_size, int mode)
7579 {
7580 	uint32_t	pci_os;
7581 	uint32_t	*ptr32, *org_ptr32;
7582 
7583 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7584 
7585 	ptr32 = kmem_zalloc(pci_size, KM_SLEEP);
7586 	if (ptr32 == NULL) {
7587 		EL(ha, "failed kmem_zalloc\n");
7588 		return (ENOMEM);
7589 	}
7590 
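	/*
	 * Read the config space one 32-bit register at a time and force
	 * each word to little-endian so the dumped image has the same
	 * layout regardless of host byte order.
	 */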
7591 	/* store the initial value of ptr32 */
7592 	org_ptr32 = ptr32;
7593 	for (pci_os = 0; pci_os < pci_size; pci_os += 4) {
7594 		*ptr32 = (uint32_t)ql_pci_config_get32(ha, pci_os);
7595 		LITTLE_ENDIAN_32(ptr32);
7596 		ptr32++;
7597 	}
7598 
7599 	if (ddi_copyout((void *)org_ptr32, (void *)bp, pci_size, mode) !=
7600 	    0) {
7601 		EL(ha, "failed ddi_copyout\n");
7602 		kmem_free(org_ptr32, pci_size);
7603 		return (EFAULT);
7604 	}
7605 
7606 	QL_DUMP_9(org_ptr32, 8, pci_size);
7607 
7608 	kmem_free(org_ptr32, pci_size);
7609 
7610 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7611 
7612 	return (0);
7613 }
7614 
7615 /*
7616  * ql_menlo_reset
7617  *	Reset Menlo
7618  *
7619  * Input:
7620  *	ha:	adapter state pointer.
7621  *	cmd:	Local EXT_IOCTL cmd struct pointer.
7622  *	mode:	flags.
7623  *
7624  * Returns:
7625  *
7626  * Context:
7627  *	Kernel context.
7628  */
7629 static void
7630 ql_menlo_reset(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7631 {
7632 	EXT_MENLO_RESET	rst;
7633 	ql_mbx_data_t	mr;
7634 	int		rval;
7635 
7636 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7637 
7638 	if ((CFG_IST(ha, CFG_CTRL_MENLO)) == 0) {
7639 		EL(ha, "failed, invalid request for HBA\n");
7640 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7641 		cmd->ResponseLen = 0;
7642 		return;
7643 	}
7644 
7645 	/*
7646 	 * TODO: only vp_index 0 can do this (?)
7647 	 */
7648 
7649 	/*  Verify the size of request structure. */
7650 	if (cmd->RequestLen < sizeof (EXT_MENLO_RESET)) {
7651 		/* Return error */
7652 		EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen,
7653 		    sizeof (EXT_MENLO_RESET));
7654 		cmd->Status = EXT_STATUS_INVALID_PARAM;
7655 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
7656 		cmd->ResponseLen = 0;
7657 		return;
7658 	}
7659 
7660 	/* Get reset request. */
7661 	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
7662 	    (void *)&rst, sizeof (EXT_MENLO_RESET), mode) != 0) {
7663 		EL(ha, "failed, ddi_copyin\n");
7664 		cmd->Status = EXT_STATUS_COPY_ERR;
7665 		cmd->ResponseLen = 0;
7666 		return;
7667 	}
7668 
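	/*
	 * All ports sharing this HBA are quiesced before the Menlo reset
	 * so no I/O is outstanding to the device while it restarts; the
	 * ports are resumed below whether or not the reset succeeds.
	 */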
7669 	/* Wait for I/O to stop and daemon to stall. */
7670 	if (ql_suspend_hba(ha, 0) != QL_SUCCESS) {
7671 		EL(ha, "ql_stall_driver failed\n");
7672 		ql_restart_hba(ha);
7673 		cmd->Status = EXT_STATUS_BUSY;
7674 		cmd->ResponseLen = 0;
7675 		return;
7676 	}
7677 
7678 	rval = ql_reset_menlo(ha, &mr, rst.Flags);
7679 	if (rval != QL_SUCCESS) {
7680 		EL(ha, "failed, status=%xh\n", rval);
7681 		cmd->Status = EXT_STATUS_MAILBOX;
7682 		cmd->DetailStatus = rval;
7683 		cmd->ResponseLen = 0;
7684 	} else if (mr.mb[1] != 0) {
7685 		EL(ha, "failed, substatus=%d\n", mr.mb[1]);
7686 		cmd->Status = EXT_STATUS_ERR;
7687 		cmd->DetailStatus = mr.mb[1];
7688 		cmd->ResponseLen = 0;
7689 	}
7690 
7691 	ql_restart_hba(ha);
7692 
7693 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7694 }
7695 
7696 /*
7697  * ql_menlo_get_fw_version
7698  *	Get Menlo firmware version.
7699  *
7700  * Input:
7701  *	ha:	adapter state pointer.
7702  *	cmd:	Local EXT_IOCTL cmd struct pointer.
7703  *	mode:	flags.
7704  *
7705  * Returns:
7706  *
7707  * Context:
7708  *	Kernel context.
7709  */
7710 static void
7711 ql_menlo_get_fw_version(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7712 {
7713 	int				rval;
7714 	ql_mbx_iocb_t			*pkt;
7715 	EXT_MENLO_GET_FW_VERSION	ver = {0};
7716 
7717 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7718 
7719 	if ((CFG_IST(ha, CFG_CTRL_MENLO)) == 0) {
7720 		EL(ha, "failed, invalid request for HBA\n");
7721 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7722 		cmd->ResponseLen = 0;
7723 		return;
7724 	}
7725 
7726 	if (cmd->ResponseLen < sizeof (EXT_MENLO_GET_FW_VERSION)) {
7727 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
7728 		cmd->DetailStatus = sizeof (EXT_MENLO_GET_FW_VERSION);
7729 		EL(ha, "ResponseLen=%d < %d\n", cmd->ResponseLen,
7730 		    sizeof (EXT_MENLO_GET_FW_VERSION));
7731 		cmd->ResponseLen = 0;
7732 		return;
7733 	}
7734 
7735 	/* Allocate packet. */
7736 	pkt = kmem_zalloc(sizeof (ql_mbx_iocb_t), KM_SLEEP);
7737 	if (pkt == NULL) {
7738 		EL(ha, "failed, kmem_zalloc\n");
7739 		cmd->Status = EXT_STATUS_NO_MEMORY;
7740 		cmd->ResponseLen = 0;
7741 		return;
7742 	}
7743 
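	/*
	 * A VERIFY_MENLO_TYPE IOCB issued with VMF_DO_NOT_UPDATE_FW only
	 * reports the running Menlo firmware version; no image is
	 * transferred or flashed.
	 */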
7744 	pkt->mvfy.entry_type = VERIFY_MENLO_TYPE;
7745 	pkt->mvfy.entry_count = 1;
7746 	pkt->mvfy.options_status = LE_16(VMF_DO_NOT_UPDATE_FW);
7747 
7748 	rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, sizeof (ql_mbx_iocb_t));
7749 	LITTLE_ENDIAN_16(&pkt->mvfy.options_status);
7750 	LITTLE_ENDIAN_16(&pkt->mvfy.failure_code);
7751 	ver.FwVersion = LE_32(pkt->mvfy.fw_version);
7752 
7753 	if (rval != QL_SUCCESS || (pkt->mvfy.entry_status & 0x3c) != 0 ||
7754 	    pkt->mvfy.options_status != CS_COMPLETE) {
7755 		/* Command error */
7756 		EL(ha, "failed, status=%xh, es=%xh, cs=%xh, fc=%xh\n", rval,
7757 		    pkt->mvfy.entry_status & 0x3c, pkt->mvfy.options_status,
7758 		    pkt->mvfy.failure_code);
7759 		cmd->Status = EXT_STATUS_ERR;
7760 		cmd->DetailStatus = rval != QL_SUCCESS ? rval :
7761 		    QL_FUNCTION_FAILED;
7762 		cmd->ResponseLen = 0;
7763 	} else if (ddi_copyout((void *)&ver,
7764 	    (void *)(uintptr_t)cmd->ResponseAdr,
7765 	    sizeof (EXT_MENLO_GET_FW_VERSION), mode) != 0) {
7766 		EL(ha, "failed, ddi_copyout\n");
7767 		cmd->Status = EXT_STATUS_COPY_ERR;
7768 		cmd->ResponseLen = 0;
7769 	} else {
7770 		cmd->ResponseLen = sizeof (EXT_MENLO_GET_FW_VERSION);
7771 	}
7772 
7773 	kmem_free(pkt, sizeof (ql_mbx_iocb_t));
7774 
7775 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7776 }
7777 
7778 /*
7779  * ql_menlo_update_fw
7780  *	Update Menlo firmware.
7781  *
7782  * Input:
7783  *	ha:	adapter state pointer.
7784  *	cmd:	Local EXT_IOCTL cmd struct pointer.
7785  *	mode:	flags.
7786  *
7787  * Returns:
7788  *
7789  * Context:
7790  *	Kernel context.
7791  */
7792 static void
7793 ql_menlo_update_fw(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7794 {
7795 	ql_mbx_iocb_t		*pkt;
7796 	dma_mem_t		*dma_mem;
7797 	EXT_MENLO_UPDATE_FW	fw;
7798 	uint32_t		*ptr32;
7799 	int			rval;
7800 
7801 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7802 
7803 	if ((CFG_IST(ha, CFG_CTRL_MENLO)) == 0) {
7804 		EL(ha, "failed, invalid request for HBA\n");
7805 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7806 		cmd->ResponseLen = 0;
7807 		return;
7808 	}
7809 
7810 	/*
7811 	 * TODO: only vp_index 0 can do this (?)
7812 	 */
7813 
7814 	/*  Verify the size of request structure. */
7815 	if (cmd->RequestLen < sizeof (EXT_MENLO_UPDATE_FW)) {
7816 		/* Return error */
7817 		EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen,
7818 		    sizeof (EXT_MENLO_UPDATE_FW));
7819 		cmd->Status = EXT_STATUS_INVALID_PARAM;
7820 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
7821 		cmd->ResponseLen = 0;
7822 		return;
7823 	}
7824 
7825 	/* Get update fw request. */
7826 	if (ddi_copyin((caddr_t)(uintptr_t)cmd->RequestAdr, (caddr_t)&fw,
7827 	    sizeof (EXT_MENLO_UPDATE_FW), mode) != 0) {
7828 		EL(ha, "failed, ddi_copyin\n");
7829 		cmd->Status = EXT_STATUS_COPY_ERR;
7830 		cmd->ResponseLen = 0;
7831 		return;
7832 	}
7833 
7834 	/* Wait for I/O to stop and daemon to stall. */
7835 	if (ql_suspend_hba(ha, 0) != QL_SUCCESS) {
7836 		EL(ha, "ql_stall_driver failed\n");
7837 		ql_restart_hba(ha);
7838 		cmd->Status = EXT_STATUS_BUSY;
7839 		cmd->ResponseLen = 0;
7840 		return;
7841 	}
7842 
7843 	/* Allocate packet. */
7844 	dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP);
7845 	if (dma_mem == NULL) {
7846 		EL(ha, "failed, kmem_zalloc\n");
7847 		cmd->Status = EXT_STATUS_NO_MEMORY;
7848 		cmd->ResponseLen = 0;
7849 		return;
7850 	}
7851 	pkt = kmem_zalloc(sizeof (ql_mbx_iocb_t), KM_SLEEP);
7852 	if (pkt == NULL) {
7853 		EL(ha, "failed, kmem_zalloc\n");
7854 		kmem_free(dma_mem, sizeof (dma_mem_t));
7855 		ql_restart_hba(ha);
7856 		cmd->Status = EXT_STATUS_NO_MEMORY;
7857 		cmd->ResponseLen = 0;
7858 		return;
7859 	}
7860 
7861 	/* Get DMA memory for the IOCB */
7862 	if (ql_get_dma_mem(ha, dma_mem, fw.TotalByteCount, LITTLE_ENDIAN_DMA,
7863 	    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
7864 		cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
7865 		    "alloc failed", QL_NAME, ha->instance);
7866 		kmem_free(pkt, sizeof (ql_mbx_iocb_t));
7867 		kmem_free(dma_mem, sizeof (dma_mem_t));
7868 		ql_restart_hba(ha);
7869 		cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
7870 		cmd->ResponseLen = 0;
7871 		return;
7872 	}
7873 
7874 	/* Get firmware data. */
7875 	if (ql_get_buffer_data((caddr_t)(uintptr_t)fw.pFwDataBytes, dma_mem->bp,
7876 	    fw.TotalByteCount, mode) != fw.TotalByteCount) {
7877 		EL(ha, "failed, get_buffer_data\n");
7878 		ql_free_dma_resource(ha, dma_mem);
7879 		kmem_free(pkt, sizeof (ql_mbx_iocb_t));
7880 		kmem_free(dma_mem, sizeof (dma_mem_t));
7881 		ql_restart_hba(ha);
7882 		cmd->Status = EXT_STATUS_COPY_ERR;
7883 		cmd->ResponseLen = 0;
7884 		return;
7885 	}
7886 
7887 	/* Sync DMA buffer. */
7888 	(void) ddi_dma_sync(dma_mem->dma_handle, 0, dma_mem->size,
7889 	    DDI_DMA_SYNC_FORDEV);
7890 
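	/*
	 * Build the verify/update IOCB: the firmware image just copied
	 * into the DMA buffer is described by a single data segment, and
	 * fw.Flags from the caller selects the update options.
	 */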
7891 	pkt->mvfy.entry_type = VERIFY_MENLO_TYPE;
7892 	pkt->mvfy.entry_count = 1;
7893 	pkt->mvfy.options_status = (uint16_t)LE_16(fw.Flags);
7894 	ptr32 = dma_mem->bp;
7895 	pkt->mvfy.fw_version = LE_32(ptr32[2]);
7896 	pkt->mvfy.fw_size = LE_32(fw.TotalByteCount);
7897 	pkt->mvfy.fw_sequence_size = LE_32(fw.TotalByteCount);
7898 	pkt->mvfy.dseg_count = LE_16(1);
7899 	pkt->mvfy.dseg_0_address[0] = (uint32_t)
7900 	    LE_32(LSD(dma_mem->cookie.dmac_laddress));
7901 	pkt->mvfy.dseg_0_address[1] = (uint32_t)
7902 	    LE_32(MSD(dma_mem->cookie.dmac_laddress));
7903 	pkt->mvfy.dseg_0_length = LE_32(fw.TotalByteCount);
7904 
7905 	rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, sizeof (ql_mbx_iocb_t));
7906 	LITTLE_ENDIAN_16(&pkt->mvfy.options_status);
7907 	LITTLE_ENDIAN_16(&pkt->mvfy.failure_code);
7908 
7909 	if (rval != QL_SUCCESS || (pkt->mvfy.entry_status & 0x3c) != 0 ||
7910 	    pkt->mvfy.options_status != CS_COMPLETE) {
7911 		/* Command error */
7912 		EL(ha, "failed, status=%xh, es=%xh, cs=%xh, fc=%xh\n", rval,
7913 		    pkt->mvfy.entry_status & 0x3c, pkt->mvfy.options_status,
7914 		    pkt->mvfy.failure_code);
7915 		cmd->Status = EXT_STATUS_ERR;
7916 		cmd->DetailStatus = rval != QL_SUCCESS ? rval :
7917 		    QL_FUNCTION_FAILED;
7918 		cmd->ResponseLen = 0;
7919 	}
7920 
7921 	ql_free_dma_resource(ha, dma_mem);
7922 	kmem_free(pkt, sizeof (ql_mbx_iocb_t));
7923 	kmem_free(dma_mem, sizeof (dma_mem_t));
7924 	ql_restart_hba(ha);
7925 
7926 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7927 }
7928 
7929 /*
7930  * ql_menlo_manage_info
7931  *	Performs Menlo management operations.
7932  *
7933  * Input:
7934  *	ha:	adapter state pointer.
7935  *	cmd:	Local EXT_IOCTL cmd struct pointer.
7936  *	mode:	flags.
7937  *
7938  * Returns:
7939  *
7940  * Context:
7941  *	Kernel context.
7942  */
7943 static void
7944 ql_menlo_manage_info(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7945 {
7946 	ql_mbx_iocb_t		*pkt;
7947 	dma_mem_t		*dma_mem = NULL;
7948 	EXT_MENLO_MANAGE_INFO	info;
7949 	int			rval;
7950 
7951 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7952 
7953 	if ((CFG_IST(ha, CFG_CTRL_MENLO)) == 0) {
7954 		EL(ha, "failed, invalid request for HBA\n");
7955 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7956 		cmd->ResponseLen = 0;
7957 		return;
7958 	}
7959 
7960 	/*  Verify the size of request structure. */
7961 	if (cmd->RequestLen < sizeof (EXT_MENLO_MANAGE_INFO)) {
7962 		/* Return error */
7963 		EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen,
7964 		    sizeof (EXT_MENLO_MANAGE_INFO));
7965 		cmd->Status = EXT_STATUS_INVALID_PARAM;
7966 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
7967 		cmd->ResponseLen = 0;
7968 		return;
7969 	}
7970 
7971 	/* Get manage info request. */
7972 	if (ddi_copyin((caddr_t)(uintptr_t)cmd->RequestAdr,
7973 	    (caddr_t)&info, sizeof (EXT_MENLO_MANAGE_INFO), mode) != 0) {
7974 		EL(ha, "failed, ddi_copyin\n");
7975 		cmd->Status = EXT_STATUS_COPY_ERR;
7976 		cmd->ResponseLen = 0;
7977 		return;
7978 	}
7979 
7980 	/* Allocate packet. */
7981 	pkt = kmem_zalloc(sizeof (ql_mbx_iocb_t), KM_SLEEP);
7982 	if (pkt == NULL) {
7983 		EL(ha, "failed, kmem_zalloc\n");
7984 		ql_restart_driver(ha);
7985 		cmd->Status = EXT_STATUS_NO_MEMORY;
7986 		cmd->ResponseLen = 0;
7987 		return;
7988 	}
7989 
7990 	pkt->mdata.entry_type = MENLO_DATA_TYPE;
7991 	pkt->mdata.entry_count = 1;
7992 	pkt->mdata.options_status = (uint16_t)LE_16(info.Operation);
7993 
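	/*
	 * Memory read/write operations move their data through a DMA
	 * buffer described by a single data segment; CHANGE_CONFIG and
	 * GET_INFO pass their arguments in the IOCB parameter fields and
	 * need no data segment.
	 */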
7994 	/* Get DMA memory for the IOCB */
7995 	if (info.Operation == MENLO_OP_READ_MEM ||
7996 	    info.Operation == MENLO_OP_WRITE_MEM) {
7997 		pkt->mdata.total_byte_count = LE_32(info.TotalByteCount);
7998 		pkt->mdata.parameter_1 =
7999 		    LE_32(info.Parameters.ap.MenloMemory.StartingAddr);
8000 		dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t),
8001 		    KM_SLEEP);
8002 		if (dma_mem == NULL) {
8003 			EL(ha, "failed, kmem_zalloc\n");
8004 			kmem_free(pkt, sizeof (ql_mbx_iocb_t));
8005 			cmd->Status = EXT_STATUS_NO_MEMORY;
8006 			cmd->ResponseLen = 0;
8007 			return;
8008 		}
8009 		if (ql_get_dma_mem(ha, dma_mem, info.TotalByteCount,
8010 		    LITTLE_ENDIAN_DMA, QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
8011 			cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
8012 			    "alloc failed", QL_NAME, ha->instance);
8013 			kmem_free(dma_mem, sizeof (dma_mem_t));
8014 			kmem_free(pkt, sizeof (ql_mbx_iocb_t));
8015 			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
8016 			cmd->ResponseLen = 0;
8017 			return;
8018 		}
8019 		if (info.Operation == MENLO_OP_WRITE_MEM) {
8020 			/* Get data. */
8021 			if (ql_get_buffer_data(
8022 			    (caddr_t)(uintptr_t)info.pDataBytes,
8023 			    dma_mem->bp, info.TotalByteCount, mode) !=
8024 			    info.TotalByteCount) {
8025 				EL(ha, "failed, get_buffer_data\n");
8026 				ql_free_dma_resource(ha, dma_mem);
8027 				kmem_free(dma_mem, sizeof (dma_mem_t));
8028 				kmem_free(pkt, sizeof (ql_mbx_iocb_t));
8029 				cmd->Status = EXT_STATUS_COPY_ERR;
8030 				cmd->ResponseLen = 0;
8031 				return;
8032 			}
8033 			(void) ddi_dma_sync(dma_mem->dma_handle, 0,
8034 			    dma_mem->size, DDI_DMA_SYNC_FORDEV);
8035 		}
8036 		pkt->mdata.dseg_count = LE_16(1);
8037 		pkt->mdata.dseg_0_address[0] = (uint32_t)
8038 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
8039 		pkt->mdata.dseg_0_address[1] = (uint32_t)
8040 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
8041 		pkt->mdata.dseg_0_length = LE_32(info.TotalByteCount);
8042 	} else if (info.Operation & MENLO_OP_CHANGE_CONFIG) {
8043 		pkt->mdata.parameter_1 =
8044 		    LE_32(info.Parameters.ap.MenloConfig.ConfigParamID);
8045 		pkt->mdata.parameter_2 =
8046 		    LE_32(info.Parameters.ap.MenloConfig.ConfigParamData0);
8047 		pkt->mdata.parameter_3 =
8048 		    LE_32(info.Parameters.ap.MenloConfig.ConfigParamData1);
8049 	} else if (info.Operation & MENLO_OP_GET_INFO) {
8050 		pkt->mdata.parameter_1 =
8051 		    LE_32(info.Parameters.ap.MenloInfo.InfoDataType);
8052 		pkt->mdata.parameter_2 =
8053 		    LE_32(info.Parameters.ap.MenloInfo.InfoContext);
8054 	}
8055 
8056 	rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, sizeof (ql_mbx_iocb_t));
8057 	LITTLE_ENDIAN_16(&pkt->mdata.options_status);
8058 	LITTLE_ENDIAN_16(&pkt->mdata.failure_code);
8059 
8060 	if (rval != QL_SUCCESS || (pkt->mdata.entry_status & 0x3c) != 0 ||
8061 	    pkt->mdata.options_status != CS_COMPLETE) {
8062 		/* Command error */
8063 		EL(ha, "failed, status=%xh, es=%xh, cs=%xh, fc=%xh\n", rval,
8064 		    pkt->mdata.entry_status & 0x3c, pkt->mdata.options_status,
8065 		    pkt->mdata.failure_code);
8066 		cmd->Status = EXT_STATUS_ERR;
8067 		cmd->DetailStatus = rval != QL_SUCCESS ? rval :
8068 		    QL_FUNCTION_FAILED;
8069 		cmd->ResponseLen = 0;
8070 	} else if (info.Operation == MENLO_OP_READ_MEM) {
8071 		(void) ddi_dma_sync(dma_mem->dma_handle, 0, dma_mem->size,
8072 		    DDI_DMA_SYNC_FORKERNEL);
8073 		if (ql_send_buffer_data((caddr_t)(uintptr_t)info.pDataBytes,
8074 		    dma_mem->bp, info.TotalByteCount, mode) !=
8075 		    info.TotalByteCount) {
8076 			cmd->Status = EXT_STATUS_COPY_ERR;
8077 			cmd->ResponseLen = 0;
8078 		}
8079 	}
8080 
 	/* DMA resources exist only for the memory read/write operations. */
 	if (dma_mem != NULL) {
8081 		ql_free_dma_resource(ha, dma_mem);
8082 		kmem_free(dma_mem, sizeof (dma_mem_t));
 	}
8083 	kmem_free(pkt, sizeof (ql_mbx_iocb_t));
8084 
8085 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8086 }
8087 
8088 /*
8089  * ql_suspend_hba
8090  *	Suspends all adapter ports.
8091  *
8092  * Input:
8093  *	ha:		adapter state pointer.
8094  *	options:	BIT_0 --> leave driver stalled on exit if
8095  *				  failed.
8096  *
8097  * Returns:
8098  *	ql local function return status code.
8099  *
8100  * Context:
8101  *	Kernel context.
8102  */
8103 static int
8104 ql_suspend_hba(ql_adapter_state_t *ha, uint32_t opt)
8105 {
8106 	ql_adapter_state_t	*ha2;
8107 	ql_link_t		*link;
8108 	int			rval = QL_SUCCESS;
8109 
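	/*
	 * Ports that share this adapter's fru_hba_index are functions of
	 * the same physical HBA; each one is stalled so the whole board
	 * is quiet.
	 */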
8110 	/* Quiesce I/O on all adapter ports */
8111 	for (link = ql_hba.first; link != NULL; link = link->next) {
8112 		ha2 = link->base_address;
8113 
8114 		if (ha2->fru_hba_index != ha->fru_hba_index) {
8115 			continue;
8116 		}
8117 
8118 		if ((rval = ql_stall_driver(ha2, opt)) != QL_SUCCESS) {
8119 			EL(ha, "ql_stall_driver status=%xh\n", rval);
8120 			break;
8121 		}
8122 	}
8123 
8124 	return (rval);
8125 }
8126 
8127 /*
8128  * ql_restart_hba
8129  *	Restarts adapter.
8130  *
8131  * Input:
8132  *	ha:	adapter state pointer.
8133  *
8134  * Context:
8135  *	Kernel context.
8136  */
8137 static void
8138 ql_restart_hba(ql_adapter_state_t *ha)
8139 {
8140 	ql_adapter_state_t	*ha2;
8141 	ql_link_t		*link;
8142 
8143 	/* Resume I/O on all adapter ports */
8144 	for (link = ql_hba.first; link != NULL; link = link->next) {
8145 		ha2 = link->base_address;
8146 
8147 		if (ha2->fru_hba_index != ha->fru_hba_index) {
8148 			continue;
8149 		}
8150 
8151 		ql_restart_driver(ha2);
8152 	}
8153 }
8154 
8155 /*
8156  * ql_get_vp_cnt_id
8157  *	Retrieves virtual port count and ID information.
8158  *
8159  * Input:
8160  *	ha:	adapter state pointer.
8161  *	cmd:	Local EXT_IOCTL cmd struct pointer.
8162  *	mode:	flags.
8163  *
8164  * Returns:
8165  *	None, request status indicated in cmd->Status.
8166  *
8167  * Context:
8168  *	Kernel context.
8169  *
8170  */
8171 static void
8172 ql_get_vp_cnt_id(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8173 {
8174 	ql_adapter_state_t	*vha;
8175 	PEXT_VPORT_ID_CNT	ptmp_vp;
8176 	int			id = 0;
8177 	int			rval;
8178 	char			name[MAXPATHLEN];
8179 
8180 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
8181 
8182 	/*
8183 	 * To be backward compatible with older API
8184 	 * check for the size of old EXT_VPORT_ID_CNT
8185 	 */
8186 	if (cmd->ResponseLen < sizeof (EXT_VPORT_ID_CNT) &&
8187 	    (cmd->ResponseLen != EXT_OLD_VPORT_ID_CNT_SIZE)) {
8188 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
8189 		cmd->DetailStatus = sizeof (EXT_VPORT_ID_CNT);
8190 		EL(ha, "failed, ResponseLen < EXT_VPORT_ID_CNT, Len=%xh\n",
8191 		    cmd->ResponseLen);
8192 		cmd->ResponseLen = 0;
8193 		return;
8194 	}
8195 
8196 	ptmp_vp = (EXT_VPORT_ID_CNT *)
8197 	    kmem_zalloc(sizeof (EXT_VPORT_ID_CNT), KM_SLEEP);
8198 	if (ptmp_vp == NULL) {
8199 		EL(ha, "failed, kmem_zalloc\n");
8200 		cmd->ResponseLen = 0;
8201 		return;
8202 	}
8203 	vha = ha->vp_next;
8204 	while (vha != NULL) {
8205 		ptmp_vp->VpCnt++;
8206 		ptmp_vp->VpId[id] = vha->vp_index;
8207 		(void) ddi_pathname(vha->dip, name);
8208 		(void) strcpy((char *)ptmp_vp->vp_path[id], name);
8209 		ptmp_vp->VpDrvInst[id] = (int32_t)vha->instance;
8210 		id++;
8211 		vha = vha->vp_next;
8212 	}
8213 	rval = ddi_copyout((void *)ptmp_vp,
8214 	    (void *)(uintptr_t)(cmd->ResponseAdr),
8215 	    cmd->ResponseLen, mode);
8216 	if (rval != 0) {
8217 		cmd->Status = EXT_STATUS_COPY_ERR;
8218 		cmd->ResponseLen = 0;
8219 		EL(ha, "failed, ddi_copyout\n");
8220 	} else {
8221 		cmd->ResponseLen = sizeof (EXT_VPORT_ID_CNT);
8222 		QL_PRINT_9(CE_CONT, "(%d): done, vport_cnt=%d\n",
8223 		    ha->instance, ptmp_vp->VpCnt);
8224 	}
8225 
 	kmem_free(ptmp_vp, sizeof (EXT_VPORT_ID_CNT));
8226 }
8227 
8228 /*
8229  * ql_vp_ioctl
8230  *	Performs all EXT_CC_VPORT_CMD functions.
8231  *
8232  * Input:
8233  *	ha:	adapter state pointer.
8234  *	cmd:	Local EXT_IOCTL cmd struct pointer.
8235  *	mode:	flags.
8236  *
8237  * Returns:
8238  *	None, request status indicated in cmd->Status.
8239  *
8240  * Context:
8241  *	Kernel context.
8242  */
8243 static void
8244 ql_vp_ioctl(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8245 {
8246 	QL_PRINT_9(CE_CONT, "(%d): started, cmd=%d\n", ha->instance,
8247 	    cmd->SubCode);
8248 
8249 	/* case off on command subcode */
8250 	switch (cmd->SubCode) {
8251 	case EXT_VF_SC_VPORT_GETINFO:
8252 		ql_qry_vport(ha, cmd, mode);
8253 		break;
8254 	default:
8255 		/* function not supported. */
8256 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
8257 		EL(ha, "failed, Unsupported Subcode=%xh\n",
8258 		    cmd->SubCode);
8259 		break;
8260 	}
8261 
8262 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8263 }
8264 
8265 /*
8266  * ql_qry_vport
8267  *	Performs EXT_VF_SC_VPORT_GETINFO subfunction.
8268  *
8269  * Input:
8270  *	ha:	adapter state pointer.
8271  *	cmd:	EXT_IOCTL cmd struct pointer.
8272  *	mode:	flags.
8273  *
8274  * Returns:
8275  *	None, request status indicated in cmd->Status.
8276  *
8277  * Context:
8278  *	Kernel context.
8279  */
8280 static void
8281 ql_qry_vport(ql_adapter_state_t *vha, EXT_IOCTL *cmd, int mode)
8282 {
8283 	ql_adapter_state_t	*tmp_vha;
8284 	EXT_VPORT_INFO		tmp_vport = {0};
8285 	int			max_vport;
8286 
8287 	QL_PRINT_9(CE_CONT, "(%d): started\n", vha->instance);
8288 
8289 	if (cmd->ResponseLen < sizeof (EXT_VPORT_INFO)) {
8290 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
8291 		cmd->DetailStatus = sizeof (EXT_VPORT_INFO);
8292 		EL(vha, "failed, ResponseLen < EXT_VPORT_INFO, Len=%xh\n",
8293 		    cmd->ResponseLen);
8294 		cmd->ResponseLen = 0;
8295 		return;
8296 	}
8297 
8298 	/* Fill in the vport information. */
8299 	bcopy(vha->loginparams.node_ww_name.raw_wwn, tmp_vport.wwnn,
8300 	    EXT_DEF_WWN_NAME_SIZE);
8301 	bcopy(vha->loginparams.nport_ww_name.raw_wwn, tmp_vport.wwpn,
8302 	    EXT_DEF_WWN_NAME_SIZE);
8303 	tmp_vport.state = vha->state;
8304 
8305 	tmp_vha = vha->pha->vp_next;
8306 	while (tmp_vha != NULL) {
8307 		tmp_vport.used++;
8308 		tmp_vha = tmp_vha->vp_next;
8309 	}
8310 
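	/*
	 * The free count is the per-chip virtual port limit (24xx vs.
	 * 25xx) minus the virtual ports already created above.
	 */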
8311 	max_vport = (CFG_IST(vha, CFG_CTRL_2422) ? MAX_24_VIRTUAL_PORTS :
8312 	    MAX_25_VIRTUAL_PORTS);
8313 	if (max_vport > tmp_vport.used) {
8314 		tmp_vport.free = max_vport - tmp_vport.used;
8315 	}
8316 
8317 	if (ddi_copyout((void *)&tmp_vport,
8318 	    (void *)(uintptr_t)(cmd->ResponseAdr),
8319 	    sizeof (EXT_VPORT_INFO), mode) != 0) {
8320 		cmd->Status = EXT_STATUS_COPY_ERR;
8321 		cmd->ResponseLen = 0;
8322 		EL(vha, "failed, ddi_copyout\n");
8323 	} else {
8324 		cmd->ResponseLen = sizeof (EXT_VPORT_INFO);
8325 		QL_PRINT_9(CE_CONT, "(%d): done\n", vha->instance);
8326 	}
8327 }
8328 
8329 /*
8330  * ql_access_flash
8331  *	Performs all EXT_CC_ACCESS_FLASH_OS functions.
8332  *
8333  * Input:
8334  *	ha:	adapter state pointer.
8335  *	cmd:	Local EXT_IOCTL cmd struct pointer.
8336  *	mode:	flags.
8337  *
8338  * Returns:
8339  *	None, request status indicated in cmd->Status.
8340  *
8341  * Context:
8342  *	Kernel context.
8343  */
8344 static void
8345 ql_access_flash(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8346 {
8347 	int	rval;
8348 
8349 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
8350 
8351 	switch (cmd->SubCode) {
8352 	case EXT_SC_FLASH_READ:
8353 		if ((rval = ql_flash_fcode_dump(ha,
8354 		    (void *)(uintptr_t)(cmd->ResponseAdr),
8355 		    (size_t)(cmd->ResponseLen), cmd->Reserved1, mode)) != 0) {
8356 			cmd->Status = EXT_STATUS_COPY_ERR;
8357 			cmd->ResponseLen = 0;
8358 			EL(ha, "flash_fcode_dump status=%xh\n", rval);
8359 		}
8360 		break;
8361 	case EXT_SC_FLASH_WRITE:
8362 		if ((rval = ql_r_m_w_flash(ha,
8363 		    (void *)(uintptr_t)(cmd->RequestAdr),
8364 		    (size_t)(cmd->RequestLen), cmd->Reserved1, mode)) !=
8365 		    QL_SUCCESS) {
8366 			cmd->Status = EXT_STATUS_COPY_ERR;
8367 			cmd->ResponseLen = 0;
8368 			EL(ha, "r_m_w_flash status=%xh\n", rval);
8369 		} else {
8370 			/* Reset caches on all adapter instances. */
8371 			ql_update_flash_caches(ha);
8372 		}
8373 		break;
8374 	default:
8375 		EL(ha, "unknown subcode=%xh\n", cmd->SubCode);
8376 		cmd->Status = EXT_STATUS_ERR;
8377 		cmd->ResponseLen = 0;
8378 		break;
8379 	}
8380 
8381 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8382 }
8383 
8384 /*
8385  * ql_reset_cmd
8386  *	Performs all EXT_CC_RESET_FW_OS functions.
8387  *
8388  * Input:
8389  *	ha:	adapter state pointer.
8390  *	cmd:	Local EXT_IOCTL cmd struct pointer.
8391  *
8392  * Returns:
8393  *	None, request status indicated in cmd->Status.
8394  *
8395  * Context:
8396  *	Kernel context.
8397  */
8398 static void
8399 ql_reset_cmd(ql_adapter_state_t *ha, EXT_IOCTL *cmd)
8400 {
8401 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
8402 
8403 	switch (cmd->SubCode) {
8404 	case EXT_SC_RESET_FC_FW:
8405 		EL(ha, "isp_abort_needed\n");
8406 		ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, 0);
8407 		break;
8408 	case EXT_SC_RESET_MPI_FW:
8409 		if (!(CFG_IST(ha, CFG_CTRL_81XX))) {
8410 			EL(ha, "invalid request for HBA\n");
8411 			cmd->Status = EXT_STATUS_INVALID_REQUEST;
8412 			cmd->ResponseLen = 0;
8413 		} else {
8414 			/* Wait for I/O to stop and daemon to stall. */
8415 			if (ql_suspend_hba(ha, 0) != QL_SUCCESS) {
8416 				EL(ha, "ql_suspend_hba failed\n");
8417 				cmd->Status = EXT_STATUS_BUSY;
8418 				cmd->ResponseLen = 0;
8419 			} else if (ql_restart_mpi(ha) != QL_SUCCESS) {
8420 				cmd->Status = EXT_STATUS_ERR;
8421 				cmd->ResponseLen = 0;
8422 			}
8423 			ql_restart_hba(ha);
8424 		}
8425 		break;
8426 	default:
8427 		EL(ha, "unknown subcode=%xh\n", cmd->SubCode);
8428 		cmd->Status = EXT_STATUS_ERR;
8429 		cmd->ResponseLen = 0;
8430 		break;
8431 	}
8432 
8433 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8434 }

8435 /*
8436  * ql_get_dcbx_parameters
8437  *	Get DCBX parameters.
8438  *
8439  * Input:
8440  *	ha:	adapter state pointer.
8441  *	cmd:	Local EXT_IOCTL cmd struct pointer.
8442  *	mode:	flags.
 *
 * Returns:
 *	None, request status indicated in cmd->Status.
 *
 * Context:
 *	Kernel context.
8443  */
8444 static void
8445 ql_get_dcbx_parameters(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8446 {
8447 	uint8_t		*tmp_buf;
8448 	int		rval;
8449 
8450 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
8451 
8452 	if (!(CFG_IST(ha, CFG_CTRL_81XX))) {
8453 		EL(ha, "invalid request for HBA\n");
8454 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
8455 		cmd->ResponseLen = 0;
 		return;
8456 	}
8457 
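	/*
	 * DCBX (Data Center Bridging eXchange) parameters only exist on
	 * the converged-network 81xx adapters, hence the controller-type
	 * check above.
	 */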
8458 	if (cmd->ResponseLen < EXT_DEF_DCBX_PARAM_BUF_SIZE) {
8459 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
8460 		cmd->DetailStatus = EXT_DEF_DCBX_PARAM_BUF_SIZE;
8461 		EL(ha, "failed, ResponseLen != %xh, Len=%xh\n",
8462 		    EXT_DEF_DCBX_PARAM_BUF_SIZE, cmd->ResponseLen);
8463 		cmd->ResponseLen = 0;
8464 		return;
8465 	}
8466 
8467 	/* Allocate memory for command. */
8468 	tmp_buf = kmem_zalloc(cmd->ResponseLen, KM_SLEEP);
8469 	if (tmp_buf == NULL) {
8470 		EL(ha, "failed, kmem_zalloc\n");
8471 		cmd->Status = EXT_STATUS_NO_MEMORY;
8472 		cmd->ResponseLen = 0;
8473 		return;
8474 	}
8475 	/* Send command */
8476 	rval = ql_get_dcbx_params(ha, cmd->ResponseLen, (caddr_t)tmp_buf);
8477 	if (rval != QL_SUCCESS) {
8478 		/* error */
8479 		EL(ha, "failed, get_dcbx_params=%xh\n", rval);
8480 		kmem_free(tmp_buf, cmd->ResponseLen);
8481 		cmd->Status = EXT_STATUS_ERR;
8482 		cmd->ResponseLen = 0;
8483 		return;
8484 	}
8485 
8486 	/* Copy the response */
8487 	if (ql_send_buffer_data((caddr_t)tmp_buf,
8488 	    (caddr_t)(uintptr_t)cmd->ResponseAdr,
8489 	    cmd->ResponseLen, mode) != cmd->ResponseLen) {
8490 		EL(ha, "failed, ddi_copyout\n");
8491 		kmem_free(tmp_buf, cmd->ResponseLen);
8492 		cmd->Status = EXT_STATUS_COPY_ERR;
8493 		cmd->ResponseLen = 0;
8494 	} else {
8495 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8496 		kmem_free(tmp_buf, cmd->ResponseLen);
8497 	}
8498 
8499 }
8500