xref: /titanic_41/usr/src/uts/common/io/fibre-channel/fca/qlc/ql_xioctl.c (revision db5b5f1e7e8c59712dae4a0b5c55ecee0e4c1bfc)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /* Copyright 2010 QLogic Corporation */
23 
24 /*
25  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
26  */
27 
28 /*
29  * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
30  */
31 
32 #pragma ident	"Copyright 2010 QLogic Corporation; ql_xioctl.c"
33 
34 /*
35  * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
36  *
37  * ***********************************************************************
38  * *									**
39  * *				NOTICE					**
40  * *		COPYRIGHT (C) 1996-2010 QLOGIC CORPORATION		**
41  * *			ALL RIGHTS RESERVED				**
42  * *									**
43  * ***********************************************************************
44  *
45  */
46 
47 #include <ql_apps.h>
48 #include <ql_api.h>
49 #include <ql_debug.h>
50 #include <ql_init.h>
51 #include <ql_iocb.h>
52 #include <ql_ioctl.h>
53 #include <ql_mbx.h>
54 #include <ql_xioctl.h>
55 
56 /*
57  * Local data
58  */
59 
60 /*
61  * Local prototypes
62  */
63 static int ql_sdm_ioctl(ql_adapter_state_t *, int, void *, int);
64 static int ql_sdm_setup(ql_adapter_state_t *, EXT_IOCTL **, void *, int,
65     boolean_t (*)(EXT_IOCTL *));
66 static boolean_t ql_validate_signature(EXT_IOCTL *);
67 static int ql_sdm_return(ql_adapter_state_t *, EXT_IOCTL *, void *, int);
68 static void ql_query(ql_adapter_state_t *, EXT_IOCTL *, int);
69 static void ql_qry_hba_node(ql_adapter_state_t *, EXT_IOCTL *, int);
70 static void ql_qry_hba_port(ql_adapter_state_t *, EXT_IOCTL *, int);
71 static void ql_qry_disc_port(ql_adapter_state_t *, EXT_IOCTL *, int);
72 static void ql_qry_disc_tgt(ql_adapter_state_t *, EXT_IOCTL *, int);
73 static void ql_qry_fw(ql_adapter_state_t *, EXT_IOCTL *, int);
74 static void ql_qry_chip(ql_adapter_state_t *, EXT_IOCTL *, int);
75 static void ql_qry_driver(ql_adapter_state_t *, EXT_IOCTL *, int);
76 static void ql_fcct(ql_adapter_state_t *, EXT_IOCTL *, int);
77 static void ql_aen_reg(ql_adapter_state_t *, EXT_IOCTL *, int);
78 static void ql_aen_get(ql_adapter_state_t *, EXT_IOCTL *, int);
79 static void ql_scsi_passthru(ql_adapter_state_t *, EXT_IOCTL *, int);
80 static void ql_wwpn_to_scsiaddr(ql_adapter_state_t *, EXT_IOCTL *, int);
81 static void ql_host_idx(ql_adapter_state_t *, EXT_IOCTL *, int);
82 static void ql_host_drvname(ql_adapter_state_t *, EXT_IOCTL *, int);
83 static void ql_read_nvram(ql_adapter_state_t *, EXT_IOCTL *, int);
84 static void ql_write_nvram(ql_adapter_state_t *, EXT_IOCTL *, int);
85 static void ql_read_flash(ql_adapter_state_t *, EXT_IOCTL *, int);
86 static void ql_write_flash(ql_adapter_state_t *, EXT_IOCTL *, int);
87 static void ql_write_vpd(ql_adapter_state_t *, EXT_IOCTL *, int);
88 static void ql_read_vpd(ql_adapter_state_t *, EXT_IOCTL *, int);
89 static void ql_diagnostic_loopback(ql_adapter_state_t *, EXT_IOCTL *, int);
90 static void ql_send_els_rnid(ql_adapter_state_t *, EXT_IOCTL *, int);
91 static void ql_set_host_data(ql_adapter_state_t *, EXT_IOCTL *, int);
92 static void ql_get_host_data(ql_adapter_state_t *, EXT_IOCTL *, int);
93 static void ql_qry_cna_port(ql_adapter_state_t *, EXT_IOCTL *, int);
94 
95 static int ql_lun_count(ql_adapter_state_t *, ql_tgt_t *);
96 static int ql_report_lun(ql_adapter_state_t *, ql_tgt_t *);
97 static int ql_inq_scan(ql_adapter_state_t *, ql_tgt_t *, int);
98 static int ql_inq(ql_adapter_state_t *, ql_tgt_t *, int, ql_mbx_iocb_t *,
99     uint8_t);
100 static uint32_t	ql_get_buffer_data(caddr_t, caddr_t, uint32_t, int);
101 static uint32_t ql_send_buffer_data(caddr_t, caddr_t, uint32_t, int);
102 static int ql_24xx_flash_desc(ql_adapter_state_t *);
103 static int ql_setup_flash(ql_adapter_state_t *);
104 static ql_tgt_t *ql_find_port(ql_adapter_state_t *, uint8_t *, uint16_t);
105 static int ql_flash_fcode_load(ql_adapter_state_t *, void *, uint32_t, int);
106 static int ql_flash_fcode_dump(ql_adapter_state_t *, void *, uint32_t,
107     uint32_t, int);
108 static int ql_program_flash_address(ql_adapter_state_t *, uint32_t,
109     uint8_t);
110 static void ql_set_rnid_parameters(ql_adapter_state_t *, EXT_IOCTL *, int);
111 static void ql_get_rnid_parameters(ql_adapter_state_t *, EXT_IOCTL *, int);
112 static int ql_reset_statistics(ql_adapter_state_t *, EXT_IOCTL *);
113 static void ql_get_statistics(ql_adapter_state_t *, EXT_IOCTL *, int);
114 static void ql_get_statistics_fc(ql_adapter_state_t *, EXT_IOCTL *, int);
115 static void ql_get_statistics_fc4(ql_adapter_state_t *, EXT_IOCTL *, int);
116 static void ql_set_led_state(ql_adapter_state_t *, EXT_IOCTL *, int);
117 static void ql_get_led_state(ql_adapter_state_t *, EXT_IOCTL *, int);
118 static void ql_drive_led(ql_adapter_state_t *, uint32_t);
119 static uint32_t ql_setup_led(ql_adapter_state_t *);
120 static uint32_t ql_wrapup_led(ql_adapter_state_t *);
121 static void ql_get_port_summary(ql_adapter_state_t *, EXT_IOCTL *, int);
122 static void ql_get_target_id(ql_adapter_state_t *, EXT_IOCTL *, int);
123 static void ql_get_sfp(ql_adapter_state_t *, EXT_IOCTL *, int);
124 static int ql_dump_sfp(ql_adapter_state_t *, void *, int);
125 static ql_fcache_t *ql_setup_fnode(ql_adapter_state_t *);
126 static void ql_get_fcache(ql_adapter_state_t *, EXT_IOCTL *, int);
127 static void ql_get_fcache_ex(ql_adapter_state_t *, EXT_IOCTL *, int);
128 void ql_update_fcache(ql_adapter_state_t *, uint8_t *, uint32_t);
129 static int ql_check_pci(ql_adapter_state_t *, ql_fcache_t *, uint32_t *);
130 static void ql_flash_layout_table(ql_adapter_state_t *, uint32_t);
131 static void ql_process_flt(ql_adapter_state_t *, uint32_t);
132 static void ql_flash_nvram_defaults(ql_adapter_state_t *);
133 static void ql_port_param(ql_adapter_state_t *, EXT_IOCTL *, int);
135 static void ql_get_pci_data(ql_adapter_state_t *, EXT_IOCTL *, int);
136 static void ql_get_fwfcetrace(ql_adapter_state_t *, EXT_IOCTL *, int);
137 static void ql_get_fwexttrace(ql_adapter_state_t *, EXT_IOCTL *, int);
138 static void ql_menlo_reset(ql_adapter_state_t *, EXT_IOCTL *, int);
139 static void ql_menlo_get_fw_version(ql_adapter_state_t *, EXT_IOCTL *, int);
140 static void ql_menlo_update_fw(ql_adapter_state_t *, EXT_IOCTL *, int);
141 static void ql_menlo_manage_info(ql_adapter_state_t *, EXT_IOCTL *, int);
142 static int ql_suspend_hba(ql_adapter_state_t *, uint32_t);
143 static void ql_restart_hba(ql_adapter_state_t *);
144 static void ql_get_vp_cnt_id(ql_adapter_state_t *, EXT_IOCTL *, int);
145 static void ql_vp_ioctl(ql_adapter_state_t *, EXT_IOCTL *, int);
146 static void ql_qry_vport(ql_adapter_state_t *, EXT_IOCTL *, int);
147 static void ql_access_flash(ql_adapter_state_t *, EXT_IOCTL *, int);
148 static void ql_reset_cmd(ql_adapter_state_t *, EXT_IOCTL *);
149 static void ql_update_flash_caches(ql_adapter_state_t *);
150 static void ql_get_dcbx_parameters(ql_adapter_state_t *, EXT_IOCTL *, int);
151 static void ql_get_xgmac_statistics(ql_adapter_state_t *, EXT_IOCTL *, int);
152 static void ql_get_fcf_list(ql_adapter_state_t *, EXT_IOCTL *, int);
153 static void ql_get_resource_counts(ql_adapter_state_t *, EXT_IOCTL *, int);
154 static void ql_qry_adapter_versions(ql_adapter_state_t *, EXT_IOCTL *, int);
155 static int ql_set_loop_point(ql_adapter_state_t *, uint16_t);
156 
157 /* ******************************************************************** */
158 /*			External IOCTL support.				*/
159 /* ******************************************************************** */
160 
161 /*
162  * ql_alloc_xioctl_resource
163  *	Allocates resources needed by module code.
164  *
165  * Input:
166  *	ha:		adapter state pointer.
167  *
168  * Returns:
169  *	SYS_ERRNO
170  *
171  * Context:
172  *	Kernel context.
173  */
174 int
175 ql_alloc_xioctl_resource(ql_adapter_state_t *ha)
176 {
177 	ql_xioctl_t	*xp;
178 
179 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
180 
181 	if (ha->xioctl != NULL) {
182 		QL_PRINT_9(CE_CONT, "(%d): already allocated done\n",
183 		    ha->instance);
184 		return (0);
185 	}
186 
187 	xp = kmem_zalloc(sizeof (ql_xioctl_t), KM_SLEEP);
188 	if (xp == NULL) {
189 		EL(ha, "failed, kmem_zalloc\n");
190 		return (ENOMEM);
191 	}
192 	ha->xioctl = xp;
193 
194 	/* Allocate AEN tracking buffer */
195 	xp->aen_tracking_queue = kmem_zalloc(EXT_DEF_MAX_AEN_QUEUE *
196 	    sizeof (EXT_ASYNC_EVENT), KM_SLEEP);
197 	if (xp->aen_tracking_queue == NULL) {
198 		EL(ha, "failed, kmem_zalloc-2\n");
199 		ql_free_xioctl_resource(ha);
200 		return (ENOMEM);
201 	}
202 
203 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
204 
205 	return (0);
206 }
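
/*
 * A minimal sketch of how this allocator pairs with ql_free_xioctl_resource();
 * the attach/detach call sites shown are assumptions for illustration only,
 * not the driver's actual attach code.
 *
 *	if (ql_alloc_xioctl_resource(ha) != 0)
 *		return (DDI_FAILURE);		(hypothetical attach path)
 *	...
 *	ql_free_xioctl_resource(ha);		(hypothetical detach path)
 */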
207 
208 /*
209  * ql_free_xioctl_resource
210  *	Frees resources used by module code.
211  *
212  * Input:
213  *	ha:		adapter state pointer.
214  *
215  * Context:
216  *	Kernel context.
217  */
218 void
219 ql_free_xioctl_resource(ql_adapter_state_t *ha)
220 {
221 	ql_xioctl_t	*xp = ha->xioctl;
222 
223 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
224 
225 	if (xp == NULL) {
226 		QL_PRINT_9(CE_CONT, "(%d): already freed\n", ha->instance);
227 		return;
228 	}
229 
230 	if (xp->aen_tracking_queue != NULL) {
231 		kmem_free(xp->aen_tracking_queue, EXT_DEF_MAX_AEN_QUEUE *
232 		    sizeof (EXT_ASYNC_EVENT));
233 		xp->aen_tracking_queue = NULL;
234 	}
235 
236 	kmem_free(xp, sizeof (ql_xioctl_t));
237 	ha->xioctl = NULL;
238 
239 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
240 }
241 
242 /*
243  * ql_xioctl
244  *	External IOCTL processing.
245  *
246  * Input:
247  *	ha:	adapter state pointer.
248  *	cmd:	function to perform
249  *	arg:	data type varies with request
250  *	mode:	flags
251  *	cred_p:	credentials pointer
252  *	rval_p:	pointer to result value
253  *
254  * Returns:
255  *	0:		success
256  *	ENXIO:		No such device or address
257  *	ENOPROTOOPT:	Protocol not available
258  *
259  * Context:
260  *	Kernel context.
261  */
262 /* ARGSUSED */
263 int
264 ql_xioctl(ql_adapter_state_t *ha, int cmd, intptr_t arg, int mode,
265     cred_t *cred_p, int *rval_p)
266 {
267 	int	rval;
268 
269 	QL_PRINT_9(CE_CONT, "(%d): started, cmd=%d\n", ha->instance, cmd);
270 
271 	if (ha->xioctl == NULL) {
272 		QL_PRINT_9(CE_CONT, "(%d): no context\n", ha->instance);
273 		return (ENXIO);
274 	}
275 
276 	switch (cmd) {
277 	case EXT_CC_QUERY:
278 	case EXT_CC_SEND_FCCT_PASSTHRU:
279 	case EXT_CC_REG_AEN:
280 	case EXT_CC_GET_AEN:
281 	case EXT_CC_SEND_SCSI_PASSTHRU:
282 	case EXT_CC_WWPN_TO_SCSIADDR:
283 	case EXT_CC_SEND_ELS_RNID:
284 	case EXT_CC_SET_DATA:
285 	case EXT_CC_GET_DATA:
286 	case EXT_CC_HOST_IDX:
287 	case EXT_CC_READ_NVRAM:
288 	case EXT_CC_UPDATE_NVRAM:
289 	case EXT_CC_READ_OPTION_ROM:
290 	case EXT_CC_READ_OPTION_ROM_EX:
291 	case EXT_CC_UPDATE_OPTION_ROM:
292 	case EXT_CC_UPDATE_OPTION_ROM_EX:
293 	case EXT_CC_GET_VPD:
294 	case EXT_CC_SET_VPD:
295 	case EXT_CC_LOOPBACK:
296 	case EXT_CC_GET_FCACHE:
297 	case EXT_CC_GET_FCACHE_EX:
298 	case EXT_CC_HOST_DRVNAME:
299 	case EXT_CC_GET_SFP_DATA:
300 	case EXT_CC_PORT_PARAM:
301 	case EXT_CC_GET_PCI_DATA:
302 	case EXT_CC_GET_FWEXTTRACE:
303 	case EXT_CC_GET_FWFCETRACE:
304 	case EXT_CC_GET_VP_CNT_ID:
305 	case EXT_CC_VPORT_CMD:
306 	case EXT_CC_ACCESS_FLASH:
307 	case EXT_CC_RESET_FW:
308 	case EXT_CC_MENLO_MANAGE_INFO:
309 		rval = ql_sdm_ioctl(ha, cmd, (void *)arg, mode);
310 		break;
311 	default:
312 		/* function not supported. */
313 		EL(ha, "function=%d not supported\n", cmd);
314 		rval = ENOPROTOOPT;
315 	}
316 
317 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
318 
319 	return (rval);
320 }
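
/*
 * A minimal user-land sketch of the envelope this entry point expects; the
 * file descriptor is assumed to come from opening the qlc device node (path
 * not shown), and EXT_CC_QUERY stands in for any of the command codes
 * accepted above.
 *
 *	EXT_IOCTL	ext;
 *
 *	(void) memset(&ext, 0, sizeof (ext));
 *	(void) memcpy(&ext.Signature, "QLOGIC", 6);
 *	ext.Version = EXT_VERSION;
 *	if (ioctl(fd, EXT_CC_QUERY, &ext) != 0 || ext.Status != EXT_STATUS_OK)
 *		the request was rejected; see the Status handling below.
 */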
321 
322 /*
323  * ql_sdm_ioctl
324  *	Provides ioctl functions for SAN/Device Management functions
325  *	AKA External Ioctl functions.
326  *
327  * Input:
328  *	ha:		adapter state pointer.
329  *	ioctl_code:	ioctl function to perform
330  *	arg:		Pointer to EXT_IOCTL cmd data in application land.
331  *	mode:		flags
332  *
333  * Returns:
334  *	0:	success
335  *	ENOMEM:	Alloc of local EXT_IOCTL struct failed.
336  *	EFAULT:	Copyin of caller's EXT_IOCTL struct failed or
337  *		copyout of EXT_IOCTL status info failed.
338  *	EINVAL:	Signature or version of caller's EXT_IOCTL invalid.
339  *	EBUSY:	Device busy
340  *
341  * Context:
342  *	Kernel context.
343  */
344 static int
345 ql_sdm_ioctl(ql_adapter_state_t *ha, int ioctl_code, void *arg, int mode)
346 {
347 	EXT_IOCTL		*cmd;
348 	int			rval;
349 	ql_adapter_state_t	*vha;
350 
351 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
352 
353 	/* Copy argument structure (EXT_IOCTL) from application land. */
354 	if ((rval = ql_sdm_setup(ha, &cmd, arg, mode,
355 	    ql_validate_signature)) != 0) {
356 		/*
357 		 * A non-zero value here means we could not copy the
358 		 * requested information in from application land; just
359 		 * return the error code.
360 		 */
361 		EL(ha, "failed, sdm_setup\n");
362 		return (rval);
363 	}
364 
365 	/*
366 	 * Map the physical ha ptr (which the ioctl is called with)
367 	 * to the virtual ha that the caller is addressing.
368 	 */
369 	if (ha->flags & VP_ENABLED) {
370 		/* Check that it is within range. */
371 		if (cmd->HbaSelect > (CFG_IST(ha, CFG_CTRL_2422) ?
372 		    MAX_24_VIRTUAL_PORTS : MAX_25_VIRTUAL_PORTS)) {
373 			EL(ha, "Invalid HbaSelect vp index: %xh\n",
374 			    cmd->HbaSelect);
375 			cmd->Status = EXT_STATUS_INVALID_VPINDEX;
376 			cmd->ResponseLen = 0;
377 			return (EFAULT);
378 		}
379 		/*
380 		 * Special case: HbaSelect == 0 is physical ha
381 		 */
382 		if (cmd->HbaSelect != 0) {
383 			vha = ha->vp_next;
384 			while (vha != NULL) {
385 				if (vha->vp_index == cmd->HbaSelect) {
386 					ha = vha;
387 					break;
388 				}
389 				vha = vha->vp_next;
390 			}
391 			/*
392 			 * The specified vp index may be valid (within
393 			 * range) but is not in the list; that is all we
394 			 * can report.
395 			 */
396 			if (vha == NULL) {
397 				cmd->Status = EXT_STATUS_INVALID_VPINDEX;
398 				cmd->ResponseLen = 0;
399 				return (EFAULT);
400 			}
401 		}
402 	}
403 
404 	/*
405 	 * If driver is suspended, stalled, or powered down rtn BUSY
406 	 */
407 	if (ha->flags & ADAPTER_SUSPENDED ||
408 	    ha->task_daemon_flags & DRIVER_STALL ||
409 	    ha->power_level != PM_LEVEL_D0) {
410 		EL(ha, " %s\n", ha->flags & ADAPTER_SUSPENDED ?
411 		    "driver suspended" :
412 		    (ha->task_daemon_flags & DRIVER_STALL ? "driver stalled" :
413 		    "FCA powered down"));
414 		cmd->Status = EXT_STATUS_BUSY;
415 		cmd->ResponseLen = 0;
416 		rval = EBUSY;
417 
418 		/* Return results to caller */
419 		if ((ql_sdm_return(ha, cmd, arg, mode)) == -1) {
420 			EL(ha, "failed, sdm_return\n");
421 			rval = EFAULT;
422 		}
423 		return (rval);
424 	}
425 
426 	switch (ioctl_code) {
427 	case EXT_CC_QUERY_OS:
428 		ql_query(ha, cmd, mode);
429 		break;
430 	case EXT_CC_SEND_FCCT_PASSTHRU_OS:
431 		ql_fcct(ha, cmd, mode);
432 		break;
433 	case EXT_CC_REG_AEN_OS:
434 		ql_aen_reg(ha, cmd, mode);
435 		break;
436 	case EXT_CC_GET_AEN_OS:
437 		ql_aen_get(ha, cmd, mode);
438 		break;
439 	case EXT_CC_GET_DATA_OS:
440 		ql_get_host_data(ha, cmd, mode);
441 		break;
442 	case EXT_CC_SET_DATA_OS:
443 		ql_set_host_data(ha, cmd, mode);
444 		break;
445 	case EXT_CC_SEND_ELS_RNID_OS:
446 		ql_send_els_rnid(ha, cmd, mode);
447 		break;
448 	case EXT_CC_SCSI_PASSTHRU_OS:
449 		ql_scsi_passthru(ha, cmd, mode);
450 		break;
451 	case EXT_CC_WWPN_TO_SCSIADDR_OS:
452 		ql_wwpn_to_scsiaddr(ha, cmd, mode);
453 		break;
454 	case EXT_CC_HOST_IDX_OS:
455 		ql_host_idx(ha, cmd, mode);
456 		break;
457 	case EXT_CC_HOST_DRVNAME_OS:
458 		ql_host_drvname(ha, cmd, mode);
459 		break;
460 	case EXT_CC_READ_NVRAM_OS:
461 		ql_read_nvram(ha, cmd, mode);
462 		break;
463 	case EXT_CC_UPDATE_NVRAM_OS:
464 		ql_write_nvram(ha, cmd, mode);
465 		break;
466 	case EXT_CC_READ_OPTION_ROM_OS:
467 	case EXT_CC_READ_OPTION_ROM_EX_OS:
468 		ql_read_flash(ha, cmd, mode);
469 		break;
470 	case EXT_CC_UPDATE_OPTION_ROM_OS:
471 	case EXT_CC_UPDATE_OPTION_ROM_EX_OS:
472 		ql_write_flash(ha, cmd, mode);
473 		break;
474 	case EXT_CC_LOOPBACK_OS:
475 		ql_diagnostic_loopback(ha, cmd, mode);
476 		break;
477 	case EXT_CC_GET_VPD_OS:
478 		ql_read_vpd(ha, cmd, mode);
479 		break;
480 	case EXT_CC_SET_VPD_OS:
481 		ql_write_vpd(ha, cmd, mode);
482 		break;
483 	case EXT_CC_GET_FCACHE_OS:
484 		ql_get_fcache(ha, cmd, mode);
485 		break;
486 	case EXT_CC_GET_FCACHE_EX_OS:
487 		ql_get_fcache_ex(ha, cmd, mode);
488 		break;
489 	case EXT_CC_GET_SFP_DATA_OS:
490 		ql_get_sfp(ha, cmd, mode);
491 		break;
492 	case EXT_CC_PORT_PARAM_OS:
493 		ql_port_param(ha, cmd, mode);
494 		break;
495 	case EXT_CC_GET_PCI_DATA_OS:
496 		ql_get_pci_data(ha, cmd, mode);
497 		break;
498 	case EXT_CC_GET_FWEXTTRACE_OS:
499 		ql_get_fwexttrace(ha, cmd, mode);
500 		break;
501 	case EXT_CC_GET_FWFCETRACE_OS:
502 		ql_get_fwfcetrace(ha, cmd, mode);
503 		break;
504 	case EXT_CC_MENLO_RESET:
505 		ql_menlo_reset(ha, cmd, mode);
506 		break;
507 	case EXT_CC_MENLO_GET_FW_VERSION:
508 		ql_menlo_get_fw_version(ha, cmd, mode);
509 		break;
510 	case EXT_CC_MENLO_UPDATE_FW:
511 		ql_menlo_update_fw(ha, cmd, mode);
512 		break;
513 	case EXT_CC_MENLO_MANAGE_INFO:
514 		ql_menlo_manage_info(ha, cmd, mode);
515 		break;
516 	case EXT_CC_GET_VP_CNT_ID_OS:
517 		ql_get_vp_cnt_id(ha, cmd, mode);
518 		break;
519 	case EXT_CC_VPORT_CMD_OS:
520 		ql_vp_ioctl(ha, cmd, mode);
521 		break;
522 	case EXT_CC_ACCESS_FLASH_OS:
523 		ql_access_flash(ha, cmd, mode);
524 		break;
525 	case EXT_CC_RESET_FW_OS:
526 		ql_reset_cmd(ha, cmd);
527 		break;
528 	default:
529 		/* function not supported. */
530 		EL(ha, "failed, function not supported=%d\n", ioctl_code);
531 
532 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
533 		cmd->ResponseLen = 0;
534 		break;
535 	}
536 
537 	/* Return results to caller */
538 	if (ql_sdm_return(ha, cmd, arg, mode) == -1) {
539 		EL(ha, "failed, sdm_return\n");
540 		return (EFAULT);
541 	}
542 
543 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
544 
545 	return (0);
546 }
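
/*
 * A sketch of addressing a virtual port through the HbaSelect field consumed
 * above; the values shown are illustrative.  When virtual ports are enabled,
 * HbaSelect == 0 always selects the physical adapter, and a non-zero value
 * must match a vp_index in the ha->vp_next chain or the call fails with
 * EXT_STATUS_INVALID_VPINDEX.
 *
 *	ext.HbaSelect = 0;	physical port
 *	ext.HbaSelect = 2;	virtual port whose vp_index is 2, if present
 */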
547 
548 /*
549  * ql_sdm_setup
550  *	Make a local copy of the EXT_IOCTL struct and validate it.
551  *
552  * Input:
553  *	ha:		adapter state pointer.
554  *	cmd_struct:	Pointer to location to store local adrs of EXT_IOCTL.
555  *	arg:		Address of application EXT_IOCTL cmd data
556  *	mode:		flags
557  *	val_sig:	Pointer to a function to validate the ioctl signature.
558  *
559  * Returns:
560  *	0:		success
561  *	EFAULT:		Copy in error of application EXT_IOCTL struct.
562  *	EINVAL:		Invalid version, signature.
563  *	ENOMEM:		Local allocation of EXT_IOCTL failed.
564  *
565  * Context:
566  *	Kernel context.
567  */
568 static int
569 ql_sdm_setup(ql_adapter_state_t *ha, EXT_IOCTL **cmd_struct, void *arg,
570     int mode, boolean_t (*val_sig)(EXT_IOCTL *))
571 {
572 	int		rval;
573 	EXT_IOCTL	*cmd;
574 
575 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
576 
577 	/* Allocate local memory for EXT_IOCTL. */
578 	*cmd_struct = NULL;
579 	cmd = (EXT_IOCTL *)kmem_zalloc(sizeof (EXT_IOCTL), KM_SLEEP);
580 	if (cmd == NULL) {
581 		EL(ha, "failed, kmem_zalloc\n");
582 		return (ENOMEM);
583 	}
584 	/* Get argument structure. */
585 	rval = ddi_copyin(arg, (void *)cmd, sizeof (EXT_IOCTL), mode);
586 	if (rval != 0) {
587 		EL(ha, "failed, ddi_copyin\n");
588 		rval = EFAULT;
589 	} else {
590 		/*
591 		 * Check signature and the version.
592 		 * If either are not valid then neither is the
593 		 * structure so don't attempt to return any error status
594 		 * because we can't trust what caller's arg points to.
595 		 * Just return the errno.
596 		 */
597 		if (val_sig(cmd) == 0) {
598 			EL(ha, "failed, signature\n");
599 			rval = EINVAL;
600 		} else if (cmd->Version > EXT_VERSION) {
601 			EL(ha, "failed, version\n");
602 			rval = EINVAL;
603 		}
604 	}
605 
606 	if (rval == 0) {
607 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
608 		*cmd_struct = cmd;
609 		cmd->Status = EXT_STATUS_OK;
610 		cmd->DetailStatus = 0;
611 	} else {
612 		kmem_free((void *)cmd, sizeof (EXT_IOCTL));
613 	}
614 
615 	return (rval);
616 }
617 
618 /*
619  * ql_validate_signature
620  *	Validate the signature string for an external ioctl call.
621  *
622  * Input:
623  *	sg:	Pointer to EXT_IOCTL signature to validate.
624  *
625  * Returns:
626  *	B_TRUE:		Signature is valid.
627  *	B_FALSE:	Signature is NOT valid.
628  *
629  * Context:
630  *	Kernel context.
631  */
632 static boolean_t
633 ql_validate_signature(EXT_IOCTL *cmd_struct)
634 {
635 	/*
636 	 * Check signature.
637 	 *
638 	 * If signature is not valid then neither is the rest of
639 	 * the structure (e.g., can't trust it), so don't attempt
640 	 * to return any error status other than the errno.
641 	 */
642 	if (bcmp(&cmd_struct->Signature, "QLOGIC", 6) != 0) {
643 		QL_PRINT_2(CE_CONT, "failed,\n");
644 		return (B_FALSE);
645 	}
646 
647 	return (B_TRUE);
648 }
649 
650 /*
651  * ql_sdm_return
652  *	Copies return data/status to application land for
653  *	ioctl call using the SAN/Device Management EXT_IOCTL call interface.
654  *
655  * Input:
656  *	ha:		adapter state pointer.
657  *	cmd:		Pointer to kernel copy of requestor's EXT_IOCTL struct.
659  *	arg:		EXT_IOCTL cmd data in application land.
660  *	mode:		flags
661  *
662  * Returns:
663  *	0:	success
664  *	EFAULT:	Copy out error.
665  *
666  * Context:
667  *	Kernel context.
668  */
669 /* ARGSUSED */
670 static int
671 ql_sdm_return(ql_adapter_state_t *ha, EXT_IOCTL *cmd, void *arg, int mode)
672 {
673 	int	rval = 0;
674 
675 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
676 
677 	rval |= ddi_copyout((void *)&cmd->ResponseLen,
678 	    (void *)&(((EXT_IOCTL*)arg)->ResponseLen), sizeof (uint32_t),
679 	    mode);
680 
681 	rval |= ddi_copyout((void *)&cmd->Status,
682 	    (void *)&(((EXT_IOCTL*)arg)->Status),
683 	    sizeof (cmd->Status), mode);
684 	rval |= ddi_copyout((void *)&cmd->DetailStatus,
685 	    (void *)&(((EXT_IOCTL*)arg)->DetailStatus),
686 	    sizeof (cmd->DetailStatus), mode);
687 
688 	kmem_free((void *)cmd, sizeof (EXT_IOCTL));
689 
690 	if (rval != 0) {
691 		/* Some copyout operation failed */
692 		EL(ha, "failed, ddi_copyout\n");
693 		return (EFAULT);
694 	}
695 
696 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
697 
698 	return (0);
699 }
700 
701 /*
702  * ql_query
703  *	Performs all EXT_CC_QUERY functions.
704  *
705  * Input:
706  *	ha:	adapter state pointer.
707  *	cmd:	Local EXT_IOCTL cmd struct pointer.
708  *	mode:	flags.
709  *
710  * Returns:
711  *	None, request status indicated in cmd->Status.
712  *
713  * Context:
714  *	Kernel context.
715  */
716 static void
717 ql_query(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
718 {
719 	QL_PRINT_9(CE_CONT, "(%d): started, cmd=%d\n", ha->instance,
720 	    cmd->SubCode);
721 
722 	/* Switch on the command subcode. */
723 	switch (cmd->SubCode) {
724 	case EXT_SC_QUERY_HBA_NODE:
725 		ql_qry_hba_node(ha, cmd, mode);
726 		break;
727 	case EXT_SC_QUERY_HBA_PORT:
728 		ql_qry_hba_port(ha, cmd, mode);
729 		break;
730 	case EXT_SC_QUERY_DISC_PORT:
731 		ql_qry_disc_port(ha, cmd, mode);
732 		break;
733 	case EXT_SC_QUERY_DISC_TGT:
734 		ql_qry_disc_tgt(ha, cmd, mode);
735 		break;
736 	case EXT_SC_QUERY_DRIVER:
737 		ql_qry_driver(ha, cmd, mode);
738 		break;
739 	case EXT_SC_QUERY_FW:
740 		ql_qry_fw(ha, cmd, mode);
741 		break;
742 	case EXT_SC_QUERY_CHIP:
743 		ql_qry_chip(ha, cmd, mode);
744 		break;
745 	case EXT_SC_QUERY_CNA_PORT:
746 		ql_qry_cna_port(ha, cmd, mode);
747 		break;
748 	case EXT_SC_QUERY_ADAPTER_VERSIONS:
749 		ql_qry_adapter_versions(ha, cmd, mode);
750 		break;
751 	case EXT_SC_QUERY_DISC_LUN:
752 	default:
753 		/* function not supported. */
754 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
755 		EL(ha, "failed, Unsupported Subcode=%xh\n",
756 		    cmd->SubCode);
757 		break;
758 	}
759 
760 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
761 }
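
/*
 * A minimal sketch of selecting one of the query subfunctions above from
 * application land; each subcode fills the response buffer with its own
 * structure (EXT_SC_QUERY_FW fills an EXT_FW, EXT_SC_QUERY_CHIP an
 * EXT_CHIP, and so on).
 *
 *	EXT_FW	fw;
 *
 *	ext.SubCode = EXT_SC_QUERY_FW;
 *	ext.ResponseAdr = (uint64_t)(uintptr_t)&fw;
 *	ext.ResponseLen = sizeof (EXT_FW);
 *	if (ioctl(fd, EXT_CC_QUERY, &ext) == 0 && ext.Status == EXT_STATUS_OK)
 *		the firmware version string is in fw.Version.
 */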
762 
763 /*
764  * ql_qry_hba_node
765  *	Performs EXT_SC_QUERY_HBA_NODE subfunction.
766  *
767  * Input:
768  *	ha:	adapter state pointer.
769  *	cmd:	EXT_IOCTL cmd struct pointer.
770  *	mode:	flags.
771  *
772  * Returns:
773  *	None, request status indicated in cmd->Status.
774  *
775  * Context:
776  *	Kernel context.
777  */
778 static void
779 ql_qry_hba_node(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
780 {
781 	EXT_HBA_NODE	tmp_node = {0};
782 	uint_t		len;
783 	caddr_t		bufp;
784 
785 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
786 
787 	if (cmd->ResponseLen < sizeof (EXT_HBA_NODE)) {
788 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
789 		cmd->DetailStatus = sizeof (EXT_HBA_NODE);
790 		EL(ha, "failed, ResponseLen < EXT_HBA_NODE, "
791 		    "Len=%xh\n", cmd->ResponseLen);
792 		cmd->ResponseLen = 0;
793 		return;
794 	}
795 
796 	/* fill in the values */
797 
798 	bcopy(ha->loginparams.node_ww_name.raw_wwn, tmp_node.WWNN,
799 	    EXT_DEF_WWN_NAME_SIZE);
800 
801 	(void) sprintf((char *)(tmp_node.Manufacturer), "QLogic Corporation");
802 
803 	(void) sprintf((char *)(tmp_node.Model), "%x", ha->device_id);
804 
805 	bcopy(&tmp_node.WWNN[5], tmp_node.SerialNum, 3);
806 
807 	(void) sprintf((char *)(tmp_node.DriverVersion), QL_VERSION);
808 
809 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
810 		size_t		verlen;
811 		uint16_t	w;
812 		char		*tmpptr;
813 
814 		verlen = strlen((char *)(tmp_node.DriverVersion));
815 		if (verlen + 5 > EXT_DEF_MAX_STR_SIZE) {
816 			EL(ha, "failed, No room for fpga version string\n");
817 		} else {
818 			w = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
819 			    (uint16_t *)
820 			    (ha->sbus_fpga_iobase + FPGA_REVISION));
821 
822 			tmpptr = (char *)&(tmp_node.DriverVersion[verlen+1]);
823 			if (tmpptr == NULL) {
824 				EL(ha, "Unable to insert fpga version str\n");
825 			} else {
826 				(void) sprintf(tmpptr, "%d.%d",
827 				    ((w & 0xf0) >> 4), (w & 0x0f));
828 				tmp_node.DriverAttr |= EXT_CC_HBA_NODE_SBUS;
829 			}
830 		}
831 	}
832 
833 	(void) sprintf((char *)(tmp_node.FWVersion), "%01d.%02d.%02d",
834 	    ha->fw_major_version, ha->fw_minor_version,
835 	    ha->fw_subminor_version);
836 
837 	if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
838 		switch (ha->fw_attributes) {
839 		case FWATTRIB_EF:
840 			(void) strcat((char *)(tmp_node.FWVersion), " EF");
841 			break;
842 		case FWATTRIB_TP:
843 			(void) strcat((char *)(tmp_node.FWVersion), " TP");
844 			break;
845 		case FWATTRIB_IP:
846 			(void) strcat((char *)(tmp_node.FWVersion), " IP");
847 			break;
848 		case FWATTRIB_IPX:
849 			(void) strcat((char *)(tmp_node.FWVersion), " IPX");
850 			break;
851 		case FWATTRIB_FL:
852 			(void) strcat((char *)(tmp_node.FWVersion), " FL");
853 			break;
854 		case FWATTRIB_FPX:
855 			(void) strcat((char *)(tmp_node.FWVersion), " FLX");
856 			break;
857 		default:
858 			break;
859 		}
860 	}
861 
862 	/* FCode version. */
863 	/*LINTED [Solaris DDI_DEV_T_ANY Lint error]*/
864 	if (ddi_getlongprop(DDI_DEV_T_ANY, ha->dip, PROP_LEN_AND_VAL_ALLOC |
865 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version", (caddr_t)&bufp,
866 	    (int *)&len) == DDI_PROP_SUCCESS) {
867 		if (len < EXT_DEF_MAX_STR_SIZE) {
868 			bcopy(bufp, tmp_node.OptRomVersion, len);
869 		} else {
870 			bcopy(bufp, tmp_node.OptRomVersion,
871 			    EXT_DEF_MAX_STR_SIZE - 1);
872 			tmp_node.OptRomVersion[EXT_DEF_MAX_STR_SIZE - 1] =
873 			    '\0';
874 		}
875 		kmem_free(bufp, len);
876 	} else {
877 		(void) sprintf((char *)tmp_node.OptRomVersion, "0");
878 	}
879 	tmp_node.PortCount = 1;
880 	tmp_node.InterfaceType = EXT_DEF_FC_INTF_TYPE;
881 
882 	if (ddi_copyout((void *)&tmp_node,
883 	    (void *)(uintptr_t)(cmd->ResponseAdr),
884 	    sizeof (EXT_HBA_NODE), mode) != 0) {
885 		cmd->Status = EXT_STATUS_COPY_ERR;
886 		cmd->ResponseLen = 0;
887 		EL(ha, "failed, ddi_copyout\n");
888 	} else {
889 		cmd->ResponseLen = sizeof (EXT_HBA_NODE);
890 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
891 	}
892 }
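
/*
 * A sketch of consuming the EXT_HBA_NODE filled in above; the printf is
 * user-land illustration only.
 *
 *	(void) printf("%s model %s fw %s fcode %s\n", node.Manufacturer,
 *	    node.Model, node.FWVersion, node.OptRomVersion);
 */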
893 
894 /*
895  * ql_qry_hba_port
896  *	Performs EXT_SC_QUERY_HBA_PORT subfunction.
897  *
898  * Input:
899  *	ha:	adapter state pointer.
900  *	cmd:	EXT_IOCTL cmd struct pointer.
901  *	mode:	flags.
902  *
903  * Returns:
904  *	None, request status indicated in cmd->Status.
905  *
906  * Context:
907  *	Kernel context.
908  */
909 static void
910 ql_qry_hba_port(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
911 {
912 	ql_link_t	*link;
913 	ql_tgt_t	*tq;
915 	EXT_HBA_PORT	tmp_port = {0};
916 	int		rval;
917 	uint16_t	port_cnt, tgt_cnt, index;
918 
919 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
920 
921 	if (cmd->ResponseLen < sizeof (EXT_HBA_PORT)) {
922 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
923 		cmd->DetailStatus = sizeof (EXT_HBA_PORT);
924 		EL(ha, "failed, ResponseLen < EXT_HBA_NODE, Len=%xh\n",
925 		    cmd->ResponseLen);
926 		cmd->ResponseLen = 0;
927 		return;
928 	}
929 
930 	/* fill in the values */
931 
932 	bcopy(ha->loginparams.nport_ww_name.raw_wwn, tmp_port.WWPN,
933 	    EXT_DEF_WWN_NAME_SIZE);
934 	tmp_port.Id[0] = 0;
935 	tmp_port.Id[1] = ha->d_id.b.domain;
936 	tmp_port.Id[2] = ha->d_id.b.area;
937 	tmp_port.Id[3] = ha->d_id.b.al_pa;
938 
939 	/* For now this is an initiator-only driver. */
940 	tmp_port.Type = EXT_DEF_INITIATOR_DEV;
941 
942 	if (ha->task_daemon_flags & LOOP_DOWN) {
943 		tmp_port.State = EXT_DEF_HBA_LOOP_DOWN;
944 	} else if (DRIVER_SUSPENDED(ha)) {
945 		tmp_port.State = EXT_DEF_HBA_SUSPENDED;
946 	} else {
947 		tmp_port.State = EXT_DEF_HBA_OK;
948 	}
949 
950 	if (ha->flags & POINT_TO_POINT) {
951 		tmp_port.Mode = EXT_DEF_P2P_MODE;
952 	} else {
953 		tmp_port.Mode = EXT_DEF_LOOP_MODE;
954 	}
955 	/*
956 	 * fill in the portspeed values.
957 	 *
958 	 * default to not yet negotiated state
959 	 */
960 	tmp_port.PortSpeed = EXT_PORTSPEED_NOT_NEGOTIATED;
961 
962 	if (tmp_port.State == EXT_DEF_HBA_OK) {
963 		switch (ha->iidma_rate) {
964 		case IIDMA_RATE_1GB:
965 			tmp_port.PortSpeed = EXT_DEF_PORTSPEED_1GBIT;
966 			break;
967 		case IIDMA_RATE_2GB:
968 			tmp_port.PortSpeed = EXT_DEF_PORTSPEED_2GBIT;
969 			break;
970 		case IIDMA_RATE_4GB:
971 			tmp_port.PortSpeed = EXT_DEF_PORTSPEED_4GBIT;
972 			break;
973 		case IIDMA_RATE_8GB:
974 			tmp_port.PortSpeed = EXT_DEF_PORTSPEED_8GBIT;
975 			break;
976 		case IIDMA_RATE_10GB:
977 			tmp_port.PortSpeed = EXT_DEF_PORTSPEED_10GBIT;
978 			break;
979 		default:
980 			tmp_port.PortSpeed = EXT_DEF_PORTSPEED_UNKNOWN;
981 			EL(ha, "failed, data rate=%xh\n", mr.mb[1]);
982 			break;
983 		}
984 	}
985 
986 	/* Report all supported port speeds */
987 	if (CFG_IST(ha, CFG_CTRL_25XX)) {
988 		tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_8GBIT |
989 		    EXT_DEF_PORTSPEED_4GBIT | EXT_DEF_PORTSPEED_2GBIT |
990 		    EXT_DEF_PORTSPEED_1GBIT);
991 		/*
992 		 * Correct supported speeds based on type of
993 		 * sfp that is present
994 		 */
995 		switch (ha->sfp_stat) {
996 		case 1:
997 			/* no sfp detected */
998 			break;
999 		case 2:
1000 		case 4:
1001 			/* 4GB sfp */
1002 			tmp_port.PortSupportedSpeed &=
1003 			    ~EXT_DEF_PORTSPEED_8GBIT;
1004 			break;
1005 		case 3:
1006 		case 5:
1007 			/* 8GB sfp */
1008 			tmp_port.PortSupportedSpeed &=
1009 			    ~EXT_DEF_PORTSPEED_1GBIT;
1010 			break;
1011 		default:
1012 			EL(ha, "sfp_stat: %xh\n", ha->sfp_stat);
1013 			break;
1014 
1015 		}
1016 	} else if (CFG_IST(ha, CFG_CTRL_8081)) {
1017 		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_10GBIT;
1018 	} else if (CFG_IST(ha, CFG_CTRL_2422)) {
1019 		tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_4GBIT |
1020 		    EXT_DEF_PORTSPEED_2GBIT | EXT_DEF_PORTSPEED_1GBIT);
1021 	} else if (CFG_IST(ha, CFG_CTRL_2300)) {
1022 		tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_2GBIT |
1023 		    EXT_DEF_PORTSPEED_1GBIT);
1024 	} else if (CFG_IST(ha, CFG_CTRL_6322)) {
1025 		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_2GBIT;
1026 	} else if (CFG_IST(ha, CFG_CTRL_2200)) {
1027 		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_1GBIT;
1028 	} else {
1029 		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_UNKNOWN;
1030 		EL(ha, "unknown HBA type: %xh\n", ha->device_id);
1031 	}
1032 	tmp_port.LinkState2 = LSB(ha->sfp_stat);
1033 	port_cnt = 0;
1034 	tgt_cnt = 0;
1035 
1036 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
1037 		for (link = ha->dev[index].first; link != NULL;
1038 		    link = link->next) {
1039 			tq = link->base_address;
1040 
1041 			if (!VALID_TARGET_ID(ha, tq->loop_id)) {
1042 				continue;
1043 			}
1044 
1045 			port_cnt++;
1046 			if ((tq->flags & TQF_INITIATOR_DEVICE) == 0) {
1047 				tgt_cnt++;
1048 			}
1049 		}
1050 	}
1051 
1052 	tmp_port.DiscPortCount = port_cnt;
1053 	tmp_port.DiscTargetCount = tgt_cnt;
1054 
1055 	tmp_port.DiscPortNameType = EXT_DEF_USE_NODE_NAME;
1056 
1057 	rval = ddi_copyout((void *)&tmp_port,
1058 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1059 	    sizeof (EXT_HBA_PORT), mode);
1060 	if (rval != 0) {
1061 		cmd->Status = EXT_STATUS_COPY_ERR;
1062 		cmd->ResponseLen = 0;
1063 		EL(ha, "failed, ddi_copyout\n");
1064 	} else {
1065 		cmd->ResponseLen = sizeof (EXT_HBA_PORT);
1066 		QL_PRINT_9(CE_CONT, "(%d): done, ports=%d, targets=%d\n",
1067 		    ha->instance, port_cnt, tgt_cnt);
1068 	}
1069 }
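
/*
 * A sketch of decoding the speed fields returned above; PortSpeed holds a
 * single EXT_DEF_PORTSPEED_* value (or EXT_PORTSPEED_NOT_NEGOTIATED), while
 * PortSupportedSpeed is a bitmask of EXT_DEF_PORTSPEED_* bits.
 *
 *	if (port.PortSupportedSpeed & EXT_DEF_PORTSPEED_8GBIT)
 *		8Gb is supported by this HBA/SFP combination.
 *	if (port.PortSpeed == EXT_DEF_PORTSPEED_4GBIT)
 *		the link is currently running at 4Gb.
 */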
1070 
1071 /*
1072  * ql_qry_disc_port
1073  *	Performs EXT_SC_QUERY_DISC_PORT subfunction.
1074  *
1075  * Input:
1076  *	ha:	adapter state pointer.
1077  *	cmd:	EXT_IOCTL cmd struct pointer.
1078  *	mode:	flags.
1079  *
1080  *	cmd->Instance = Port instance in fcport chain.
1081  *
1082  * Returns:
1083  *	None, request status indicated in cmd->Status.
1084  *
1085  * Context:
1086  *	Kernel context.
1087  */
1088 static void
1089 ql_qry_disc_port(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1090 {
1091 	EXT_DISC_PORT	tmp_port = {0};
1092 	ql_link_t	*link;
1093 	ql_tgt_t	*tq;
1094 	uint16_t	index;
1095 	uint16_t	inst = 0;
1096 
1097 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1098 
1099 	if (cmd->ResponseLen < sizeof (EXT_DISC_PORT)) {
1100 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1101 		cmd->DetailStatus = sizeof (EXT_DISC_PORT);
1102 		EL(ha, "failed, ResponseLen < EXT_DISC_PORT, Len=%xh\n",
1103 		    cmd->ResponseLen);
1104 		cmd->ResponseLen = 0;
1105 		return;
1106 	}
1107 
1108 	for (link = NULL, index = 0;
1109 	    index < DEVICE_HEAD_LIST_SIZE && link == NULL; index++) {
1110 		for (link = ha->dev[index].first; link != NULL;
1111 		    link = link->next) {
1112 			tq = link->base_address;
1113 
1114 			if (!VALID_TARGET_ID(ha, tq->loop_id)) {
1115 				continue;
1116 			}
1117 			if (inst != cmd->Instance) {
1118 				inst++;
1119 				continue;
1120 			}
1121 
1122 			/* fill in the values */
1123 			bcopy(tq->node_name, tmp_port.WWNN,
1124 			    EXT_DEF_WWN_NAME_SIZE);
1125 			bcopy(tq->port_name, tmp_port.WWPN,
1126 			    EXT_DEF_WWN_NAME_SIZE);
1127 
1128 			break;
1129 		}
1130 	}
1131 
1132 	if (link == NULL) {
1133 		/* no matching device */
1134 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1135 		EL(ha, "failed, port not found port=%d\n", cmd->Instance);
1136 		cmd->ResponseLen = 0;
1137 		return;
1138 	}
1139 
1140 	tmp_port.Id[0] = 0;
1141 	tmp_port.Id[1] = tq->d_id.b.domain;
1142 	tmp_port.Id[2] = tq->d_id.b.area;
1143 	tmp_port.Id[3] = tq->d_id.b.al_pa;
1144 
1145 	tmp_port.Type = 0;
1146 	if (tq->flags & TQF_INITIATOR_DEVICE) {
1147 		tmp_port.Type = (uint16_t)(tmp_port.Type |
1148 		    EXT_DEF_INITIATOR_DEV);
1149 	} else if ((tq->flags & TQF_TAPE_DEVICE) == 0) {
1150 		(void) ql_inq_scan(ha, tq, 1);
1151 	} else if (tq->flags & TQF_TAPE_DEVICE) {
1152 		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_TAPE_DEV);
1153 	}
1154 
1155 	if (tq->flags & TQF_FABRIC_DEVICE) {
1156 		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_FABRIC_DEV);
1157 	} else {
1158 		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_TARGET_DEV);
1159 	}
1160 
1161 	tmp_port.Status = 0;
1162 	tmp_port.Bus = 0;  /* Hard-coded for Solaris */
1163 
1164 	bcopy(tq->port_name, &tmp_port.TargetId, 8);
1165 
1166 	if (ddi_copyout((void *)&tmp_port,
1167 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1168 	    sizeof (EXT_DISC_PORT), mode) != 0) {
1169 		cmd->Status = EXT_STATUS_COPY_ERR;
1170 		cmd->ResponseLen = 0;
1171 		EL(ha, "failed, ddi_copyout\n");
1172 	} else {
1173 		cmd->ResponseLen = sizeof (EXT_DISC_PORT);
1174 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1175 	}
1176 }
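
/*
 * A sketch of walking the discovered-port list with this subfunction;
 * cmd->Instance selects the Nth valid entry and DiscPortCount from
 * EXT_SC_QUERY_HBA_PORT bounds the walk.  The loop is user-land
 * illustration, not driver code.
 *
 *	for (i = 0; i < hba_port.DiscPortCount; i++) {
 *		ext.SubCode = EXT_SC_QUERY_DISC_PORT;
 *		ext.Instance = i;
 *		ext.ResponseAdr = (uint64_t)(uintptr_t)&disc;
 *		ext.ResponseLen = sizeof (EXT_DISC_PORT);
 *		(void) ioctl(fd, EXT_CC_QUERY, &ext);
 *	}
 */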
1177 
1178 /*
1179  * ql_qry_disc_tgt
1180  *	Performs EXT_SC_QUERY_DISC_TGT subfunction.
1181  *
1182  * Input:
1183  *	ha:		adapter state pointer.
1184  *	cmd:		EXT_IOCTL cmd struct pointer.
1185  *	mode:		flags.
1186  *
1187  *	cmd->Instance = Port instance in fcport chain.
1188  *
1189  * Returns:
1190  *	None, request status indicated in cmd->Status.
1191  *
1192  * Context:
1193  *	Kernel context.
1194  */
1195 static void
1196 ql_qry_disc_tgt(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1197 {
1198 	EXT_DISC_TARGET	tmp_tgt = {0};
1199 	ql_link_t	*link;
1200 	ql_tgt_t	*tq;
1201 	uint16_t	index;
1202 	uint16_t	inst = 0;
1203 
1204 	QL_PRINT_9(CE_CONT, "(%d): started, target=%d\n", ha->instance,
1205 	    cmd->Instance);
1206 
1207 	if (cmd->ResponseLen < sizeof (EXT_DISC_TARGET)) {
1208 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1209 		cmd->DetailStatus = sizeof (EXT_DISC_TARGET);
1210 		EL(ha, "failed, ResponseLen < EXT_DISC_TARGET, Len=%xh\n",
1211 		    cmd->ResponseLen);
1212 		cmd->ResponseLen = 0;
1213 		return;
1214 	}
1215 
1216 	/* Scan port list for requested target and fill in the values */
1217 	for (link = NULL, index = 0;
1218 	    index < DEVICE_HEAD_LIST_SIZE && link == NULL; index++) {
1219 		for (link = ha->dev[index].first; link != NULL;
1220 		    link = link->next) {
1221 			tq = link->base_address;
1222 
1223 			if (!VALID_TARGET_ID(ha, tq->loop_id) ||
1224 			    tq->flags & TQF_INITIATOR_DEVICE) {
1225 				continue;
1226 			}
1227 			if (inst != cmd->Instance) {
1228 				inst++;
1229 				continue;
1230 			}
1231 
1232 			/* fill in the values */
1233 			bcopy(tq->node_name, tmp_tgt.WWNN,
1234 			    EXT_DEF_WWN_NAME_SIZE);
1235 			bcopy(tq->port_name, tmp_tgt.WWPN,
1236 			    EXT_DEF_WWN_NAME_SIZE);
1237 
1238 			break;
1239 		}
1240 	}
1241 
1242 	if (link == NULL) {
1243 		/* no matching device */
1244 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1245 		cmd->DetailStatus = EXT_DSTATUS_TARGET;
1246 		EL(ha, "failed, not found target=%d\n", cmd->Instance);
1247 		cmd->ResponseLen = 0;
1248 		return;
1249 	}
1250 	tmp_tgt.Id[0] = 0;
1251 	tmp_tgt.Id[1] = tq->d_id.b.domain;
1252 	tmp_tgt.Id[2] = tq->d_id.b.area;
1253 	tmp_tgt.Id[3] = tq->d_id.b.al_pa;
1254 
1255 	tmp_tgt.LunCount = (uint16_t)ql_lun_count(ha, tq);
1256 
1257 	if ((tq->flags & TQF_TAPE_DEVICE) == 0) {
1258 		(void) ql_inq_scan(ha, tq, 1);
1259 	}
1260 
1261 	tmp_tgt.Type = 0;
1262 	if (tq->flags & TQF_TAPE_DEVICE) {
1263 		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_TAPE_DEV);
1264 	}
1265 
1266 	if (tq->flags & TQF_FABRIC_DEVICE) {
1267 		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_FABRIC_DEV);
1268 	} else {
1269 		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_TARGET_DEV);
1270 	}
1271 
1272 	tmp_tgt.Status = 0;
1273 
1274 	tmp_tgt.Bus = 0;  /* Hard-coded for Solaris. */
1275 
1276 	bcopy(tq->port_name, &tmp_tgt.TargetId, 8);
1277 
1278 	if (ddi_copyout((void *)&tmp_tgt,
1279 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1280 	    sizeof (EXT_DISC_TARGET), mode) != 0) {
1281 		cmd->Status = EXT_STATUS_COPY_ERR;
1282 		cmd->ResponseLen = 0;
1283 		EL(ha, "failed, ddi_copyout\n");
1284 	} else {
1285 		cmd->ResponseLen = sizeof (EXT_DISC_TARGET);
1286 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1287 	}
1288 }
1289 
1290 /*
1291  * ql_qry_fw
1292  *	Performs EXT_SC_QUERY_FW subfunction.
1293  *
1294  * Input:
1295  *	ha:	adapter state pointer.
1296  *	cmd:	EXT_IOCTL cmd struct pointer.
1297  *	mode:	flags.
1298  *
1299  * Returns:
1300  *	None, request status indicated in cmd->Status.
1301  *
1302  * Context:
1303  *	Kernel context.
1304  */
1305 static void
1306 ql_qry_fw(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1307 {
1308 	EXT_FW		fw_info = {0};
1309 
1310 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1311 
1312 	if (cmd->ResponseLen < sizeof (EXT_FW)) {
1313 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1314 		cmd->DetailStatus = sizeof (EXT_FW);
1315 		EL(ha, "failed, ResponseLen < EXT_FW, Len=%xh\n",
1316 		    cmd->ResponseLen);
1317 		cmd->ResponseLen = 0;
1318 		return;
1319 	}
1320 
1321 	(void) sprintf((char *)(fw_info.Version), "%d.%02d.%02d",
1322 	    ha->fw_major_version, ha->fw_minor_version,
1323 	    ha->fw_subminor_version);
1324 
1325 	fw_info.Attrib = ha->fw_attributes;
1326 
1327 	if (ddi_copyout((void *)&fw_info,
1328 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1329 	    sizeof (EXT_FW), mode) != 0) {
1330 		cmd->Status = EXT_STATUS_COPY_ERR;
1331 		cmd->ResponseLen = 0;
1332 		EL(ha, "failed, ddi_copyout\n");
1333 		return;
1334 	} else {
1335 		cmd->ResponseLen = sizeof (EXT_FW);
1336 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1337 	}
1338 }
1339 
1340 /*
1341  * ql_qry_chip
1342  *	Performs EXT_SC_QUERY_CHIP subfunction.
1343  *
1344  * Input:
1345  *	ha:	adapter state pointer.
1346  *	cmd:	EXT_IOCTL cmd struct pointer.
1347  *	mode:	flags.
1348  *
1349  * Returns:
1350  *	None, request status indicated in cmd->Status.
1351  *
1352  * Context:
1353  *	Kernel context.
1354  */
1355 static void
1356 ql_qry_chip(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1357 {
1358 	EXT_CHIP	chip = {0};
1359 
1360 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1361 
1362 	if (cmd->ResponseLen < sizeof (EXT_CHIP)) {
1363 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1364 		cmd->DetailStatus = sizeof (EXT_CHIP);
1365 		EL(ha, "failed, ResponseLen < EXT_CHIP, Len=%xh\n",
1366 		    cmd->ResponseLen);
1367 		cmd->ResponseLen = 0;
1368 		return;
1369 	}
1370 
1371 	chip.VendorId = ha->ven_id;
1372 	chip.DeviceId = ha->device_id;
1373 	chip.SubVendorId = ha->subven_id;
1374 	chip.SubSystemId = ha->subsys_id;
1375 	chip.IoAddr = ql_pci_config_get32(ha, PCI_CONF_BASE0);
1376 	chip.IoAddrLen = 0x100;
1377 	chip.MemAddr = ql_pci_config_get32(ha, PCI_CONF_BASE1);
1378 	chip.MemAddrLen = 0x100;
1379 	chip.ChipRevID = ha->rev_id;
1380 	if (ha->flags & FUNCTION_1) {
1381 		chip.FuncNo = 1;
1382 	}
1383 
1384 	if (ddi_copyout((void *)&chip,
1385 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1386 	    sizeof (EXT_CHIP), mode) != 0) {
1387 		cmd->Status = EXT_STATUS_COPY_ERR;
1388 		cmd->ResponseLen = 0;
1389 		EL(ha, "failed, ddi_copyout\n");
1390 	} else {
1391 		cmd->ResponseLen = sizeof (EXT_CHIP);
1392 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1393 	}
1394 }
1395 
1396 /*
1397  * ql_qry_driver
1398  *	Performs EXT_SC_QUERY_DRIVER subfunction.
1399  *
1400  * Input:
1401  *	ha:	adapter state pointer.
1402  *	cmd:	EXT_IOCTL cmd struct pointer.
1403  *	mode:	flags.
1404  *
1405  * Returns:
1406  *	None, request status indicated in cmd->Status.
1407  *
1408  * Context:
1409  *	Kernel context.
1410  */
1411 static void
1412 ql_qry_driver(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1413 {
1414 	EXT_DRIVER	qd = {0};
1415 
1416 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1417 
1418 	if (cmd->ResponseLen < sizeof (EXT_DRIVER)) {
1419 		cmd->Status = EXT_STATUS_DATA_OVERRUN;
1420 		cmd->DetailStatus = sizeof (EXT_DRIVER);
1421 		EL(ha, "failed, ResponseLen < EXT_DRIVER, Len=%xh\n",
1422 		    cmd->ResponseLen);
1423 		cmd->ResponseLen = 0;
1424 		return;
1425 	}
1426 
1427 	(void) strcpy((void *)&qd.Version[0], QL_VERSION);
1428 	qd.NumOfBus = 1;	/* Fixed for Solaris */
1429 	qd.TargetsPerBus = (uint16_t)
1430 	    (CFG_IST(ha, (CFG_CTRL_24258081 | CFG_EXT_FW_INTERFACE)) ?
1431 	    MAX_24_FIBRE_DEVICES : MAX_22_FIBRE_DEVICES);
1432 	qd.LunsPerTarget = 2030;
1433 	qd.MaxTransferLen = QL_DMA_MAX_XFER_SIZE;
1434 	qd.MaxDataSegments = QL_DMA_SG_LIST_LENGTH;
1435 
1436 	if (ddi_copyout((void *)&qd, (void *)(uintptr_t)cmd->ResponseAdr,
1437 	    sizeof (EXT_DRIVER), mode) != 0) {
1438 		cmd->Status = EXT_STATUS_COPY_ERR;
1439 		cmd->ResponseLen = 0;
1440 		EL(ha, "failed, ddi_copyout\n");
1441 	} else {
1442 		cmd->ResponseLen = sizeof (EXT_DRIVER);
1443 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1444 	}
1445 }
1446 
1447 /*
1448  * ql_fcct
1449  *	IOCTL management server FC-CT passthrough.
1450  *
1451  * Input:
1452  *	ha:	adapter state pointer.
1453  *	cmd:	User space CT arguments pointer.
1454  *	mode:	flags.
1455  *
1456  * Returns:
1457  *	None, request status indicated in cmd->Status.
1458  *
1459  * Context:
1460  *	Kernel context.
1461  */
1462 static void
1463 ql_fcct(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1464 {
1465 	ql_mbx_iocb_t		*pkt;
1466 	ql_mbx_data_t		mr;
1467 	dma_mem_t		*dma_mem;
1468 	caddr_t			pld;
1469 	uint32_t		pkt_size, pld_byte_cnt, *long_ptr;
1470 	int			rval;
1471 	ql_ct_iu_preamble_t	*ct;
1472 	ql_xioctl_t		*xp = ha->xioctl;
1473 	ql_tgt_t		tq;
1474 	uint16_t		comp_status, loop_id;
1475 
1476 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1477 
1478 	/* FC-CT passthrough requires a fabric (switch) connection. */
1479 	if ((ha->topology & QL_SNS_CONNECTION) == 0) {
1480 		EL(ha, "failed, No switch\n");
1481 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1482 		cmd->ResponseLen = 0;
1483 		return;
1484 	}
1485 
1486 	if (DRIVER_SUSPENDED(ha)) {
1487 		EL(ha, "failed, LOOP_NOT_READY\n");
1488 		cmd->Status = EXT_STATUS_BUSY;
1489 		cmd->ResponseLen = 0;
1490 		return;
1491 	}
1492 
1493 	/* Login management server device. */
1494 	if ((xp->flags & QL_MGMT_SERVER_LOGIN) == 0) {
1495 		tq.d_id.b.al_pa = 0xfa;
1496 		tq.d_id.b.area = 0xff;
1497 		tq.d_id.b.domain = 0xff;
1498 		tq.loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_24258081) ?
1499 		    MANAGEMENT_SERVER_24XX_LOOP_ID :
1500 		    MANAGEMENT_SERVER_LOOP_ID);
1501 		rval = ql_login_fport(ha, &tq, tq.loop_id, LFF_NO_PRLI, &mr);
1502 		if (rval != QL_SUCCESS) {
1503 			EL(ha, "failed, server login\n");
1504 			cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1505 			cmd->ResponseLen = 0;
1506 			return;
1507 		} else {
1508 			xp->flags |= QL_MGMT_SERVER_LOGIN;
1509 		}
1510 	}
1511 
1512 	QL_PRINT_9(CE_CONT, "(%d): cmd\n", ha->instance);
1513 	QL_DUMP_9(cmd, 8, sizeof (EXT_IOCTL));
1514 
1515 	/* Allocate a DMA Memory Descriptor */
1516 	dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP);
1517 	if (dma_mem == NULL) {
1518 		EL(ha, "failed, kmem_zalloc\n");
1519 		cmd->Status = EXT_STATUS_NO_MEMORY;
1520 		cmd->ResponseLen = 0;
1521 		return;
1522 	}
1523 	/* Determine maximum buffer size. */
1524 	if (cmd->RequestLen < cmd->ResponseLen) {
1525 		pld_byte_cnt = cmd->ResponseLen;
1526 	} else {
1527 		pld_byte_cnt = cmd->RequestLen;
1528 	}
1529 
1530 	/* Allocate command block. */
1531 	pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + pld_byte_cnt);
1532 	pkt = kmem_zalloc(pkt_size, KM_SLEEP);
1533 	if (pkt == NULL) {
1534 		EL(ha, "failed, kmem_zalloc\n");
1535 		cmd->Status = EXT_STATUS_NO_MEMORY;
1536 		cmd->ResponseLen = 0;
1537 		return;
1538 	}
1539 	pld = (caddr_t)pkt + sizeof (ql_mbx_iocb_t);
1540 
1541 	/* Get command payload data. */
1542 	if (ql_get_buffer_data((caddr_t)(uintptr_t)cmd->RequestAdr, pld,
1543 	    cmd->RequestLen, mode) != cmd->RequestLen) {
1544 		EL(ha, "failed, get_buffer_data\n");
1545 		kmem_free(pkt, pkt_size);
1546 		cmd->Status = EXT_STATUS_COPY_ERR;
1547 		cmd->ResponseLen = 0;
1548 		return;
1549 	}
1550 
1551 	/* Get DMA memory for the IOCB */
1552 	if (ql_get_dma_mem(ha, dma_mem, pkt_size, LITTLE_ENDIAN_DMA,
1553 	    QL_DMA_RING_ALIGN) != QL_SUCCESS) {
1554 		cmn_err(CE_WARN, "%s(%d): DMA memory "
1555 		    "alloc failed", QL_NAME, ha->instance);
1556 		kmem_free(pkt, pkt_size);
1557 		kmem_free(dma_mem, sizeof (dma_mem_t));
1558 		cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
1559 		cmd->ResponseLen = 0;
1560 		return;
1561 	}
1562 
1563 	/* Copy outgoing payload data to IOCB DMA buffer. */
1564 	ddi_rep_put8(dma_mem->acc_handle, (uint8_t *)pld,
1565 	    (uint8_t *)dma_mem->bp, pld_byte_cnt, DDI_DEV_AUTOINCR);
1566 
1567 	/* Sync IOCB DMA buffer. */
1568 	(void) ddi_dma_sync(dma_mem->dma_handle, 0, pld_byte_cnt,
1569 	    DDI_DMA_SYNC_FORDEV);
1570 
1571 	/*
1572 	 * Setup IOCB
1573 	 */
1574 	ct = (ql_ct_iu_preamble_t *)pld;
1575 	if (CFG_IST(ha, CFG_CTRL_24258081)) {
1576 		pkt->ms24.entry_type = CT_PASSTHRU_TYPE;
1577 		pkt->ms24.entry_count = 1;
1578 
1579 		pkt->ms24.vp_index = ha->vp_index;
1580 
1581 		/* Set loop ID */
1582 		pkt->ms24.n_port_hdl = (uint16_t)
1583 		    (ct->gs_type == GS_TYPE_DIR_SERVER ?
1584 		    LE_16(SNS_24XX_HDL) :
1585 		    LE_16(MANAGEMENT_SERVER_24XX_LOOP_ID));
1586 
1587 		/* Set ISP command timeout. */
1588 		pkt->ms24.timeout = LE_16(120);
1589 
1590 		/* Set cmd/response data segment counts. */
1591 		pkt->ms24.cmd_dseg_count = LE_16(1);
1592 		pkt->ms24.resp_dseg_count = LE_16(1);
1593 
1594 		/* Load ct cmd byte count. */
1595 		pkt->ms24.cmd_byte_count = LE_32(cmd->RequestLen);
1596 
1597 		/* Load ct rsp byte count. */
1598 		pkt->ms24.resp_byte_count = LE_32(cmd->ResponseLen);
1599 
1600 		long_ptr = (uint32_t *)&pkt->ms24.dseg_0_address;
1601 
1602 		/* Load MS command entry data segments. */
1603 		*long_ptr++ = (uint32_t)
1604 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1605 		*long_ptr++ = (uint32_t)
1606 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1607 		*long_ptr++ = (uint32_t)(LE_32(cmd->RequestLen));
1608 
1609 		/* Load MS response entry data segments. */
1610 		*long_ptr++ = (uint32_t)
1611 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1612 		*long_ptr++ = (uint32_t)
1613 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1614 		*long_ptr = (uint32_t)LE_32(cmd->ResponseLen);
1615 
1616 		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
1617 		    sizeof (ql_mbx_iocb_t));
1618 
1619 		comp_status = (uint16_t)LE_16(pkt->sts24.comp_status);
1620 		if (comp_status == CS_DATA_UNDERRUN) {
1621 			if ((BE_16(ct->max_residual_size)) == 0) {
1622 				comp_status = CS_COMPLETE;
1623 			}
1624 		}
1625 
1626 		if (rval != QL_SUCCESS || (pkt->sts24.entry_status & 0x3c) !=
1627 		    0) {
1628 			EL(ha, "failed, I/O timeout or "
1629 			    "es=%xh, ss_l=%xh, rval=%xh\n",
1630 			    pkt->sts24.entry_status,
1631 			    pkt->sts24.scsi_status_l, rval);
1632 			kmem_free(pkt, pkt_size);
1633 			ql_free_dma_resource(ha, dma_mem);
1634 			kmem_free(dma_mem, sizeof (dma_mem_t));
1635 			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
1636 			cmd->ResponseLen = 0;
1637 			return;
1638 		}
1639 	} else {
1640 		pkt->ms.entry_type = MS_TYPE;
1641 		pkt->ms.entry_count = 1;
1642 
1643 		/* Set loop ID */
1644 		loop_id = (uint16_t)(ct->gs_type == GS_TYPE_DIR_SERVER ?
1645 		    SIMPLE_NAME_SERVER_LOOP_ID : MANAGEMENT_SERVER_LOOP_ID);
1646 		if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
1647 			pkt->ms.loop_id_l = LSB(loop_id);
1648 			pkt->ms.loop_id_h = MSB(loop_id);
1649 		} else {
1650 			pkt->ms.loop_id_h = LSB(loop_id);
1651 		}
1652 
1653 		/* Set ISP command timeout. */
1654 		pkt->ms.timeout = LE_16(120);
1655 
1656 		/* Set data segment counts. */
1657 		pkt->ms.cmd_dseg_count_l = 1;
1658 		pkt->ms.total_dseg_count = LE_16(2);
1659 
1660 		/* Response total byte count. */
1661 		pkt->ms.resp_byte_count = LE_32(cmd->ResponseLen);
1662 		pkt->ms.dseg_1_length = LE_32(cmd->ResponseLen);
1663 
1664 		/* Command total byte count. */
1665 		pkt->ms.cmd_byte_count = LE_32(cmd->RequestLen);
1666 		pkt->ms.dseg_0_length = LE_32(cmd->RequestLen);
1667 
1668 		/* Load command/response data segments. */
1669 		pkt->ms.dseg_0_address[0] = (uint32_t)
1670 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1671 		pkt->ms.dseg_0_address[1] = (uint32_t)
1672 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1673 		pkt->ms.dseg_1_address[0] = (uint32_t)
1674 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
1675 		pkt->ms.dseg_1_address[1] = (uint32_t)
1676 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
1677 
1678 		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
1679 		    sizeof (ql_mbx_iocb_t));
1680 
1681 		comp_status = (uint16_t)LE_16(pkt->sts.comp_status);
1682 		if (comp_status == CS_DATA_UNDERRUN) {
1683 			if ((BE_16(ct->max_residual_size)) == 0) {
1684 				comp_status = CS_COMPLETE;
1685 			}
1686 		}
1687 		if (rval != QL_SUCCESS || (pkt->sts.entry_status & 0x7e) != 0) {
1688 			EL(ha, "failed, I/O timeout or "
1689 			    "es=%xh, rval=%xh\n", pkt->sts.entry_status, rval);
1690 			kmem_free(pkt, pkt_size);
1691 			ql_free_dma_resource(ha, dma_mem);
1692 			kmem_free(dma_mem, sizeof (dma_mem_t));
1693 			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
1694 			cmd->ResponseLen = 0;
1695 			return;
1696 		}
1697 	}
1698 
1699 	/* Sync incoming DMA buffer. */
1700 	(void) ddi_dma_sync(dma_mem->dma_handle, 0,
1701 	    pld_byte_cnt, DDI_DMA_SYNC_FORKERNEL);
1702 	/* Copy incoming DMA data. */
1703 	ddi_rep_get8(dma_mem->acc_handle, (uint8_t *)pld,
1704 	    (uint8_t *)dma_mem->bp, pld_byte_cnt,
1705 	    DDI_DEV_AUTOINCR);
1706 
1707 	/* Copy response payload from DMA buffer to application. */
1708 	if (cmd->ResponseLen != 0) {
1709 		QL_PRINT_9(CE_CONT, "(%d): ResponseLen=%d\n", ha->instance,
1710 		    cmd->ResponseLen);
1711 		QL_DUMP_9(pld, 8, cmd->ResponseLen);
1712 
1713 		/* Send response payload. */
1714 		if (ql_send_buffer_data(pld,
1715 		    (caddr_t)(uintptr_t)cmd->ResponseAdr,
1716 		    cmd->ResponseLen, mode) != cmd->ResponseLen) {
1717 			EL(ha, "failed, send_buffer_data\n");
1718 			cmd->Status = EXT_STATUS_COPY_ERR;
1719 			cmd->ResponseLen = 0;
1720 		}
1721 	}
1722 
1723 	kmem_free(pkt, pkt_size);
1724 	ql_free_dma_resource(ha, dma_mem);
1725 	kmem_free(dma_mem, sizeof (dma_mem_t));
1726 
1727 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1728 }
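
/*
 * A minimal sketch of an EXT_CC_SEND_FCCT_PASSTHRU request as consumed by
 * this routine; the CT payload itself (for example a name-server command)
 * is not shown, and ct_req/ct_rsp are illustrative buffers.  Note that the
 * routine allocates one DMA buffer sized to the larger of RequestLen and
 * ResponseLen and uses it for both directions.
 *
 *	ext.RequestAdr = (uint64_t)(uintptr_t)ct_req;
 *	ext.RequestLen = req_len;
 *	ext.ResponseAdr = (uint64_t)(uintptr_t)ct_rsp;
 *	ext.ResponseLen = rsp_len;
 *	(void) ioctl(fd, EXT_CC_SEND_FCCT_PASSTHRU, &ext);
 */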
1729 
1730 /*
1731  * ql_aen_reg
1732  *	IOCTL management server Asynchronous Event Tracking Enable/Disable.
1733  *
1734  * Input:
1735  *	ha:	adapter state pointer.
1736  *	cmd:	EXT_IOCTL cmd struct pointer.
1737  *	mode:	flags.
1738  *
1739  * Returns:
1740  *	None, request status indicated in cmd->Status.
1741  *
1742  * Context:
1743  *	Kernel context.
1744  */
1745 static void
1746 ql_aen_reg(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1747 {
1748 	EXT_REG_AEN	reg_struct;
1749 	int		rval = 0;
1750 	ql_xioctl_t	*xp = ha->xioctl;
1751 
1752 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1753 
1754 	rval = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, &reg_struct,
1755 	    cmd->RequestLen, mode);
1756 
1757 	if (rval == 0) {
1758 		if (reg_struct.Enable) {
1759 			xp->flags |= QL_AEN_TRACKING_ENABLE;
1760 		} else {
1761 			xp->flags &= ~QL_AEN_TRACKING_ENABLE;
1762 			/* Empty the queue. */
1763 			INTR_LOCK(ha);
1764 			xp->aen_q_head = 0;
1765 			xp->aen_q_tail = 0;
1766 			INTR_UNLOCK(ha);
1767 		}
1768 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1769 	} else {
1770 		cmd->Status = EXT_STATUS_COPY_ERR;
1771 		EL(ha, "failed, ddi_copyin\n");
1772 	}
1773 }
1774 
1775 /*
1776  * ql_aen_get
1777  *	IOCTL management server Asynchronous Event Record Transfer.
1778  *
1779  * Input:
1780  *	ha:	adapter state pointer.
1781  *	cmd:	EXT_IOCTL cmd struct pointer.
1782  *	mode:	flags.
1783  *
1784  * Returns:
1785  *	None, request status indicated in cmd->Status.
1786  *
1787  * Context:
1788  *	Kernel context.
1789  */
1790 static void
1791 ql_aen_get(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1792 {
1793 	uint32_t	out_size;
1794 	EXT_ASYNC_EVENT	*tmp_q;
1795 	EXT_ASYNC_EVENT	aen[EXT_DEF_MAX_AEN_QUEUE];
1796 	uint8_t		i;
1797 	uint8_t		queue_cnt;
1798 	uint8_t		request_cnt;
1799 	ql_xioctl_t	*xp = ha->xioctl;
1800 
1801 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1802 
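	/*
	 * The caller must supply room for the entire ring
	 * (EXT_DEF_MAX_AEN_QUEUE entries); otherwise the required size
	 * is returned in DetailStatus.  Queued entries are copied out
	 * starting at the head, wrapping at the end of the ring, and
	 * the ring is emptied afterwards.
	 */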
1803 	/* Compute the number of events that can be returned */
1804 	request_cnt = (uint8_t)(cmd->ResponseLen / sizeof (EXT_ASYNC_EVENT));
1805 
1806 	if (request_cnt < EXT_DEF_MAX_AEN_QUEUE) {
1807 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1808 		cmd->DetailStatus = EXT_DEF_MAX_AEN_QUEUE;
1809 		EL(ha, "failed, request_cnt < EXT_DEF_MAX_AEN_QUEUE, "
1810 		    "Len=%xh\n", request_cnt);
1811 		cmd->ResponseLen = 0;
1812 		return;
1813 	}
1814 
1815 	/* 1st: Make a local copy of the entire queue content. */
1816 	tmp_q = (EXT_ASYNC_EVENT *)xp->aen_tracking_queue;
1817 	queue_cnt = 0;
1818 
1819 	INTR_LOCK(ha);
1820 	i = xp->aen_q_head;
1821 
1822 	for (; queue_cnt < EXT_DEF_MAX_AEN_QUEUE; ) {
1823 		if (tmp_q[i].AsyncEventCode != 0) {
1824 			bcopy(&tmp_q[i], &aen[queue_cnt],
1825 			    sizeof (EXT_ASYNC_EVENT));
1826 			queue_cnt++;
1827 			tmp_q[i].AsyncEventCode = 0; /* empty out the slot */
1828 		}
1829 		if (i == xp->aen_q_tail) {
1830 			/* done. */
1831 			break;
1832 		}
1833 		i++;
1834 		if (i == EXT_DEF_MAX_AEN_QUEUE) {
1835 			i = 0;
1836 		}
1837 	}
1838 
1839 	/* Empty the queue. */
1840 	xp->aen_q_head = 0;
1841 	xp->aen_q_tail = 0;
1842 
1843 	INTR_UNLOCK(ha);
1844 
1845 	/* 2nd: Now transfer the queue content to user buffer */
1846 	/* Copy the entire queue to user's buffer. */
1847 	out_size = (uint32_t)(queue_cnt * sizeof (EXT_ASYNC_EVENT));
1848 	if (queue_cnt == 0) {
1849 		cmd->ResponseLen = 0;
1850 	} else if (ddi_copyout((void *)&aen[0],
1851 	    (void *)(uintptr_t)(cmd->ResponseAdr),
1852 	    out_size, mode) != 0) {
1853 		cmd->Status = EXT_STATUS_COPY_ERR;
1854 		cmd->ResponseLen = 0;
1855 		EL(ha, "failed, ddi_copyout\n");
1856 	} else {
1857 		cmd->ResponseLen = out_size;
1858 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1859 	}
1860 }
1861 
1862 /*
1863  * ql_enqueue_aen
1864  *
1865  * Input:
1866  *	ha:		adapter state pointer.
1867  *	event_code:	async event code of the event to add to queue.
1868  *	payload:	event payload for the queue.
1869  *	INTR_LOCK must be already obtained.
1870  *
1871  * Context:
1872  *	Interrupt or Kernel context, no mailbox commands allowed.
1873  */
1874 void
1875 ql_enqueue_aen(ql_adapter_state_t *ha, uint16_t event_code, void *payload)
1876 {
1877 	uint8_t			new_entry;	/* index to current entry */
1878 	uint16_t		*mbx;
1879 	EXT_ASYNC_EVENT		*aen_queue;
1880 	ql_xioctl_t		*xp = ha->xioctl;
1881 
1882 	QL_PRINT_9(CE_CONT, "(%d): started, event_code=%d\n", ha->instance,
1883 	    event_code);
1884 
1885 	if (xp == NULL) {
1886 		QL_PRINT_9(CE_CONT, "(%d): no context\n", ha->instance);
1887 		return;
1888 	}
1889 	aen_queue = (EXT_ASYNC_EVENT *)xp->aen_tracking_queue;
1890 
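	/*
	 * The AEN queue is a fixed-size ring of EXT_DEF_MAX_AEN_QUEUE
	 * entries; a slot whose AsyncEventCode is non-zero is occupied.
	 * If the slot at the tail is still occupied, advance the tail
	 * (and, when the ring is full, the head) with wrap-around so
	 * that the oldest event is overwritten by the newest one.
	 */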
1891 	if (aen_queue[xp->aen_q_tail].AsyncEventCode != 0) {
1892 		/* Need to change queue pointers to make room. */
1893 
1894 		/* Increment tail for adding new entry. */
1895 		xp->aen_q_tail++;
1896 		if (xp->aen_q_tail == EXT_DEF_MAX_AEN_QUEUE) {
1897 			xp->aen_q_tail = 0;
1898 		}
1899 		if (xp->aen_q_head == xp->aen_q_tail) {
1900 			/*
1901 			 * We're overwriting the oldest entry, so need to
1902 			 * update the head pointer.
1903 			 */
1904 			xp->aen_q_head++;
1905 			if (xp->aen_q_head == EXT_DEF_MAX_AEN_QUEUE) {
1906 				xp->aen_q_head = 0;
1907 			}
1908 		}
1909 	}
1910 
1911 	new_entry = xp->aen_q_tail;
1912 	aen_queue[new_entry].AsyncEventCode = event_code;
1913 
1914 	/* Update payload */
1915 	if (payload != NULL) {
1916 		switch (event_code) {
1917 		case MBA_LIP_OCCURRED:
1918 		case MBA_LOOP_UP:
1919 		case MBA_LOOP_DOWN:
1920 		case MBA_LIP_F8:
1921 		case MBA_LIP_RESET:
1922 		case MBA_PORT_UPDATE:
1923 			break;
1924 		case MBA_RSCN_UPDATE:
1925 			mbx = (uint16_t *)payload;
1926 			/* al_pa */
1927 			aen_queue[new_entry].Payload.RSCN.RSCNInfo[0] =
1928 			    LSB(mbx[2]);
1929 			/* area */
1930 			aen_queue[new_entry].Payload.RSCN.RSCNInfo[1] =
1931 			    MSB(mbx[2]);
1932 			/* domain */
1933 			aen_queue[new_entry].Payload.RSCN.RSCNInfo[2] =
1934 			    LSB(mbx[1]);
1935 			/* save in big endian */
1936 			BIG_ENDIAN_24(&aen_queue[new_entry].
1937 			    Payload.RSCN.RSCNInfo[0]);
1938 
1939 			aen_queue[new_entry].Payload.RSCN.AddrFormat =
1940 			    MSB(mbx[1]);
1941 
1942 			break;
1943 		default:
1944 			/* Not supported */
1945 			EL(ha, "failed, event code not supported=%xh\n",
1946 			    event_code);
1947 			aen_queue[new_entry].AsyncEventCode = 0;
1948 			break;
1949 		}
1950 	}
1951 
1952 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1953 }
1954 
1955 /*
1956  * ql_scsi_passthru
1957  *	IOCTL SCSI passthrough.
1958  *
1959  * Input:
1960  *	ha:	adapter state pointer.
1961  *	cmd:	User space SCSI command pointer.
1962  *	mode:	flags.
1963  *
1964  * Returns:
1965  *	None, request status indicated in cmd->Status.
1966  *
1967  * Context:
1968  *	Kernel context.
1969  */
1970 static void
1971 ql_scsi_passthru(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1972 {
1973 	ql_mbx_iocb_t		*pkt;
1974 	ql_mbx_data_t		mr;
1975 	dma_mem_t		*dma_mem;
1976 	caddr_t			pld;
1977 	uint32_t		pkt_size, pld_size;
1978 	uint16_t		qlnt, retries, cnt, cnt2;
1979 	uint8_t			*name;
1980 	EXT_FC_SCSI_PASSTHRU	*ufc_req;
1981 	EXT_SCSI_PASSTHRU	*usp_req;
1982 	int			rval;
1983 	union _passthru {
1984 		EXT_SCSI_PASSTHRU	sp_cmd;
1985 		EXT_FC_SCSI_PASSTHRU	fc_cmd;
1986 	} pt_req;		/* Passthru request */
1987 	uint32_t		status, sense_sz = 0;
1988 	ql_tgt_t		*tq = NULL;
1989 	EXT_SCSI_PASSTHRU	*sp_req = &pt_req.sp_cmd;
1990 	EXT_FC_SCSI_PASSTHRU	*fc_req = &pt_req.fc_cmd;
1991 
1992 	/* SCSI request struct for SCSI passthrough IOs. */
1993 	struct {
1994 		uint16_t	lun;
1995 		uint16_t	sense_length;	/* Sense buffer size */
1996 		size_t		resid;		/* Residual */
1997 		uint8_t		*cdbp;		/* Requestor's CDB */
1998 		uint8_t		*u_sense;	/* Requestor's sense buffer */
1999 		uint8_t		cdb_len;	/* Requestor's CDB length */
2000 		uint8_t		direction;
2001 	} scsi_req;
2002 
2003 	struct {
2004 		uint8_t		*rsp_info;
2005 		uint8_t		*req_sense_data;
2006 		uint32_t	residual_length;
2007 		uint32_t	rsp_info_length;
2008 		uint32_t	req_sense_length;
2009 		uint16_t	comp_status;
2010 		uint8_t		state_flags_l;
2011 		uint8_t		state_flags_h;
2012 		uint8_t		scsi_status_l;
2013 		uint8_t		scsi_status_h;
2014 	} sts;
2015 
2016 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2017 
2018 	/* Verify Sub Code and set cnt to needed request size. */
2019 	if (cmd->SubCode == EXT_SC_SEND_SCSI_PASSTHRU) {
2020 		pld_size = sizeof (EXT_SCSI_PASSTHRU);
2021 	} else if (cmd->SubCode == EXT_SC_SEND_FC_SCSI_PASSTHRU) {
2022 		pld_size = sizeof (EXT_FC_SCSI_PASSTHRU);
2023 	} else {
2024 		EL(ha, "failed, invalid SubCode=%xh\n", cmd->SubCode);
2025 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
2026 		cmd->ResponseLen = 0;
2027 		return;
2028 	}
2029 
2030 	dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP);
2031 	if (dma_mem == NULL) {
2032 		EL(ha, "failed, kmem_zalloc\n");
2033 		cmd->Status = EXT_STATUS_NO_MEMORY;
2034 		cmd->ResponseLen = 0;
2035 		return;
2036 	}
2037 	/* Verify the size of, and copy in, the passthru request structure. */
2038 	if (cmd->RequestLen != pld_size) {
2039 		/* Return error */
2040 		EL(ha, "failed, RequestLen != cnt, is=%xh, expected=%xh\n",
2041 		    cmd->RequestLen, pld_size);
2042 		cmd->Status = EXT_STATUS_INVALID_PARAM;
2043 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
2044 		cmd->ResponseLen = 0;
		kmem_free(dma_mem, sizeof (dma_mem_t));
2045 		return;
2046 	}
2047 
2048 	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr, &pt_req,
2049 	    pld_size, mode) != 0) {
2050 		EL(ha, "failed, ddi_copyin\n");
2051 		cmd->Status = EXT_STATUS_COPY_ERR;
2052 		cmd->ResponseLen = 0;
		kmem_free(dma_mem, sizeof (dma_mem_t));
2053 		return;
2054 	}
2055 
2056 	/*
2057 	 * Find the fc_port from the SCSI PASSTHRU structure and fill in
2058 	 * the scsi_req request data structure.
2059 	 */
2060 	if (cmd->SubCode == EXT_SC_SEND_SCSI_PASSTHRU) {
2061 		scsi_req.lun = sp_req->TargetAddr.Lun;
2062 		scsi_req.sense_length = sizeof (sp_req->SenseData);
2063 		scsi_req.cdbp = &sp_req->Cdb[0];
2064 		scsi_req.cdb_len = sp_req->CdbLength;
2065 		scsi_req.direction = sp_req->Direction;
2066 		usp_req = (EXT_SCSI_PASSTHRU *)(uintptr_t)cmd->RequestAdr;
2067 		scsi_req.u_sense = &usp_req->SenseData[0];
2068 		cmd->DetailStatus = EXT_DSTATUS_TARGET;
2069 
2070 		qlnt = QLNT_PORT;
2071 		name = (uint8_t *)&sp_req->TargetAddr.Target;
2072 		QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, Target=%lld\n",
2073 		    ha->instance, cmd->SubCode, sp_req->TargetAddr.Target);
2074 		tq = ql_find_port(ha, name, qlnt);
2075 	} else {
2076 		/*
2077 		 * Must be FC PASSTHRU, verified above.
2078 		 */
2079 		if (fc_req->FCScsiAddr.DestType == EXT_DEF_DESTTYPE_WWPN) {
2080 			qlnt = QLNT_PORT;
2081 			name = &fc_req->FCScsiAddr.DestAddr.WWPN[0];
2082 			QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, "
2083 			    "wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2084 			    ha->instance, cmd->SubCode, name[0], name[1],
2085 			    name[2], name[3], name[4], name[5], name[6],
2086 			    name[7]);
2087 			tq = ql_find_port(ha, name, qlnt);
2088 		} else if (fc_req->FCScsiAddr.DestType ==
2089 		    EXT_DEF_DESTTYPE_WWNN) {
2090 			qlnt = QLNT_NODE;
2091 			name = &fc_req->FCScsiAddr.DestAddr.WWNN[0];
2092 			QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, "
2093 			    "wwnn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2094 			    ha->instance, cmd->SubCode, name[0], name[1],
2095 			    name[2], name[3], name[4], name[5], name[6],
2096 			    name[7]);
2097 			tq = ql_find_port(ha, name, qlnt);
2098 		} else if (fc_req->FCScsiAddr.DestType ==
2099 		    EXT_DEF_DESTTYPE_PORTID) {
2100 			qlnt = QLNT_PID;
2101 			name = &fc_req->FCScsiAddr.DestAddr.Id[0];
2102 			QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, PID="
2103 			    "%02x%02x%02x\n", ha->instance, cmd->SubCode,
2104 			    name[0], name[1], name[2]);
2105 			tq = ql_find_port(ha, name, qlnt);
2106 		} else {
2107 			EL(ha, "failed, SubCode=%xh invalid DestType=%xh\n",
2108 			    cmd->SubCode, fc_req->FCScsiAddr.DestType);
2109 			cmd->Status = EXT_STATUS_INVALID_PARAM;
2110 			cmd->ResponseLen = 0;
			kmem_free(dma_mem, sizeof (dma_mem_t));
2111 			return;
2112 		}
2113 		scsi_req.lun = fc_req->FCScsiAddr.Lun;
2114 		scsi_req.sense_length = sizeof (fc_req->SenseData);
2115 		scsi_req.cdbp = &sp_req->Cdb[0];
2116 		scsi_req.cdb_len = sp_req->CdbLength;
2117 		ufc_req = (EXT_FC_SCSI_PASSTHRU *)(uintptr_t)cmd->RequestAdr;
2118 		scsi_req.u_sense = &ufc_req->SenseData[0];
2119 		scsi_req.direction = fc_req->Direction;
2120 	}
2121 
2122 	if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
2123 		EL(ha, "failed, fc_port not found\n");
2124 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
2125 		cmd->ResponseLen = 0;
		kmem_free(dma_mem, sizeof (dma_mem_t));
2126 		return;
2127 	}
2128 
2129 	if (tq->flags & TQF_NEED_AUTHENTICATION) {
2130 		EL(ha, "target not available; loopid=%xh\n", tq->loop_id);
2131 		cmd->Status = EXT_STATUS_DEVICE_OFFLINE;
2132 		cmd->ResponseLen = 0;
		kmem_free(dma_mem, sizeof (dma_mem_t));
2133 		return;
2134 	}
2135 
2136 	/* Allocate command block. */
2137 	if ((scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_IN ||
2138 	    scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_OUT) &&
2139 	    cmd->ResponseLen) {
2140 		pld_size = cmd->ResponseLen;
2141 		pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + pld_size);
2142 		pkt = kmem_zalloc(pkt_size, KM_SLEEP);
2143 		if (pkt == NULL) {
2144 			EL(ha, "failed, kmem_zalloc\n");
2145 			cmd->Status = EXT_STATUS_NO_MEMORY;
2146 			cmd->ResponseLen = 0;
2147 			return;
2148 		}
2149 		pld = (caddr_t)pkt + sizeof (ql_mbx_iocb_t);
2150 
2151 		/* Get DMA memory for the IOCB */
2152 		if (ql_get_dma_mem(ha, dma_mem, pld_size, LITTLE_ENDIAN_DMA,
2153 		    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
2154 			cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
2155 			    "alloc failed", QL_NAME, ha->instance);
2156 			kmem_free(pkt, pkt_size);
			kmem_free(dma_mem, sizeof (dma_mem_t));
2157 			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
2158 			cmd->ResponseLen = 0;
2159 			return;
2160 		}
2161 
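		/*
		 * Map the caller's data direction onto firmware control
		 * flags: the 24xx/25xx/81xx family uses CF_RD/CF_WR,
		 * while earlier ISPs use CF_DATA_IN/CF_DATA_OUT along
		 * with simple tag queuing (CF_STAG).
		 */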
2162 		if (scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_IN) {
2163 			scsi_req.direction = (uint8_t)
2164 			    (CFG_IST(ha, CFG_CTRL_24258081) ?
2165 			    CF_RD : CF_DATA_IN | CF_STAG);
2166 		} else {
2167 			scsi_req.direction = (uint8_t)
2168 			    (CFG_IST(ha, CFG_CTRL_24258081) ?
2169 			    CF_WR : CF_DATA_OUT | CF_STAG);
2170 			cmd->ResponseLen = 0;
2171 
2172 			/* Get command payload. */
2173 			if (ql_get_buffer_data(
2174 			    (caddr_t)(uintptr_t)cmd->ResponseAdr,
2175 			    pld, pld_size, mode) != pld_size) {
2176 				EL(ha, "failed, get_buffer_data\n");
2177 				cmd->Status = EXT_STATUS_COPY_ERR;
2178 
2179 				kmem_free(pkt, pkt_size);
2180 				ql_free_dma_resource(ha, dma_mem);
2181 				kmem_free(dma_mem, sizeof (dma_mem_t));
2182 				return;
2183 			}
2184 
2185 			/* Copy outgoing data to the DMA buffer. */
2186 			ddi_rep_put8(dma_mem->acc_handle, (uint8_t *)pld,
2187 			    (uint8_t *)dma_mem->bp, pld_size,
2188 			    DDI_DEV_AUTOINCR);
2189 
2190 			/* Sync DMA buffer. */
2191 			(void) ddi_dma_sync(dma_mem->dma_handle, 0,
2192 			    dma_mem->size, DDI_DMA_SYNC_FORDEV);
2193 		}
2194 	} else {
2195 		scsi_req.direction = (uint8_t)
2196 		    (CFG_IST(ha, CFG_CTRL_24258081) ? 0 : CF_STAG);
2197 		cmd->ResponseLen = 0;
2198 
2199 		pkt_size = sizeof (ql_mbx_iocb_t);
2200 		pkt = kmem_zalloc(pkt_size, KM_SLEEP);
2201 		if (pkt == NULL) {
2202 			EL(ha, "failed, kmem_zalloc-2\n");
2203 			cmd->Status = EXT_STATUS_NO_MEMORY;
2204 			return;
2205 		}
2206 		pld = NULL;
2207 		pld_size = 0;
2208 	}
2209 
2210 	/* retries = ha->port_down_retry_count; */
2211 	retries = 1;
2212 	cmd->Status = EXT_STATUS_OK;
2213 	cmd->DetailStatus = EXT_DSTATUS_NOADNL_INFO;
2214 
2215 	QL_PRINT_9(CE_CONT, "(%d): SCSI cdb\n", ha->instance);
2216 	QL_DUMP_9(scsi_req.cdbp, 8, scsi_req.cdb_len);
2217 
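	/*
	 * Issue the command.  If the target reports itself logged out
	 * or unavailable, attempt to log the port back in (fabric or
	 * local loop as appropriate) and retry the command once.
	 */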
2218 	do {
2219 		if (DRIVER_SUSPENDED(ha)) {
2220 			sts.comp_status = CS_LOOP_DOWN_ABORT;
2221 			break;
2222 		}
2223 
2224 		if (CFG_IST(ha, CFG_CTRL_24258081)) {
2225 			pkt->cmd24.entry_type = IOCB_CMD_TYPE_7;
2226 			pkt->cmd24.entry_count = 1;
2227 
2228 			/* Set LUN number */
2229 			pkt->cmd24.fcp_lun[2] = LSB(scsi_req.lun);
2230 			pkt->cmd24.fcp_lun[3] = MSB(scsi_req.lun);
2231 
2232 			/* Set N_port handle */
2233 			pkt->cmd24.n_port_hdl = (uint16_t)LE_16(tq->loop_id);
2234 
2235 			/* Set VP Index */
2236 			pkt->cmd24.vp_index = ha->vp_index;
2237 
2238 			/* Set target ID */
2239 			pkt->cmd24.target_id[0] = tq->d_id.b.al_pa;
2240 			pkt->cmd24.target_id[1] = tq->d_id.b.area;
2241 			pkt->cmd24.target_id[2] = tq->d_id.b.domain;
2242 
2243 			/* Set ISP command timeout. */
2244 			pkt->cmd24.timeout = (uint16_t)LE_16(15);
2245 
2246 			/* Load SCSI CDB */
2247 			ddi_rep_put8(ha->hba_buf.acc_handle, scsi_req.cdbp,
2248 			    pkt->cmd24.scsi_cdb, scsi_req.cdb_len,
2249 			    DDI_DEV_AUTOINCR);
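			/*
			 * ql_chg_endian() below swaps the CDB in place,
			 * four bytes at a time, to put it in the byte
			 * order the type 7 IOCB is expected to carry.
			 */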
2250 			for (cnt = 0; cnt < MAX_CMDSZ;
2251 			    cnt = (uint16_t)(cnt + 4)) {
2252 				ql_chg_endian((uint8_t *)&pkt->cmd24.scsi_cdb
2253 				    + cnt, 4);
2254 			}
2255 
2256 			/* Set tag queue control flags */
2257 			pkt->cmd24.task = TA_STAG;
2258 
2259 			if (pld_size) {
2260 				/* Set transfer direction. */
2261 				pkt->cmd24.control_flags = scsi_req.direction;
2262 
2263 				/* Set data segment count. */
2264 				pkt->cmd24.dseg_count = LE_16(1);
2265 
2266 				/* Load total byte count. */
2267 				pkt->cmd24.total_byte_count = LE_32(pld_size);
2268 
2269 				/* Load data descriptor. */
2270 				pkt->cmd24.dseg_0_address[0] = (uint32_t)
2271 				    LE_32(LSD(dma_mem->cookie.dmac_laddress));
2272 				pkt->cmd24.dseg_0_address[1] = (uint32_t)
2273 				    LE_32(MSD(dma_mem->cookie.dmac_laddress));
2274 				pkt->cmd24.dseg_0_length = LE_32(pld_size);
2275 			}
2276 		} else if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
2277 			pkt->cmd3.entry_type = IOCB_CMD_TYPE_3;
2278 			pkt->cmd3.entry_count = 1;
2279 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
2280 				pkt->cmd3.target_l = LSB(tq->loop_id);
2281 				pkt->cmd3.target_h = MSB(tq->loop_id);
2282 			} else {
2283 				pkt->cmd3.target_h = LSB(tq->loop_id);
2284 			}
2285 			pkt->cmd3.lun_l = LSB(scsi_req.lun);
2286 			pkt->cmd3.lun_h = MSB(scsi_req.lun);
2287 			pkt->cmd3.control_flags_l = scsi_req.direction;
2288 			pkt->cmd3.timeout = LE_16(15);
2289 			for (cnt = 0; cnt < scsi_req.cdb_len; cnt++) {
2290 				pkt->cmd3.scsi_cdb[cnt] = scsi_req.cdbp[cnt];
2291 			}
2292 			if (pld_size) {
2293 				pkt->cmd3.dseg_count = LE_16(1);
2294 				pkt->cmd3.byte_count = LE_32(pld_size);
2295 				pkt->cmd3.dseg_0_address[0] = (uint32_t)
2296 				    LE_32(LSD(dma_mem->cookie.dmac_laddress));
2297 				pkt->cmd3.dseg_0_address[1] = (uint32_t)
2298 				    LE_32(MSD(dma_mem->cookie.dmac_laddress));
2299 				pkt->cmd3.dseg_0_length = LE_32(pld_size);
2300 			}
2301 		} else {
2302 			pkt->cmd.entry_type = IOCB_CMD_TYPE_2;
2303 			pkt->cmd.entry_count = 1;
2304 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
2305 				pkt->cmd.target_l = LSB(tq->loop_id);
2306 				pkt->cmd.target_h = MSB(tq->loop_id);
2307 			} else {
2308 				pkt->cmd.target_h = LSB(tq->loop_id);
2309 			}
2310 			pkt->cmd.lun_l = LSB(scsi_req.lun);
2311 			pkt->cmd.lun_h = MSB(scsi_req.lun);
2312 			pkt->cmd.control_flags_l = scsi_req.direction;
2313 			pkt->cmd.timeout = LE_16(15);
2314 			for (cnt = 0; cnt < scsi_req.cdb_len; cnt++) {
2315 				pkt->cmd.scsi_cdb[cnt] = scsi_req.cdbp[cnt];
2316 			}
2317 			if (pld_size) {
2318 				pkt->cmd.dseg_count = LE_16(1);
2319 				pkt->cmd.byte_count = LE_32(pld_size);
2320 				pkt->cmd.dseg_0_address = (uint32_t)
2321 				    LE_32(LSD(dma_mem->cookie.dmac_laddress));
2322 				pkt->cmd.dseg_0_length = LE_32(pld_size);
2323 			}
2324 		}
2325 		/* Go issue command and wait for completion. */
2326 		QL_PRINT_9(CE_CONT, "(%d): request pkt\n", ha->instance);
2327 		QL_DUMP_9(pkt, 8, pkt_size);
2328 
2329 		status = ql_issue_mbx_iocb(ha, (caddr_t)pkt, pkt_size);
2330 
2331 		if (pld_size) {
2332 			/* Sync incoming DMA buffer. */
2333 			(void) ddi_dma_sync(dma_mem->dma_handle, 0,
2334 			    dma_mem->size, DDI_DMA_SYNC_FORKERNEL);
2335 			/* Copy incoming DMA data. */
2336 			ddi_rep_get8(dma_mem->acc_handle, (uint8_t *)pld,
2337 			    (uint8_t *)dma_mem->bp, pld_size,
2338 			    DDI_DEV_AUTOINCR);
2339 		}
2340 
2341 		if (CFG_IST(ha, CFG_CTRL_24258081)) {
2342 			pkt->sts24.entry_status = (uint8_t)
2343 			    (pkt->sts24.entry_status & 0x3c);
2344 		} else {
2345 			pkt->sts.entry_status = (uint8_t)
2346 			    (pkt->sts.entry_status & 0x7e);
2347 		}
2348 
2349 		if (status == QL_SUCCESS && pkt->sts.entry_status != 0) {
2350 			EL(ha, "failed, entry_status=%xh, d_id=%xh\n",
2351 			    pkt->sts.entry_status, tq->d_id.b24);
2352 			status = QL_FUNCTION_PARAMETER_ERROR;
2353 		}
2354 
2355 		sts.comp_status = (uint16_t)(CFG_IST(ha, CFG_CTRL_24258081) ?
2356 		    LE_16(pkt->sts24.comp_status) :
2357 		    LE_16(pkt->sts.comp_status));
2358 
2359 		/*
2360 		 * We have verified as much of the request as we can so
2361 		 * far.  Now we need to verify our ability to actually
2362 		 * issue the CDB.
2363 		 */
2364 		if (DRIVER_SUSPENDED(ha)) {
2365 			sts.comp_status = CS_LOOP_DOWN_ABORT;
2366 			break;
2367 		} else if (status == QL_SUCCESS &&
2368 		    (sts.comp_status == CS_PORT_LOGGED_OUT ||
2369 		    sts.comp_status == CS_PORT_UNAVAILABLE)) {
2370 			EL(ha, "login retry d_id=%xh\n", tq->d_id.b24);
2371 			if (tq->flags & TQF_FABRIC_DEVICE) {
2372 				rval = ql_login_fport(ha, tq, tq->loop_id,
2373 				    LFF_NO_PLOGI, &mr);
2374 				if (rval != QL_SUCCESS) {
2375 					EL(ha, "failed, login_fport=%xh, "
2376 					    "d_id=%xh\n", rval, tq->d_id.b24);
2377 				}
2378 			} else {
2379 				rval = ql_login_lport(ha, tq, tq->loop_id,
2380 				    LLF_NONE);
2381 				if (rval != QL_SUCCESS) {
2382 					EL(ha, "failed, login_lport=%xh, "
2383 					    "d_id=%xh\n", rval, tq->d_id.b24);
2384 				}
2385 			}
2386 		} else {
2387 			break;
2388 		}
2389 
2390 		bzero((caddr_t)pkt, sizeof (ql_mbx_iocb_t));
2391 
2392 	} while (retries--);
2393 
2394 	if (sts.comp_status == CS_LOOP_DOWN_ABORT) {
2395 		/* Cannot issue command now, maybe later */
2396 		EL(ha, "failed, suspended\n");
2397 		kmem_free(pkt, pkt_size);
2398 		ql_free_dma_resource(ha, dma_mem);
2399 		kmem_free(dma_mem, sizeof (dma_mem_t));
2400 		cmd->Status = EXT_STATUS_SUSPENDED;
2401 		cmd->ResponseLen = 0;
2402 		return;
2403 	}
2404 
2405 	if (status != QL_SUCCESS) {
2406 		/* Command error */
2407 		EL(ha, "failed, I/O\n");
2408 		kmem_free(pkt, pkt_size);
2409 		ql_free_dma_resource(ha, dma_mem);
2410 		kmem_free(dma_mem, sizeof (dma_mem_t));
2411 		cmd->Status = EXT_STATUS_ERR;
2412 		cmd->DetailStatus = status;
2413 		cmd->ResponseLen = 0;
2414 		return;
2415 	}
2416 
2417 	/* Setup status. */
2418 	if (CFG_IST(ha, CFG_CTRL_24258081)) {
2419 		sts.scsi_status_l = pkt->sts24.scsi_status_l;
2420 		sts.scsi_status_h = pkt->sts24.scsi_status_h;
2421 
2422 		/* Setup residuals. */
2423 		sts.residual_length = LE_32(pkt->sts24.residual_length);
2424 
2425 		/* Setup state flags. */
2426 		sts.state_flags_l = pkt->sts24.state_flags_l;
2427 		sts.state_flags_h = pkt->sts24.state_flags_h;
2428 		if (pld_size && sts.comp_status != CS_DATA_UNDERRUN) {
2429 			sts.state_flags_h = (uint8_t)(sts.state_flags_h |
2430 			    SF_GOT_BUS | SF_GOT_TARGET | SF_SENT_CMD |
2431 			    SF_XFERRED_DATA | SF_GOT_STATUS);
2432 		} else {
2433 			sts.state_flags_h = (uint8_t)(sts.state_flags_h |
2434 			    SF_GOT_BUS | SF_GOT_TARGET | SF_SENT_CMD |
2435 			    SF_GOT_STATUS);
2436 		}
2437 		if (scsi_req.direction & CF_WR) {
2438 			sts.state_flags_l = (uint8_t)(sts.state_flags_l |
2439 			    SF_DATA_OUT);
2440 		} else if (scsi_req.direction & CF_RD) {
2441 			sts.state_flags_l = (uint8_t)(sts.state_flags_l |
2442 			    SF_DATA_IN);
2443 		}
2444 		sts.state_flags_l = (uint8_t)(sts.state_flags_l | SF_SIMPLE_Q);
2445 
2446 		/* Setup FCP response info. */
2447 		sts.rsp_info_length = sts.scsi_status_h & FCP_RSP_LEN_VALID ?
2448 		    LE_32(pkt->sts24.fcp_rsp_data_length) : 0;
2449 		sts.rsp_info = &pkt->sts24.rsp_sense_data[0];
2450 		for (cnt = 0; cnt < sts.rsp_info_length;
2451 		    cnt = (uint16_t)(cnt + 4)) {
2452 			ql_chg_endian(sts.rsp_info + cnt, 4);
2453 		}
2454 
2455 		/* Setup sense data. */
2456 		if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {
2457 			sts.req_sense_length =
2458 			    LE_32(pkt->sts24.fcp_sense_length);
2459 			sts.state_flags_h = (uint8_t)(sts.state_flags_h |
2460 			    SF_ARQ_DONE);
2461 		} else {
2462 			sts.req_sense_length = 0;
2463 		}
2464 		sts.req_sense_data =
2465 		    &pkt->sts24.rsp_sense_data[sts.rsp_info_length];
2466 		cnt2 = (uint16_t)(((uintptr_t)pkt + sizeof (sts_24xx_entry_t)) -
2467 		    (uintptr_t)sts.req_sense_data);
2468 		for (cnt = 0; cnt < cnt2; cnt = (uint16_t)(cnt + 4)) {
2469 			ql_chg_endian(sts.req_sense_data + cnt, 4);
2470 		}
2471 	} else {
2472 		sts.scsi_status_l = pkt->sts.scsi_status_l;
2473 		sts.scsi_status_h = pkt->sts.scsi_status_h;
2474 
2475 		/* Setup residuals. */
2476 		sts.residual_length = LE_32(pkt->sts.residual_length);
2477 
2478 		/* Setup state flags. */
2479 		sts.state_flags_l = pkt->sts.state_flags_l;
2480 		sts.state_flags_h = pkt->sts.state_flags_h;
2481 
2482 		/* Setup FCP response info. */
2483 		sts.rsp_info_length = sts.scsi_status_h & FCP_RSP_LEN_VALID ?
2484 		    LE_16(pkt->sts.rsp_info_length) : 0;
2485 		sts.rsp_info = &pkt->sts.rsp_info[0];
2486 
2487 		/* Setup sense data. */
2488 		sts.req_sense_length = sts.scsi_status_h & FCP_SNS_LEN_VALID ?
2489 		    LE_16(pkt->sts.req_sense_length) : 0;
2490 		sts.req_sense_data = &pkt->sts.req_sense_data[0];
2491 	}
2492 
2493 	QL_PRINT_9(CE_CONT, "(%d): response pkt\n", ha->instance);
2494 	QL_DUMP_9(&pkt->sts, 8, sizeof (sts_entry_t));
2495 
2496 	switch (sts.comp_status) {
2497 	case CS_INCOMPLETE:
2498 	case CS_ABORTED:
2499 	case CS_DEVICE_UNAVAILABLE:
2500 	case CS_PORT_UNAVAILABLE:
2501 	case CS_PORT_LOGGED_OUT:
2502 	case CS_PORT_CONFIG_CHG:
2503 	case CS_PORT_BUSY:
2504 	case CS_LOOP_DOWN_ABORT:
2505 		cmd->Status = EXT_STATUS_BUSY;
2506 		break;
2507 	case CS_RESET:
2508 	case CS_QUEUE_FULL:
2509 		cmd->Status = EXT_STATUS_ERR;
2510 		break;
2511 	case CS_TIMEOUT:
2512 		cmd->Status = EXT_STATUS_ERR;
2513 		break;
2514 	case CS_DATA_OVERRUN:
2515 		cmd->Status = EXT_STATUS_DATA_OVERRUN;
2516 		break;
2517 	case CS_DATA_UNDERRUN:
2518 		cmd->Status = EXT_STATUS_DATA_UNDERRUN;
2519 		break;
2520 	}
2521 
2522 	/*
2523 	 * For non-data-transfer commands, fix the transfer counts.
2524 	 */
2525 	if (scsi_req.cdbp[0] == SCMD_TEST_UNIT_READY ||
2526 	    scsi_req.cdbp[0] == SCMD_REZERO_UNIT ||
2527 	    scsi_req.cdbp[0] == SCMD_SEEK ||
2528 	    scsi_req.cdbp[0] == SCMD_SEEK_G1 ||
2529 	    scsi_req.cdbp[0] == SCMD_RESERVE ||
2530 	    scsi_req.cdbp[0] == SCMD_RELEASE ||
2531 	    scsi_req.cdbp[0] == SCMD_START_STOP ||
2532 	    scsi_req.cdbp[0] == SCMD_DOORLOCK ||
2533 	    scsi_req.cdbp[0] == SCMD_VERIFY ||
2534 	    scsi_req.cdbp[0] == SCMD_WRITE_FILE_MARK ||
2535 	    scsi_req.cdbp[0] == SCMD_VERIFY_G0 ||
2536 	    scsi_req.cdbp[0] == SCMD_SPACE ||
2537 	    scsi_req.cdbp[0] == SCMD_ERASE ||
2538 	    (scsi_req.cdbp[0] == SCMD_FORMAT &&
2539 	    (scsi_req.cdbp[1] & FPB_DATA) == 0)) {
2540 		/*
2541 		 * Non data transfer command, clear sts_entry residual
2542 		 * length.
2543 		 */
2544 		sts.residual_length = 0;
2545 		cmd->ResponseLen = 0;
2546 		if (sts.comp_status == CS_DATA_UNDERRUN) {
2547 			sts.comp_status = CS_COMPLETE;
2548 			cmd->Status = EXT_STATUS_OK;
2549 		}
2550 	} else {
2551 		cmd->ResponseLen = pld_size;
2552 	}
2553 
2554 	/* Correct ISP completion status */
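	/*
	 * On a clean completion the residual is forced to zero.  On an
	 * underrun with FCP_RESID_UNDER set, the caller is given the
	 * actual transfer length (requested size minus the residual
	 * reported in the IOCB); any other combination is reported as
	 * an error or a SCSI check condition below.
	 */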
2555 	if (sts.comp_status == CS_COMPLETE && sts.scsi_status_l == 0 &&
2556 	    (sts.scsi_status_h & FCP_RSP_MASK) == 0) {
2557 		QL_PRINT_9(CE_CONT, "(%d): Correct completion\n",
2558 		    ha->instance);
2559 		scsi_req.resid = 0;
2560 	} else if (sts.comp_status == CS_DATA_UNDERRUN) {
2561 		QL_PRINT_9(CE_CONT, "(%d): Correct UNDERRUN\n",
2562 		    ha->instance);
2563 		scsi_req.resid = sts.residual_length;
2564 		if (sts.scsi_status_h & FCP_RESID_UNDER) {
2565 			cmd->Status = (uint32_t)EXT_STATUS_OK;
2566 
2567 			cmd->ResponseLen = (uint32_t)
2568 			    (pld_size - scsi_req.resid);
2569 		} else {
2570 			EL(ha, "failed, Transfer ERROR\n");
2571 			cmd->Status = EXT_STATUS_ERR;
2572 			cmd->ResponseLen = 0;
2573 		}
2574 	} else {
2575 		QL_PRINT_9(CE_CONT, "(%d): error d_id=%xh, comp_status=%xh, "
2576 		    "scsi_status_h=%xh, scsi_status_l=%xh\n", ha->instance,
2577 		    tq->d_id.b24, sts.comp_status, sts.scsi_status_h,
2578 		    sts.scsi_status_l);
2579 
2580 		scsi_req.resid = pld_size;
2581 		/*
2582 		 * Handle residual count on SCSI check
2583 		 * condition.
2584 		 *
2585 		 * - If Residual Under / Over is set, use the
2586 		 *   Residual Transfer Length field in IOCB.
2587 		 * - If Residual Under / Over is not set, and
2588 		 *   Transferred Data bit is set in State Flags
2589 		 *   field of IOCB, report residual value of 0
2590 		 *   (you may want to do this for tape
2591 		 *   Write-type commands only). This takes care
2592 		 *   of logical end of tape problem and does
2593 		 *   not break Unit Attention.
2594 		 * - If Residual Under / Over is not set, and
2595 		 *   Transferred Data bit is not set in State
2596 		 *   Flags, report residual value equal to
2597 		 *   original data transfer length.
2598 		 */
2599 		if (sts.scsi_status_l & STATUS_CHECK) {
2600 			cmd->Status = EXT_STATUS_SCSI_STATUS;
2601 			cmd->DetailStatus = sts.scsi_status_l;
2602 			if (sts.scsi_status_h &
2603 			    (FCP_RESID_OVER | FCP_RESID_UNDER)) {
2604 				scsi_req.resid = sts.residual_length;
2605 			} else if (sts.state_flags_h &
2606 			    STATE_XFERRED_DATA) {
2607 				scsi_req.resid = 0;
2608 			}
2609 		}
2610 	}
2611 
2612 	if (sts.scsi_status_l & STATUS_CHECK &&
2613 	    sts.scsi_status_h & FCP_SNS_LEN_VALID &&
2614 	    sts.req_sense_length) {
2615 		/*
2616 		 * Check condition with valid sense data flag set and sense
2617 		 * length != 0.
2618 		 */
2619 		if (sts.req_sense_length > scsi_req.sense_length) {
2620 			sense_sz = scsi_req.sense_length;
2621 		} else {
2622 			sense_sz = sts.req_sense_length;
2623 		}
2624 
2625 		EL(ha, "failed, Check Condition Status, d_id=%xh\n",
2626 		    tq->d_id.b24);
2627 		QL_DUMP_2(sts.req_sense_data, 8, sts.req_sense_length);
2628 
2629 		if (ddi_copyout(sts.req_sense_data, scsi_req.u_sense,
2630 		    (size_t)sense_sz, mode) != 0) {
2631 			EL(ha, "failed, request sense ddi_copyout\n");
2632 		}
2633 
2634 		cmd->Status = EXT_STATUS_SCSI_STATUS;
2635 		cmd->DetailStatus = sts.scsi_status_l;
2636 	}
2637 
2638 	/* Copy response payload from DMA buffer to application. */
2639 	if (scsi_req.direction & (CF_RD | CF_DATA_IN) &&
2640 	    cmd->ResponseLen != 0) {
2641 		QL_PRINT_9(CE_CONT, "(%d): Data Return resid=%lu, "
2642 		    "byte_count=%u, ResponseLen=%xh\n", ha->instance,
2643 		    scsi_req.resid, pld_size, cmd->ResponseLen);
2644 		QL_DUMP_9(pld, 8, cmd->ResponseLen);
2645 
2646 		/* Send response payload. */
2647 		if (ql_send_buffer_data(pld,
2648 		    (caddr_t)(uintptr_t)cmd->ResponseAdr,
2649 		    cmd->ResponseLen, mode) != cmd->ResponseLen) {
2650 			EL(ha, "failed, send_buffer_data\n");
2651 			cmd->Status = EXT_STATUS_COPY_ERR;
2652 			cmd->ResponseLen = 0;
2653 		}
2654 	}
2655 
2656 	if (cmd->Status != EXT_STATUS_OK) {
2657 		EL(ha, "failed, cmd->Status=%xh, comp_status=%xh, "
2658 		    "d_id=%xh\n", cmd->Status, sts.comp_status, tq->d_id.b24);
2659 	} else {
2660 		/*EMPTY*/
2661 		QL_PRINT_9(CE_CONT, "(%d): done, ResponseLen=%d\n",
2662 		    ha->instance, cmd->ResponseLen);
2663 	}
2664 
2665 	kmem_free(pkt, pkt_size);
2666 	ql_free_dma_resource(ha, dma_mem);
2667 	kmem_free(dma_mem, sizeof (dma_mem_t));
2668 }
2669 
2670 /*
2671  * ql_wwpn_to_scsiaddr
2672  *
2673  * Input:
2674  *	ha:	adapter state pointer.
2675  *	cmd:	EXT_IOCTL cmd struct pointer.
2676  *	mode:	flags.
2677  *
2678  * Context:
2679  *	Kernel context.
2680  */
2681 static void
2682 ql_wwpn_to_scsiaddr(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2683 {
2684 	int		status;
2685 	uint8_t		wwpn[EXT_DEF_WWN_NAME_SIZE];
2686 	EXT_SCSI_ADDR	*tmp_addr;
2687 	ql_tgt_t	*tq;
2688 
2689 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2690 
2691 	if (cmd->RequestLen != EXT_DEF_WWN_NAME_SIZE) {
2692 		/* Return error */
2693 		EL(ha, "incorrect RequestLen\n");
2694 		cmd->Status = EXT_STATUS_INVALID_PARAM;
2695 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
2696 		return;
2697 	}
2698 
2699 	status = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, wwpn,
2700 	    cmd->RequestLen, mode);
2701 
2702 	if (status != 0) {
2703 		cmd->Status = EXT_STATUS_COPY_ERR;
2704 		EL(ha, "failed, ddi_copyin\n");
2705 		return;
2706 	}
2707 
2708 	tq = ql_find_port(ha, wwpn, QLNT_PORT);
2709 
2710 	if (tq == NULL || tq->flags & TQF_INITIATOR_DEVICE) {
2711 		/* no matching device */
2712 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
2713 		EL(ha, "failed, device not found\n");
2714 		return;
2715 	}
2716 
2717 	/* Copy out the IDs found.  For now we can only return target ID. */
2718 	tmp_addr = (EXT_SCSI_ADDR *)(uintptr_t)cmd->ResponseAdr;
2719 
2720 	status = ddi_copyout((void *)wwpn, (void *)&tmp_addr->Target, 8, mode);
2721 
2722 	if (status != 0) {
2723 		cmd->Status = EXT_STATUS_COPY_ERR;
2724 		EL(ha, "failed, ddi_copyout\n");
2725 	} else {
2726 		cmd->Status = EXT_STATUS_OK;
2727 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2728 	}
2729 }
2730 
2731 /*
2732  * ql_host_idx
2733  *	Gets host order index.
2734  *
2735  * Input:
2736  *	ha:	adapter state pointer.
2737  *	cmd:	EXT_IOCTL cmd struct pointer.
2738  *	mode:	flags.
2739  *
2740  * Returns:
2741  *	None, request status indicated in cmd->Status.
2742  *
2743  * Context:
2744  *	Kernel context.
2745  */
2746 static void
2747 ql_host_idx(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2748 {
2749 	uint16_t	idx;
2750 
2751 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2752 
2753 	if (cmd->ResponseLen < sizeof (uint16_t)) {
2754 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2755 		cmd->DetailStatus = sizeof (uint16_t);
2756 		EL(ha, "failed, ResponseLen < Len=%xh\n", cmd->ResponseLen);
2757 		cmd->ResponseLen = 0;
2758 		return;
2759 	}
2760 
2761 	idx = (uint16_t)ha->instance;
2762 
2763 	if (ddi_copyout((void *)&idx, (void *)(uintptr_t)(cmd->ResponseAdr),
2764 	    sizeof (uint16_t), mode) != 0) {
2765 		cmd->Status = EXT_STATUS_COPY_ERR;
2766 		cmd->ResponseLen = 0;
2767 		EL(ha, "failed, ddi_copyout\n");
2768 	} else {
2769 		cmd->ResponseLen = sizeof (uint16_t);
2770 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2771 	}
2772 }
2773 
2774 /*
2775  * ql_host_drvname
2776  *	Gets host driver name
2777  *
2778  * Input:
2779  *	ha:	adapter state pointer.
2780  *	cmd:	EXT_IOCTL cmd struct pointer.
2781  *	mode:	flags.
2782  *
2783  * Returns:
2784  *	None, request status indicated in cmd->Status.
2785  *
2786  * Context:
2787  *	Kernel context.
2788  */
2789 static void
2790 ql_host_drvname(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2791 {
2792 
2793 	char		drvname[] = QL_NAME;
2794 	uint32_t	qlnamelen;
2795 
2796 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2797 
2798 	qlnamelen = (uint32_t)(strlen(QL_NAME)+1);
2799 
2800 	if (cmd->ResponseLen < qlnamelen) {
2801 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2802 		cmd->DetailStatus = qlnamelen;
2803 		EL(ha, "failed, ResponseLen: %xh, needed: %xh\n",
2804 		    cmd->ResponseLen, qlnamelen);
2805 		cmd->ResponseLen = 0;
2806 		return;
2807 	}
2808 
2809 	if (ddi_copyout((void *)&drvname,
2810 	    (void *)(uintptr_t)(cmd->ResponseAdr),
2811 	    qlnamelen, mode) != 0) {
2812 		cmd->Status = EXT_STATUS_COPY_ERR;
2813 		cmd->ResponseLen = 0;
2814 		EL(ha, "failed, ddi_copyout\n");
2815 	} else {
2816 		cmd->ResponseLen = qlnamelen-1;
2817 	}
2818 
2819 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2820 }
2821 
2822 /*
2823  * ql_read_nvram
2824  *	Get NVRAM contents.
2825  *
2826  * Input:
2827  *	ha:	adapter state pointer.
2828  *	cmd:	EXT_IOCTL cmd struct pointer.
2829  *	mode:	flags.
2830  *
2831  * Returns:
2832  *	None, request status indicated in cmd->Status.
2833  *
2834  * Context:
2835  *	Kernel context.
2836  */
2837 static void
2838 ql_read_nvram(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2839 {
2840 
2841 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2842 
2843 	if (cmd->ResponseLen < ha->nvram_cache->size) {
2844 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2845 		cmd->DetailStatus = ha->nvram_cache->size;
2846 		EL(ha, "failed, ResponseLen != NVRAM, Len=%xh\n",
2847 		    cmd->ResponseLen);
2848 		cmd->ResponseLen = 0;
2849 		return;
2850 	}
2851 
2852 	/* Get NVRAM data. */
2853 	if (ql_nv_util_dump(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
2854 	    mode) != 0) {
2855 		cmd->Status = EXT_STATUS_COPY_ERR;
2856 		cmd->ResponseLen = 0;
2857 		EL(ha, "failed, copy error\n");
2858 	} else {
2859 		cmd->ResponseLen = ha->nvram_cache->size;
2860 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2861 	}
2862 }
2863 
2864 /*
2865  * ql_write_nvram
2866  *	Loads NVRAM contents.
2867  *
2868  * Input:
2869  *	ha:	adapter state pointer.
2870  *	cmd:	EXT_IOCTL cmd struct pointer.
2871  *	mode:	flags.
2872  *
2873  * Returns:
2874  *	None, request status indicated in cmd->Status.
2875  *
2876  * Context:
2877  *	Kernel context.
2878  */
2879 static void
2880 ql_write_nvram(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2881 {
2882 
2883 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2884 
2885 	if (cmd->RequestLen < ha->nvram_cache->size) {
2886 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2887 		cmd->DetailStatus = ha->nvram_cache->size;
2888 		EL(ha, "failed, RequestLen != NVRAM, Len=%xh\n",
2889 		    cmd->RequestLen);
2890 		return;
2891 	}
2892 
2893 	/* Load NVRAM data. */
2894 	if (ql_nv_util_load(ha, (void *)(uintptr_t)(cmd->RequestAdr),
2895 	    mode) != 0) {
2896 		cmd->Status = EXT_STATUS_COPY_ERR;
2897 		EL(ha, "failed, copy error\n");
2898 	} else {
2899 		/*EMPTY*/
2900 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2901 	}
2902 }
2903 
2904 /*
2905  * ql_write_vpd
2906  *	Loads VPD contents.
2907  *
2908  * Input:
2909  *	ha:	adapter state pointer.
2910  *	cmd:	EXT_IOCTL cmd struct pointer.
2911  *	mode:	flags.
2912  *
2913  * Returns:
2914  *	None, request status indicated in cmd->Status.
2915  *
2916  * Context:
2917  *	Kernel context.
2918  */
2919 static void
2920 ql_write_vpd(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2921 {
2922 	int32_t		rval = 0;
2923 
2924 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2925 
2926 	if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
2927 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
2928 		EL(ha, "failed, invalid request for HBA\n");
2929 		return;
2930 	}
2931 
2932 	if (cmd->RequestLen < QL_24XX_VPD_SIZE) {
2933 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2934 		cmd->DetailStatus = QL_24XX_VPD_SIZE;
2935 		EL(ha, "failed, RequestLen != VPD len, len passed=%xh\n",
2936 		    cmd->RequestLen);
2937 		return;
2938 	}
2939 
2940 	/* Load VPD data. */
2941 	if ((rval = ql_vpd_load(ha, (void *)(uintptr_t)(cmd->RequestAdr),
2942 	    mode)) != 0) {
2943 		cmd->Status = EXT_STATUS_COPY_ERR;
2944 		cmd->DetailStatus = rval;
2945 		EL(ha, "failed, errno=%x\n", rval);
2946 	} else {
2947 		/*EMPTY*/
2948 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2949 	}
2950 }
2951 
2952 /*
2953  * ql_read_vpd
2954  *	Dumps VPD contents.
2955  *
2956  * Input:
2957  *	ha:	adapter state pointer.
2958  *	cmd:	EXT_IOCTL cmd struct pointer.
2959  *	mode:	flags.
2960  *
2961  * Returns:
2962  *	None, request status indicated in cmd->Status.
2963  *
2964  * Context:
2965  *	Kernel context.
2966  */
2967 static void
2968 ql_read_vpd(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2969 {
2970 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2971 
2972 	if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
2973 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
2974 		EL(ha, "failed, invalid request for HBA\n");
2975 		return;
2976 	}
2977 
2978 	if (cmd->ResponseLen < QL_24XX_VPD_SIZE) {
2979 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2980 		cmd->DetailStatus = QL_24XX_VPD_SIZE;
2981 		EL(ha, "failed, ResponseLen < VPD len, len passed=%xh\n",
2982 		    cmd->ResponseLen);
2983 		return;
2984 	}
2985 
2986 	/* Dump VPD data. */
2987 	if ((ql_vpd_dump(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
2988 	    mode)) != 0) {
2989 		cmd->Status = EXT_STATUS_COPY_ERR;
2990 		EL(ha, "failed,\n");
2991 	} else {
2992 		/*EMPTY*/
2993 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2994 	}
2995 }
2996 
2997 /*
2998  * ql_get_fcache
2999  *	Dumps flash cache contents.
3000  *
3001  * Input:
3002  *	ha:	adapter state pointer.
3003  *	cmd:	EXT_IOCTL cmd struct pointer.
3004  *	mode:	flags.
3005  *
3006  * Returns:
3007  *	None, request status indicated in cmd->Status.
3008  *
3009  * Context:
3010  *	Kernel context.
3011  */
3012 static void
3013 ql_get_fcache(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3014 {
3015 	uint32_t	bsize, boff, types, cpsize, hsize;
3016 	ql_fcache_t	*fptr;
3017 
3018 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3019 
3020 	CACHE_LOCK(ha);
3021 
3022 	if (ha->fcache == NULL) {
3023 		CACHE_UNLOCK(ha);
3024 		cmd->Status = EXT_STATUS_ERR;
3025 		EL(ha, "failed, adapter fcache not setup\n");
3026 		return;
3027 	}
3028 
3029 	if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
3030 		bsize = 100;
3031 	} else {
3032 		bsize = 400;
3033 	}
3034 
3035 	if (cmd->ResponseLen < bsize) {
3036 		CACHE_UNLOCK(ha);
3037 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
3038 		cmd->DetailStatus = bsize;
3039 		EL(ha, "failed, ResponseLen < %d, len passed=%xh\n",
3040 		    bsize, cmd->ResponseLen);
3041 		return;
3042 	}
3043 
3044 	boff = 0;
3045 	bsize = 0;
3046 	fptr = ha->fcache;
3047 
3048 	/*
3049 	 * For backwards compatibility, get one of each image type
3050 	 */
3051 	types = (FTYPE_BIOS | FTYPE_FCODE | FTYPE_EFI);
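	/*
	 * Each image found below is copied into its own 100-byte slot,
	 * starting at offset 0 of the caller's buffer; the firmware
	 * image is handled separately and always lands at offset 300.
	 */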
3052 	while ((fptr != NULL) && (fptr->buf != NULL) && (types != 0)) {
3053 		/* Get the next image */
3054 		if ((fptr = ql_get_fbuf(ha->fcache, types)) != NULL) {
3055 
3056 			cpsize = (fptr->buflen < 100 ? fptr->buflen : 100);
3057 
3058 			if (ddi_copyout(fptr->buf,
3059 			    (void *)(uintptr_t)(cmd->ResponseAdr + boff),
3060 			    cpsize, mode) != 0) {
3061 				CACHE_UNLOCK(ha);
3062 				EL(ha, "ddicopy failed, done\n");
3063 				cmd->Status = EXT_STATUS_COPY_ERR;
3064 				cmd->DetailStatus = 0;
3065 				return;
3066 			}
3067 			boff += 100;
3068 			bsize += cpsize;
3069 			types &= ~(fptr->type);
3070 		}
3071 	}
3072 
3073 	/*
3074 	 * Get the firmware image -- it needs to be last in the
3075 	 * buffer at offset 300 for backwards compatibility. Also for
3076 	 * backwards compatibility, the pci header is stripped off.
3077 	 */
3078 	if ((fptr = ql_get_fbuf(ha->fcache, FTYPE_FW)) != NULL) {
3079 
3080 		hsize = sizeof (pci_header_t) + sizeof (pci_data_t);
3081 		if (hsize > fptr->buflen) {
3082 			CACHE_UNLOCK(ha);
3083 			EL(ha, "header size (%xh) exceeds buflen (%xh)\n",
3084 			    hsize, fptr->buflen);
3085 			cmd->Status = EXT_STATUS_COPY_ERR;
3086 			cmd->DetailStatus = 0;
3087 			return;
3088 		}
3089 
3090 		cpsize = ((fptr->buflen - hsize) < 100 ?
3091 		    fptr->buflen - hsize : 100);
3092 
3093 		if (ddi_copyout(fptr->buf+hsize,
3094 		    (void *)(uintptr_t)(cmd->ResponseAdr + 300),
3095 		    cpsize, mode) != 0) {
3096 			CACHE_UNLOCK(ha);
3097 			EL(ha, "fw ddicopy failed, done\n");
3098 			cmd->Status = EXT_STATUS_COPY_ERR;
3099 			cmd->DetailStatus = 0;
3100 			return;
3101 		}
3102 		bsize += 100;
3103 	}
3104 
3105 	CACHE_UNLOCK(ha);
3106 	cmd->Status = EXT_STATUS_OK;
3107 	cmd->DetailStatus = bsize;
3108 
3109 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3110 }
3111 
3112 /*
3113  * ql_get_fcache_ex
3114  *	Dumps flash cache contents.
3115  *
3116  * Input:
3117  *	ha:	adapter state pointer.
3118  *	cmd:	EXT_IOCTL cmd struct pointer.
3119  *	mode:	flags.
3120  *
3121  * Returns:
3122  *	None, request status indicated in cmd->Status.
3123  *
3124  * Context:
3125  *	Kernel context.
3126  */
3127 static void
3128 ql_get_fcache_ex(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3129 {
3130 	uint32_t	bsize = 0;
3131 	uint32_t	boff = 0;
3132 	ql_fcache_t	*fptr;
3133 
3134 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3135 
3136 	CACHE_LOCK(ha);
3137 	if (ha->fcache == NULL) {
3138 		CACHE_UNLOCK(ha);
3139 		cmd->Status = EXT_STATUS_ERR;
3140 		EL(ha, "failed, adapter fcache not setup\n");
3141 		return;
3142 	}
3143 
3144 	/* Make sure user passed enough buffer space */
3145 	for (fptr = ha->fcache; fptr != NULL; fptr = fptr->next) {
3146 		bsize += FBUFSIZE;
3147 	}
3148 
3149 	if (cmd->ResponseLen < bsize) {
3150 		CACHE_UNLOCK(ha);
3151 		if (cmd->ResponseLen != 0) {
3152 			EL(ha, "failed, ResponseLen < %d, len passed=%xh\n",
3153 			    bsize, cmd->ResponseLen);
3154 		}
3155 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
3156 		cmd->DetailStatus = bsize;
3157 		return;
3158 	}
3159 
3160 	boff = 0;
3161 	fptr = ha->fcache;
3162 	while ((fptr != NULL) && (fptr->buf != NULL)) {
3163 		/* Get the next image */
3164 		if (ddi_copyout(fptr->buf,
3165 		    (void *)(uintptr_t)(cmd->ResponseAdr + boff),
3166 		    (fptr->buflen < FBUFSIZE ? fptr->buflen : FBUFSIZE),
3167 		    mode) != 0) {
3168 			CACHE_UNLOCK(ha);
3169 			EL(ha, "failed, ddicopy at %xh, done\n", boff);
3170 			cmd->Status = EXT_STATUS_COPY_ERR;
3171 			cmd->DetailStatus = 0;
3172 			return;
3173 		}
3174 		boff += FBUFSIZE;
3175 		fptr = fptr->next;
3176 	}
3177 
3178 	CACHE_UNLOCK(ha);
3179 	cmd->Status = EXT_STATUS_OK;
3180 	cmd->DetailStatus = bsize;
3181 
3182 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3183 }
3184 
3185 /*
3186  * ql_read_flash
3187  *	Get flash contents.
3188  *
3189  * Input:
3190  *	ha:	adapter state pointer.
3191  *	cmd:	EXT_IOCTL cmd struct pointer.
3192  *	mode:	flags.
3193  *
3194  * Returns:
3195  *	None, request status indicated in cmd->Status.
3196  *
3197  * Context:
3198  *	Kernel context.
3199  */
3200 static void
3201 ql_read_flash(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3202 {
3203 	ql_xioctl_t	*xp = ha->xioctl;
3204 
3205 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3206 
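	/*
	 * Flash access requires quiescing the adapter: stall outstanding
	 * I/O first, perform the read, then resume I/O at the end of
	 * this routine.
	 */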
3207 	if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
3208 		EL(ha, "ql_stall_driver failed\n");
3209 		cmd->Status = EXT_STATUS_BUSY;
3210 		cmd->DetailStatus = xp->fdesc.flash_size;
3211 		cmd->ResponseLen = 0;
3212 		return;
3213 	}
3214 
3215 	if (ql_setup_fcache(ha) != QL_SUCCESS) {
3216 		cmd->Status = EXT_STATUS_ERR;
3217 		cmd->DetailStatus = xp->fdesc.flash_size;
3218 		EL(ha, "failed, ResponseLen=%xh, flash size=%xh\n",
3219 		    cmd->ResponseLen, xp->fdesc.flash_size);
3220 		cmd->ResponseLen = 0;
3221 	} else {
3222 		/* adjust read size to flash size */
3223 		if (cmd->ResponseLen > xp->fdesc.flash_size) {
3224 			EL(ha, "adjusting req=%xh, max=%xh\n",
3225 			    cmd->ResponseLen, xp->fdesc.flash_size);
3226 			cmd->ResponseLen = xp->fdesc.flash_size;
3227 		}
3228 
3229 		/* Get flash data. */
3230 		if (ql_flash_fcode_dump(ha,
3231 		    (void *)(uintptr_t)(cmd->ResponseAdr),
3232 		    (size_t)(cmd->ResponseLen), 0, mode) != 0) {
3233 			cmd->Status = EXT_STATUS_COPY_ERR;
3234 			cmd->ResponseLen = 0;
3235 			EL(ha, "failed,\n");
3236 		}
3237 	}
3238 
3239 	/* Resume I/O */
3240 	if (CFG_IST(ha, CFG_CTRL_24258081)) {
3241 		ql_restart_driver(ha);
3242 	} else {
3243 		EL(ha, "isp_abort_needed for restart\n");
3244 		ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED,
3245 		    DRIVER_STALL);
3246 	}
3247 
3248 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3249 }
3250 
3251 /*
3252  * ql_write_flash
3253  *	Loads flash contents.
3254  *
3255  * Input:
3256  *	ha:	adapter state pointer.
3257  *	cmd:	EXT_IOCTL cmd struct pointer.
3258  *	mode:	flags.
3259  *
3260  * Returns:
3261  *	None, request status indicated in cmd->Status.
3262  *
3263  * Context:
3264  *	Kernel context.
3265  */
3266 static void
3267 ql_write_flash(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3268 {
3269 	ql_xioctl_t	*xp = ha->xioctl;
3270 
3271 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3272 
3273 	if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
3274 		EL(ha, "ql_stall_driver failed\n");
3275 		cmd->Status = EXT_STATUS_BUSY;
3276 		cmd->DetailStatus = xp->fdesc.flash_size;
3277 		cmd->ResponseLen = 0;
3278 		return;
3279 	}
3280 
3281 	if (ql_setup_fcache(ha) != QL_SUCCESS) {
3282 		cmd->Status = EXT_STATUS_ERR;
3283 		cmd->DetailStatus = xp->fdesc.flash_size;
3284 		EL(ha, "failed, RequestLen=%xh, size=%xh\n",
3285 		    cmd->RequestLen, xp->fdesc.flash_size);
3286 		cmd->ResponseLen = 0;
3287 	} else {
3288 		/* Load flash data. */
3289 		if (cmd->RequestLen > xp->fdesc.flash_size) {
3290 			cmd->Status = EXT_STATUS_ERR;
3291 			cmd->DetailStatus =  xp->fdesc.flash_size;
3292 			EL(ha, "failed, RequestLen=%xh, flash size=%xh\n",
3293 			    cmd->RequestLen, xp->fdesc.flash_size);
3294 		} else if (ql_flash_fcode_load(ha,
3295 		    (void *)(uintptr_t)(cmd->RequestAdr),
3296 		    (size_t)(cmd->RequestLen), mode) != 0) {
3297 			cmd->Status = EXT_STATUS_COPY_ERR;
3298 			EL(ha, "failed,\n");
3299 		}
3300 	}
3301 
3302 	/* Resume I/O */
3303 	if (CFG_IST(ha, CFG_CTRL_24258081)) {
3304 		ql_restart_driver(ha);
3305 	} else {
3306 		EL(ha, "isp_abort_needed for restart\n");
3307 		ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED,
3308 		    DRIVER_STALL);
3309 	}
3310 
3311 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3312 }
3313 
3314 /*
3315  * ql_diagnostic_loopback
3316  *	Performs EXT_CC_LOOPBACK Command
3317  *
3318  * Input:
3319  *	ha:	adapter state pointer.
3320  *	cmd:	Local EXT_IOCTL cmd struct pointer.
3321  *	mode:	flags.
3322  *
3323  * Returns:
3324  *	None, request status indicated in cmd->Status.
3325  *
3326  * Context:
3327  *	Kernel context.
3328  */
3329 static void
3330 ql_diagnostic_loopback(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3331 {
3332 	EXT_LOOPBACK_REQ	plbreq;
3333 	EXT_LOOPBACK_RSP	plbrsp;
3334 	ql_mbx_data_t		mr;
3335 	uint32_t		rval;
3336 	caddr_t			bp;
3337 	uint16_t		opt;
3338 
3339 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3340 
3341 	/* Get loop back request. */
3342 	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
3343 	    (void *)&plbreq, sizeof (EXT_LOOPBACK_REQ), mode) != 0) {
3344 		EL(ha, "failed, ddi_copyin\n");
3345 		cmd->Status = EXT_STATUS_COPY_ERR;
3346 		cmd->ResponseLen = 0;
3347 		return;
3348 	}
3349 
3350 	opt = (uint16_t)(plbreq.Options & MBC_LOOPBACK_POINT_MASK);
3351 
3352 	/* Check transfer length fits in buffer. */
3353 	if (plbreq.BufferLength < plbreq.TransferCount &&
3354 	    plbreq.TransferCount < MAILBOX_BUFFER_SIZE) {
3355 		EL(ha, "failed, BufferLength=%d, xfercnt=%d, "
3356 		    "mailbox_buffer_size=%d\n", plbreq.BufferLength,
3357 		    plbreq.TransferCount, MAILBOX_BUFFER_SIZE);
3358 		cmd->Status = EXT_STATUS_INVALID_PARAM;
3359 		cmd->ResponseLen = 0;
3360 		return;
3361 	}
3362 
3363 	/* Allocate command memory. */
3364 	bp = kmem_zalloc(plbreq.TransferCount, KM_SLEEP);
3365 	if (bp == NULL) {
3366 		EL(ha, "failed, kmem_zalloc\n");
3367 		cmd->Status = EXT_STATUS_NO_MEMORY;
3368 		cmd->ResponseLen = 0;
3369 		return;
3370 	}
3371 
3372 	/* Get loopback data. */
3373 	if (ql_get_buffer_data((caddr_t)(uintptr_t)plbreq.BufferAddress,
3374 	    bp, plbreq.TransferCount, mode) != plbreq.TransferCount) {
3375 		EL(ha, "failed, ddi_copyin-2\n");
3376 		kmem_free(bp, plbreq.TransferCount);
3377 		cmd->Status = EXT_STATUS_COPY_ERR;
3378 		cmd->ResponseLen = 0;
3379 		return;
3380 	}
3381 
3382 	if ((ha->task_daemon_flags & (QL_LOOP_TRANSITION | DRIVER_STALL)) ||
3383 	    ql_stall_driver(ha, 0) != QL_SUCCESS) {
3384 		EL(ha, "failed, LOOP_NOT_READY\n");
3385 		kmem_free(bp, plbreq.TransferCount);
3386 		cmd->Status = EXT_STATUS_BUSY;
3387 		cmd->ResponseLen = 0;
3388 		return;
3389 	}
3390 
3391 	/* Shutdown IP. */
3392 	if (ha->flags & IP_INITIALIZED) {
3393 		(void) ql_shutdown_ip(ha);
3394 	}
3395 
3396 	/* Determine the topology so we can send either the loopback or the echo. */
3397 	/* Echo is supported only on the 2300 and later adapters. */
3398 
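	/*
	 * On 81xx/8021 adapters an external loopback point with the
	 * loop up uses the ELS echo (limited to 252 bytes of data);
	 * otherwise a loopback diagnostic is run, optionally switching
	 * the port into internal loopback first.  On other ISPs the
	 * echo is used when attached to an F_port on a 2300 or later;
	 * otherwise a plain loopback diagnostic is used.
	 */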
3399 	if (CFG_IST(ha, CFG_CTRL_8081)) {
3400 		if (!(ha->task_daemon_flags & LOOP_DOWN) && opt ==
3401 		    MBC_LOOPBACK_POINT_EXTERNAL) {
3402 			if (plbreq.TransferCount > 252) {
3403 				EL(ha, "transfer count (%d) > 252\n",
3404 				    plbreq.TransferCount);
3405 				kmem_free(bp, plbreq.TransferCount);
3406 				cmd->Status = EXT_STATUS_INVALID_PARAM;
3407 				cmd->ResponseLen = 0;
3408 				return;
3409 			}
3410 			plbrsp.CommandSent = INT_DEF_LB_ECHO_CMD;
3411 			rval = ql_diag_echo(ha, 0, bp, plbreq.TransferCount,
3412 			    MBC_ECHO_ELS, &mr);
3413 		} else {
3414 			if (CFG_IST(ha, CFG_CTRL_81XX)) {
3415 				(void) ql_set_loop_point(ha, opt);
3416 			}
3417 			plbrsp.CommandSent = INT_DEF_LB_LOOPBACK_CMD;
3418 			rval = ql_diag_loopback(ha, 0, bp, plbreq.TransferCount,
3419 			    opt, plbreq.IterationCount, &mr);
3420 			if (CFG_IST(ha, CFG_CTRL_81XX)) {
3421 				(void) ql_set_loop_point(ha, 0);
3422 			}
3423 		}
3424 	} else {
3425 		if (!(ha->task_daemon_flags & LOOP_DOWN) &&
3426 		    (ha->topology & QL_F_PORT) &&
3427 		    ha->device_id >= 0x2300) {
3428 			QL_PRINT_9(CE_CONT, "(%d): F_PORT topology -- using "
3429 			    "echo\n", ha->instance);
3430 			plbrsp.CommandSent = INT_DEF_LB_ECHO_CMD;
3431 			rval = ql_diag_echo(ha, 0, bp, plbreq.TransferCount,
3432 			    (uint16_t)(CFG_IST(ha, CFG_CTRL_8081) ?
3433 			    MBC_ECHO_ELS : MBC_ECHO_64BIT), &mr);
3434 		} else {
3435 			plbrsp.CommandSent = INT_DEF_LB_LOOPBACK_CMD;
3436 			rval = ql_diag_loopback(ha, 0, bp, plbreq.TransferCount,
3437 			    opt, plbreq.IterationCount, &mr);
3438 		}
3439 	}
3440 
3441 	ql_restart_driver(ha);
3442 
3443 	/* Restart IP if it was shutdown. */
3444 	if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
3445 		(void) ql_initialize_ip(ha);
3446 		ql_isp_rcvbuf(ha);
3447 	}
3448 
3449 	if (rval != QL_SUCCESS) {
3450 		EL(ha, "failed, diagnostic_loopback_mbx=%xh\n", rval);
3451 		kmem_free(bp, plbreq.TransferCount);
3452 		cmd->Status = EXT_STATUS_MAILBOX;
3453 		cmd->DetailStatus = rval;
3454 		cmd->ResponseLen = 0;
3455 		return;
3456 	}
3457 
3458 	/* Return loopback data. */
3459 	if (ql_send_buffer_data(bp, (caddr_t)(uintptr_t)plbreq.BufferAddress,
3460 	    plbreq.TransferCount, mode) != plbreq.TransferCount) {
3461 		EL(ha, "failed, ddi_copyout\n");
3462 		kmem_free(bp, plbreq.TransferCount);
3463 		cmd->Status = EXT_STATUS_COPY_ERR;
3464 		cmd->ResponseLen = 0;
3465 		return;
3466 	}
3467 	kmem_free(bp, plbreq.TransferCount);
3468 
3469 	/* Return loopback results. */
3470 	plbrsp.BufferAddress = plbreq.BufferAddress;
3471 	plbrsp.BufferLength = plbreq.TransferCount;
3472 	plbrsp.CompletionStatus = mr.mb[0];
3473 
3474 	if (plbrsp.CommandSent == INT_DEF_LB_ECHO_CMD) {
3475 		plbrsp.CrcErrorCount = 0;
3476 		plbrsp.DisparityErrorCount = 0;
3477 		plbrsp.FrameLengthErrorCount = 0;
3478 		plbrsp.IterationCountLastError = 0;
3479 	} else {
3480 		plbrsp.CrcErrorCount = mr.mb[1];
3481 		plbrsp.DisparityErrorCount = mr.mb[2];
3482 		plbrsp.FrameLengthErrorCount = mr.mb[3];
3483 		plbrsp.IterationCountLastError = (mr.mb[19] >> 16) | mr.mb[18];
3484 	}
3485 
3486 	rval = ddi_copyout((void *)&plbrsp,
3487 	    (void *)(uintptr_t)cmd->ResponseAdr,
3488 	    sizeof (EXT_LOOPBACK_RSP), mode);
3489 	if (rval != 0) {
3490 		EL(ha, "failed, ddi_copyout-2\n");
3491 		cmd->Status = EXT_STATUS_COPY_ERR;
3492 		cmd->ResponseLen = 0;
3493 		return;
3494 	}
3495 	cmd->ResponseLen = sizeof (EXT_LOOPBACK_RSP);
3496 
3497 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3498 }
3499 
3500 /*
3501  * ql_set_loop_point
3502  *	Setup loop point for port configuration.
3503  *
3504  * Input:
3505  *	ha:	adapter state structure.
3506  *	opt:	loop point option.
3507  *
3508  * Returns:
3509  *	ql local function return status code.
3510  *
3511  * Context:
3512  *	Kernel context.
3513  */
3514 static int
3515 ql_set_loop_point(ql_adapter_state_t *ha, uint16_t opt)
3516 {
3517 	ql_mbx_data_t	mr;
3518 	int		rval;
3519 	uint32_t	timer;
3520 
3521 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3522 
3523 	/*
3524 	 * We get the current port config, modify the loopback field and
3525 	 * write it back out.
3526 	 */
3527 	if ((rval = ql_get_port_config(ha, &mr)) != QL_SUCCESS) {
3528 		EL(ha, "get_port_config status=%xh\n", rval);
3529 		return (rval);
3530 	}
3531 	/*
3532 	 * Set the loopback mode field while maintaining the others.
3533 	 * Currently only internal or none are supported.
3534 	 */
3535 	mr.mb[1] = (uint16_t)(mr.mb[1] &~LOOPBACK_MODE_FIELD_MASK);
3536 	if (opt == MBC_LOOPBACK_POINT_INTERNAL) {
3537 		mr.mb[1] = (uint16_t)(mr.mb[1] |
3538 		    LOOPBACK_MODE(LOOPBACK_MODE_INTERNAL));
3539 	}
3540 	/*
3541 	 * Changing the port configuration will cause the port state to cycle
3542 	 * down and back up. The indication that this has happened is that
3543 	 * the point to point flag gets set.
3544 	 */
3545 	ADAPTER_STATE_LOCK(ha);
3546 	ha->flags &= ~POINT_TO_POINT;
3547 	ADAPTER_STATE_UNLOCK(ha);
3548 	if ((rval = ql_set_port_config(ha, &mr)) != QL_SUCCESS) {
3549 		EL(ha, "set_port_config status=%xh\n", rval);
3550 	}
3551 
3552 	/* Wait for the port to come back up (up to 10 seconds). */
3553 	for (timer = opt ? 10 : 0; timer; timer--) {
3554 		if (ha->flags & POINT_TO_POINT) {
3555 			break;
3556 		}
3557 		/* Delay for 1000000 usec (1 second). */
3558 		ql_delay(ha, 1000000);
3559 	}
3560 
3561 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3562 
3563 	return (rval);
3564 }
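
/*
 * Usage sketch (illustrative only, not part of the driver): a diagnostic
 * path would typically bracket its test I/O with ql_set_loop_point()
 * calls, first selecting internal loopback and then restoring the normal
 * port configuration.  perform_diag_io() below is a hypothetical test
 * routine used only for illustration.
 *
 *	int	rval;
 *
 *	if ((rval = ql_set_loop_point(ha, MBC_LOOPBACK_POINT_INTERNAL)) ==
 *	    QL_SUCCESS) {
 *		rval = perform_diag_io(ha);
 *		(void) ql_set_loop_point(ha, 0);
 *	}
 */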
3565 
3566 /*
3567  * ql_send_els_rnid
3568  *	IOCTL for extended link service RNID command.
3569  *
3570  * Input:
3571  *	ha:	adapter state pointer.
3572  *	cmd:	User space CT arguments pointer.
3573  *	mode:	flags.
3574  *
3575  * Returns:
3576  *	None, request status indicated in cmd->Status.
3577  *
3578  * Context:
3579  *	Kernel context.
3580  */
3581 static void
3582 ql_send_els_rnid(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3583 {
3584 	EXT_RNID_REQ	tmp_rnid;
3585 	port_id_t	tmp_fcid;
3586 	caddr_t		tmp_buf, bptr;
3587 	uint32_t	copy_len;
3588 	ql_tgt_t	*tq;
3589 	EXT_RNID_DATA	rnid_data;
3590 	uint32_t	loop_ready_wait = 10 * 60 * 10;
3591 	int		rval = 0;
3592 	uint32_t	local_hba = 0;
3593 
3594 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3595 
3596 	if (DRIVER_SUSPENDED(ha)) {
3597 		EL(ha, "failed, LOOP_NOT_READY\n");
3598 		cmd->Status = EXT_STATUS_BUSY;
3599 		cmd->ResponseLen = 0;
3600 		return;
3601 	}
3602 
3603 	if (cmd->RequestLen != sizeof (EXT_RNID_REQ)) {
3604 		/* parameter error */
3605 		EL(ha, "failed, RequestLen < EXT_RNID_REQ, Len=%xh\n",
3606 		    cmd->RequestLen);
3607 		cmd->Status = EXT_STATUS_INVALID_PARAM;
3608 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
3609 		cmd->ResponseLen = 0;
3610 		return;
3611 	}
3612 
3613 	if (ddi_copyin((void*)(uintptr_t)cmd->RequestAdr,
3614 	    &tmp_rnid, cmd->RequestLen, mode) != 0) {
3615 		EL(ha, "failed, ddi_copyin\n");
3616 		cmd->Status = EXT_STATUS_COPY_ERR;
3617 		cmd->ResponseLen = 0;
3618 		return;
3619 	}
3620 
3621 	/* Find loop ID of the device */
3622 	if (tmp_rnid.Addr.Type == EXT_DEF_TYPE_WWNN) {
3623 		bptr = CFG_IST(ha, CFG_CTRL_24258081) ?
3624 		    (caddr_t)&ha->init_ctrl_blk.cb24.node_name :
3625 		    (caddr_t)&ha->init_ctrl_blk.cb.node_name;
3626 		if (bcmp((void *)bptr, (void *)tmp_rnid.Addr.FcAddr.WWNN,
3627 		    EXT_DEF_WWN_NAME_SIZE) == 0) {
3628 			local_hba = 1;
3629 		} else {
3630 			tq = ql_find_port(ha,
3631 			    (uint8_t *)tmp_rnid.Addr.FcAddr.WWNN, QLNT_NODE);
3632 		}
3633 	} else if (tmp_rnid.Addr.Type == EXT_DEF_TYPE_WWPN) {
3634 		bptr = CFG_IST(ha, CFG_CTRL_24258081) ?
3635 		    (caddr_t)&ha->init_ctrl_blk.cb24.port_name :
3636 		    (caddr_t)&ha->init_ctrl_blk.cb.port_name;
3637 		if (bcmp((void *)bptr, (void *)tmp_rnid.Addr.FcAddr.WWPN,
3638 		    EXT_DEF_WWN_NAME_SIZE) == 0) {
3639 			local_hba = 1;
3640 		} else {
3641 			tq = ql_find_port(ha,
3642 			    (uint8_t *)tmp_rnid.Addr.FcAddr.WWPN, QLNT_PORT);
3643 		}
3644 	} else if (tmp_rnid.Addr.Type == EXT_DEF_TYPE_PORTID) {
3645 		/*
3646 		 * Copy caller's d_id to tmp space.
3647 		 */
3648 		bcopy(&tmp_rnid.Addr.FcAddr.Id[1], tmp_fcid.r.d_id,
3649 		    EXT_DEF_PORTID_SIZE_ACTUAL);
3650 		BIG_ENDIAN_24(&tmp_fcid.r.d_id[0]);
3651 
3652 		if (bcmp((void *)&ha->d_id, (void *)tmp_fcid.r.d_id,
3653 		    EXT_DEF_PORTID_SIZE_ACTUAL) == 0) {
3654 			local_hba = 1;
3655 		} else {
3656 			tq = ql_find_port(ha, (uint8_t *)tmp_fcid.r.d_id,
3657 			    QLNT_PID);
3658 		}
3659 	}
3660 
3661 	/* Allocate memory for command. */
3662 	tmp_buf = kmem_zalloc(SEND_RNID_RSP_SIZE, KM_SLEEP);
3663 	if (tmp_buf == NULL) {
3664 		EL(ha, "failed, kmem_zalloc\n");
3665 		cmd->Status = EXT_STATUS_NO_MEMORY;
3666 		cmd->ResponseLen = 0;
3667 		return;
3668 	}
3669 
3670 	if (local_hba) {
3671 		rval = ql_get_rnid_params(ha, SEND_RNID_RSP_SIZE, tmp_buf);
3672 		if (rval != QL_SUCCESS) {
3673 			EL(ha, "failed, get_rnid_params_mbx=%xh\n", rval);
3674 			kmem_free(tmp_buf, SEND_RNID_RSP_SIZE);
3675 			cmd->Status = EXT_STATUS_ERR;
3676 			cmd->ResponseLen = 0;
3677 			return;
3678 		}
3679 
3680 		/* Save the retrieved RNID data. */
3681 		bcopy(tmp_buf, &rnid_data, sizeof (EXT_RNID_DATA));
3682 
3683 		/* Now build the Send RNID response */
3684 		tmp_buf[0] = (char)(EXT_DEF_RNID_DFORMAT_TOPO_DISC);
3685 		tmp_buf[1] = (2 * EXT_DEF_WWN_NAME_SIZE);
3686 		tmp_buf[2] = 0;
3687 		tmp_buf[3] = sizeof (EXT_RNID_DATA);
3688 
3689 		if (CFG_IST(ha, CFG_CTRL_24258081)) {
3690 			bcopy(ha->init_ctrl_blk.cb24.port_name, &tmp_buf[4],
3691 			    EXT_DEF_WWN_NAME_SIZE);
3692 			bcopy(ha->init_ctrl_blk.cb24.node_name,
3693 			    &tmp_buf[4 + EXT_DEF_WWN_NAME_SIZE],
3694 			    EXT_DEF_WWN_NAME_SIZE);
3695 		} else {
3696 			bcopy(ha->init_ctrl_blk.cb.port_name, &tmp_buf[4],
3697 			    EXT_DEF_WWN_NAME_SIZE);
3698 			bcopy(ha->init_ctrl_blk.cb.node_name,
3699 			    &tmp_buf[4 + EXT_DEF_WWN_NAME_SIZE],
3700 			    EXT_DEF_WWN_NAME_SIZE);
3701 		}
3702 
3703 		bcopy((uint8_t *)&rnid_data,
3704 		    &tmp_buf[4 + 2 * EXT_DEF_WWN_NAME_SIZE],
3705 		    sizeof (EXT_RNID_DATA));
3706 	} else {
3707 		if (tq == NULL) {
3708 			/* no matching device */
3709 			EL(ha, "failed, device not found\n");
3710 			kmem_free(tmp_buf, SEND_RNID_RSP_SIZE);
3711 			cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
3712 			cmd->DetailStatus = EXT_DSTATUS_TARGET;
3713 			cmd->ResponseLen = 0;
3714 			return;
3715 		}
3716 
3717 		/* Send command */
3718 		rval = ql_send_rnid_els(ha, tq->loop_id,
3719 		    (uint8_t)tmp_rnid.DataFormat, SEND_RNID_RSP_SIZE, tmp_buf);
3720 		if (rval != QL_SUCCESS) {
3721 			EL(ha, "failed, send_rnid_mbx=%xh, id=%xh\n",
3722 			    rval, tq->loop_id);
3723 			while (LOOP_NOT_READY(ha)) {
3724 				ql_delay(ha, 100000);
3725 				if (loop_ready_wait-- == 0) {
3726 					EL(ha, "failed, loop not ready\n");
3727 					cmd->Status = EXT_STATUS_ERR;
3728 					cmd->ResponseLen = 0;
3729 				}
3730 			}
3731 			rval = ql_send_rnid_els(ha, tq->loop_id,
3732 			    (uint8_t)tmp_rnid.DataFormat, SEND_RNID_RSP_SIZE,
3733 			    tmp_buf);
3734 			if (rval != QL_SUCCESS) {
3735 				/* error */
3736 				EL(ha, "failed, send_rnid_mbx=%xh, id=%xh\n",
3737 				    rval, tq->loop_id);
3738 				kmem_free(tmp_buf, SEND_RNID_RSP_SIZE);
3739 				cmd->Status = EXT_STATUS_ERR;
3740 				cmd->ResponseLen = 0;
3741 				return;
3742 			}
3743 		}
3744 	}
3745 
3746 	/* Copy the response */
3747 	copy_len = (cmd->ResponseLen > SEND_RNID_RSP_SIZE) ?
3748 	    SEND_RNID_RSP_SIZE : cmd->ResponseLen;
3749 
3750 	if (ql_send_buffer_data(tmp_buf, (caddr_t)(uintptr_t)cmd->ResponseAdr,
3751 	    copy_len, mode) != copy_len) {
3752 		cmd->Status = EXT_STATUS_COPY_ERR;
3753 		EL(ha, "failed, ddi_copyout\n");
3754 	} else {
3755 		cmd->ResponseLen = copy_len;
3756 		if (copy_len < SEND_RNID_RSP_SIZE) {
3757 			cmd->Status = EXT_STATUS_DATA_OVERRUN;
3758 			EL(ha, "failed, EXT_STATUS_DATA_OVERRUN\n");
3759 
3760 		} else if (cmd->ResponseLen > SEND_RNID_RSP_SIZE) {
3761 			cmd->Status = EXT_STATUS_DATA_UNDERRUN;
3762 			EL(ha, "failed, EXT_STATUS_DATA_UNDERRUN\n");
3763 		} else {
3764 			cmd->Status = EXT_STATUS_OK;
3765 			QL_PRINT_9(CE_CONT, "(%d): done\n",
3766 			    ha->instance);
3767 		}
3768 	}
3769 
3770 	kmem_free(tmp_buf, SEND_RNID_RSP_SIZE);
3771 }
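
/*
 * Layout sketch (illustrative only, not part of the driver): the locally
 * built Send RNID response above is equivalent to the packed structure
 * below, assuming EXT_DEF_WWN_NAME_SIZE is 8.  Byte 0 carries the data
 * format, byte 1 the common identification length (two WWNs), byte 3 the
 * specific identification length, followed by the port name, node name
 * and the EXT_RNID_DATA payload.  The structure and field names are
 * hypothetical.
 *
 *	#pragma pack(1)
 *	typedef struct rnid_local_rsp {
 *		uint8_t		format;
 *		uint8_t		common_len;
 *		uint8_t		reserved;
 *		uint8_t		specific_len;
 *		uint8_t		port_name[EXT_DEF_WWN_NAME_SIZE];
 *		uint8_t		node_name[EXT_DEF_WWN_NAME_SIZE];
 *		EXT_RNID_DATA	data;
 *	} rnid_local_rsp_t;
 *	#pragma pack()
 */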
3772 
3773 /*
3774  * ql_set_host_data
3775  *	Process IOCTL subcommand to set host/adapter related data.
3776  *
3777  * Input:
3778  *	ha:	adapter state pointer.
3779  *	cmd:	User space CT arguments pointer.
3780  *	mode:	flags.
3781  *
3782  * Returns:
3783  *	None, request status indicated in cmd->Status.
3784  *
3785  * Context:
3786  *	Kernel context.
3787  */
3788 static void
3789 ql_set_host_data(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3790 {
3791 	QL_PRINT_9(CE_CONT, "(%d): started, SubCode=%d\n", ha->instance,
3792 	    cmd->SubCode);
3793 
3794 	/*
3795 	 * Switch on the command subcode.
3796 	 */
3797 	switch (cmd->SubCode) {
3798 	case EXT_SC_SET_RNID:
3799 		ql_set_rnid_parameters(ha, cmd, mode);
3800 		break;
3801 	case EXT_SC_RST_STATISTICS:
3802 		(void) ql_reset_statistics(ha, cmd);
3803 		break;
3804 	case EXT_SC_SET_BEACON_STATE:
3805 		ql_set_led_state(ha, cmd, mode);
3806 		break;
3807 	case EXT_SC_SET_PARMS:
3808 	case EXT_SC_SET_BUS_MODE:
3809 	case EXT_SC_SET_DR_DUMP_BUF:
3810 	case EXT_SC_SET_RISC_CODE:
3811 	case EXT_SC_SET_FLASH_RAM:
3812 	case EXT_SC_SET_LUN_BITMASK:
3813 	case EXT_SC_SET_RETRY_CNT:
3814 	case EXT_SC_SET_RTIN:
3815 	case EXT_SC_SET_FC_LUN_BITMASK:
3816 	case EXT_SC_ADD_TARGET_DEVICE:
3817 	case EXT_SC_SWAP_TARGET_DEVICE:
3818 	case EXT_SC_SET_SEL_TIMEOUT:
3819 	default:
3820 		/* function not supported. */
3821 		EL(ha, "failed, function not supported=%d\n", cmd->SubCode);
3822 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
3823 		break;
3824 	}
3825 
3826 	if (cmd->Status != EXT_STATUS_OK) {
3827 		EL(ha, "failed, Status=%d\n", cmd->Status);
3828 	} else {
3829 		/*EMPTY*/
3830 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3831 	}
3832 }
3833 
3834 /*
3835  * ql_get_host_data
3836  *	Performs EXT_CC_GET_DATA subcommands.
3837  *
3838  * Input:
3839  *	ha:	adapter state pointer.
3840  *	cmd:	Local EXT_IOCTL cmd struct pointer.
3841  *	mode:	flags.
3842  *
3843  * Returns:
3844  *	None, request status indicated in cmd->Status.
3845  *
3846  * Context:
3847  *	Kernel context.
3848  */
3849 static void
3850 ql_get_host_data(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3851 {
3852 	int	out_size = 0;
3853 
3854 	QL_PRINT_9(CE_CONT, "(%d): started, SubCode=%d\n", ha->instance,
3855 	    cmd->SubCode);
3856 
3857 	/* Switch on the command subcode. */
3858 	switch (cmd->SubCode) {
3859 	case EXT_SC_GET_STATISTICS:
3860 		out_size = sizeof (EXT_HBA_PORT_STAT);
3861 		break;
3862 	case EXT_SC_GET_FC_STATISTICS:
3863 		out_size = sizeof (EXT_HBA_PORT_STAT);
3864 		break;
3865 	case EXT_SC_GET_PORT_SUMMARY:
3866 		out_size = sizeof (EXT_DEVICEDATA);
3867 		break;
3868 	case EXT_SC_GET_RNID:
3869 		out_size = sizeof (EXT_RNID_DATA);
3870 		break;
3871 	case EXT_SC_GET_TARGET_ID:
3872 		out_size = sizeof (EXT_DEST_ADDR);
3873 		break;
3874 	case EXT_SC_GET_BEACON_STATE:
3875 		out_size = sizeof (EXT_BEACON_CONTROL);
3876 		break;
3877 	case EXT_SC_GET_FC4_STATISTICS:
3878 		out_size = sizeof (EXT_HBA_FC4STATISTICS);
3879 		break;
3880 	case EXT_SC_GET_DCBX_PARAM:
3881 		out_size = EXT_DEF_DCBX_PARAM_BUF_SIZE;
3882 		break;
3883 	case EXT_SC_GET_RESOURCE_CNTS:
3884 		out_size = sizeof (EXT_RESOURCE_CNTS);
3885 		break;
3886 	case EXT_SC_GET_FCF_LIST:
3887 		out_size = sizeof (EXT_FCF_LIST);
3888 		break;
3889 	case EXT_SC_GET_SCSI_ADDR:
3890 	case EXT_SC_GET_ERR_DETECTIONS:
3891 	case EXT_SC_GET_BUS_MODE:
3892 	case EXT_SC_GET_DR_DUMP_BUF:
3893 	case EXT_SC_GET_RISC_CODE:
3894 	case EXT_SC_GET_FLASH_RAM:
3895 	case EXT_SC_GET_LINK_STATUS:
3896 	case EXT_SC_GET_LOOP_ID:
3897 	case EXT_SC_GET_LUN_BITMASK:
3898 	case EXT_SC_GET_PORT_DATABASE:
3899 	case EXT_SC_GET_PORT_DATABASE_MEM:
3900 	case EXT_SC_GET_POSITION_MAP:
3901 	case EXT_SC_GET_RETRY_CNT:
3902 	case EXT_SC_GET_RTIN:
3903 	case EXT_SC_GET_FC_LUN_BITMASK:
3904 	case EXT_SC_GET_SEL_TIMEOUT:
3905 	default:
3906 		/* function not supported. */
3907 		EL(ha, "failed, function not supported=%d\n", cmd->SubCode);
3908 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
3909 		cmd->ResponseLen = 0;
3910 		return;
3911 	}
3912 
3913 	if (cmd->ResponseLen < out_size) {
3914 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
3915 		cmd->DetailStatus = out_size;
3916 		EL(ha, "failed, ResponseLen=%xh, size=%xh\n",
3917 		    cmd->ResponseLen, out_size);
3918 		cmd->ResponseLen = 0;
3919 		return;
3920 	}
3921 
3922 	switch (cmd->SubCode) {
3923 	case EXT_SC_GET_RNID:
3924 		ql_get_rnid_parameters(ha, cmd, mode);
3925 		break;
3926 	case EXT_SC_GET_STATISTICS:
3927 		ql_get_statistics(ha, cmd, mode);
3928 		break;
3929 	case EXT_SC_GET_FC_STATISTICS:
3930 		ql_get_statistics_fc(ha, cmd, mode);
3931 		break;
3932 	case EXT_SC_GET_FC4_STATISTICS:
3933 		ql_get_statistics_fc4(ha, cmd, mode);
3934 		break;
3935 	case EXT_SC_GET_PORT_SUMMARY:
3936 		ql_get_port_summary(ha, cmd, mode);
3937 		break;
3938 	case EXT_SC_GET_TARGET_ID:
3939 		ql_get_target_id(ha, cmd, mode);
3940 		break;
3941 	case EXT_SC_GET_BEACON_STATE:
3942 		ql_get_led_state(ha, cmd, mode);
3943 		break;
3944 	case EXT_SC_GET_DCBX_PARAM:
3945 		ql_get_dcbx_parameters(ha, cmd, mode);
3946 		break;
3947 	case EXT_SC_GET_FCF_LIST:
3948 		ql_get_fcf_list(ha, cmd, mode);
3949 		break;
3950 	case EXT_SC_GET_RESOURCE_CNTS:
3951 		ql_get_resource_counts(ha, cmd, mode);
3952 		break;
3953 	}
3954 
3955 	if (cmd->Status != EXT_STATUS_OK) {
3956 		EL(ha, "failed, Status=%d\n", cmd->Status);
3957 	} else {
3958 		/*EMPTY*/
3959 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3960 	}
3961 }
3962 
3963 /* ******************************************************************** */
3964 /*			Helper Functions				*/
3965 /* ******************************************************************** */
3966 
3967 /*
3968  * ql_lun_count
3969  *	Get the number of LUNs on a target.
3970  *
3971  * Input:
3972  *	ha:	adapter state pointer.
3973  *	tq:	target queue pointer.
3974  *
3975  * Returns:
3976  *	Number of LUNs.
3977  *
3978  * Context:
3979  *	Kernel context.
3980  */
3981 static int
3982 ql_lun_count(ql_adapter_state_t *ha, ql_tgt_t *tq)
3983 {
3984 	int	cnt;
3985 
3986 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3987 
3988 	/* Try REPORT LUNS first; fall back to an INQUIRY scan if it fails. */
3989 	cnt = ql_report_lun(ha, tq);
3990 	if (cnt == 0) {
3991 		cnt = ql_inq_scan(ha, tq, ha->maximum_luns_per_target);
3992 	}
3993 
3994 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3995 
3996 	return (cnt);
3997 }
3998 
3999 /*
4000  * ql_report_lun
4001  *	Get the number of LUNs using the SCSI REPORT LUNS command.
4002  *
4003  * Input:
4004  *	ha:	adapter state pointer.
4005  *	tq:	target queue pointer.
4006  *
4007  * Returns:
4008  *	Number of LUNs.
4009  *
4010  * Context:
4011  *	Kernel context.
4012  */
4013 static int
4014 ql_report_lun(ql_adapter_state_t *ha, ql_tgt_t *tq)
4015 {
4016 	int			rval;
4017 	uint8_t			retries;
4018 	ql_mbx_iocb_t		*pkt;
4019 	ql_rpt_lun_lst_t	*rpt;
4020 	dma_mem_t		dma_mem;
4021 	uint32_t		pkt_size, cnt;
4022 	uint16_t		comp_status;
4023 	uint8_t			scsi_status_h, scsi_status_l, *reqs;
4024 
4025 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
4026 
4027 	if (DRIVER_SUSPENDED(ha)) {
4028 		EL(ha, "failed, LOOP_NOT_READY\n");
4029 		return (0);
4030 	}
4031 
4032 	pkt_size = sizeof (ql_mbx_iocb_t) + sizeof (ql_rpt_lun_lst_t);
4033 	pkt = kmem_zalloc(pkt_size, KM_SLEEP);
4034 	if (pkt == NULL) {
4035 		EL(ha, "failed, kmem_zalloc\n");
4036 		return (0);
4037 	}
4038 	rpt = (ql_rpt_lun_lst_t *)((caddr_t)pkt + sizeof (ql_mbx_iocb_t));
4039 
4040 	/* Get DMA memory for the IOCB */
4041 	if (ql_get_dma_mem(ha, &dma_mem, sizeof (ql_rpt_lun_lst_t),
4042 	    LITTLE_ENDIAN_DMA, QL_DMA_RING_ALIGN) != QL_SUCCESS) {
4043 		cmn_err(CE_WARN, "%s(%d): DMA memory "
4044 		    "alloc failed", QL_NAME, ha->instance);
4045 		kmem_free(pkt, pkt_size);
4046 		return (0);
4047 	}
4048 
4049 	for (retries = 0; retries < 4; retries++) {
4050 		if (CFG_IST(ha, CFG_CTRL_24258081)) {
4051 			pkt->cmd24.entry_type = IOCB_CMD_TYPE_7;
4052 			pkt->cmd24.entry_count = 1;
4053 
4054 			/* Set N_port handle */
4055 			pkt->cmd24.n_port_hdl = (uint16_t)LE_16(tq->loop_id);
4056 
4057 			/* Set target ID */
4058 			pkt->cmd24.target_id[0] = tq->d_id.b.al_pa;
4059 			pkt->cmd24.target_id[1] = tq->d_id.b.area;
4060 			pkt->cmd24.target_id[2] = tq->d_id.b.domain;
4061 
4062 			/* Set Virtual Port ID */
4063 			pkt->cmd24.vp_index = ha->vp_index;
4064 
4065 			/* Set ISP command timeout. */
4066 			pkt->cmd24.timeout = LE_16(15);
4067 
4068 			/* Load SCSI CDB */
4069 			pkt->cmd24.scsi_cdb[0] = SCMD_REPORT_LUNS;
4070 			pkt->cmd24.scsi_cdb[6] =
4071 			    MSB(MSW(sizeof (ql_rpt_lun_lst_t)));
4072 			pkt->cmd24.scsi_cdb[7] =
4073 			    LSB(MSW(sizeof (ql_rpt_lun_lst_t)));
4074 			pkt->cmd24.scsi_cdb[8] =
4075 			    MSB(LSW(sizeof (ql_rpt_lun_lst_t)));
4076 			pkt->cmd24.scsi_cdb[9] =
4077 			    LSB(LSW(sizeof (ql_rpt_lun_lst_t)));
4078 			for (cnt = 0; cnt < MAX_CMDSZ; cnt += 4) {
4079 				ql_chg_endian((uint8_t *)&pkt->cmd24.scsi_cdb
4080 				    + cnt, 4);
4081 			}
4082 
4083 			/* Set tag queue control flags */
4084 			pkt->cmd24.task = TA_STAG;
4085 
4086 			/* Set transfer direction. */
4087 			pkt->cmd24.control_flags = CF_RD;
4088 
4089 			/* Set data segment count. */
4090 			pkt->cmd24.dseg_count = LE_16(1);
4091 
4092 			/* Load total byte count. */
4093 			/* Load data descriptor. */
4094 			pkt->cmd24.dseg_0_address[0] = (uint32_t)
4095 			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
4096 			pkt->cmd24.dseg_0_address[1] = (uint32_t)
4097 			    LE_32(MSD(dma_mem.cookie.dmac_laddress));
4098 			pkt->cmd24.total_byte_count =
4099 			    LE_32(sizeof (ql_rpt_lun_lst_t));
4100 			pkt->cmd24.dseg_0_length =
4101 			    LE_32(sizeof (ql_rpt_lun_lst_t));
4102 		} else if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
4103 			pkt->cmd3.entry_type = IOCB_CMD_TYPE_3;
4104 			pkt->cmd3.entry_count = 1;
4105 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
4106 				pkt->cmd3.target_l = LSB(tq->loop_id);
4107 				pkt->cmd3.target_h = MSB(tq->loop_id);
4108 			} else {
4109 				pkt->cmd3.target_h = LSB(tq->loop_id);
4110 			}
4111 			pkt->cmd3.control_flags_l = CF_DATA_IN | CF_STAG;
4112 			pkt->cmd3.timeout = LE_16(15);
4113 			pkt->cmd3.dseg_count = LE_16(1);
4114 			pkt->cmd3.scsi_cdb[0] = SCMD_REPORT_LUNS;
4115 			pkt->cmd3.scsi_cdb[6] =
4116 			    MSB(MSW(sizeof (ql_rpt_lun_lst_t)));
4117 			pkt->cmd3.scsi_cdb[7] =
4118 			    LSB(MSW(sizeof (ql_rpt_lun_lst_t)));
4119 			pkt->cmd3.scsi_cdb[8] =
4120 			    MSB(LSW(sizeof (ql_rpt_lun_lst_t)));
4121 			pkt->cmd3.scsi_cdb[9] =
4122 			    LSB(LSW(sizeof (ql_rpt_lun_lst_t)));
4123 			pkt->cmd3.byte_count =
4124 			    LE_32(sizeof (ql_rpt_lun_lst_t));
4125 			pkt->cmd3.dseg_0_address[0] = (uint32_t)
4126 			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
4127 			pkt->cmd3.dseg_0_address[1] = (uint32_t)
4128 			    LE_32(MSD(dma_mem.cookie.dmac_laddress));
4129 			pkt->cmd3.dseg_0_length =
4130 			    LE_32(sizeof (ql_rpt_lun_lst_t));
4131 		} else {
4132 			pkt->cmd.entry_type = IOCB_CMD_TYPE_2;
4133 			pkt->cmd.entry_count = 1;
4134 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
4135 				pkt->cmd.target_l = LSB(tq->loop_id);
4136 				pkt->cmd.target_h = MSB(tq->loop_id);
4137 			} else {
4138 				pkt->cmd.target_h = LSB(tq->loop_id);
4139 			}
4140 			pkt->cmd.control_flags_l = CF_DATA_IN | CF_STAG;
4141 			pkt->cmd.timeout = LE_16(15);
4142 			pkt->cmd.dseg_count = LE_16(1);
4143 			pkt->cmd.scsi_cdb[0] = SCMD_REPORT_LUNS;
4144 			pkt->cmd.scsi_cdb[6] =
4145 			    MSB(MSW(sizeof (ql_rpt_lun_lst_t)));
4146 			pkt->cmd.scsi_cdb[7] =
4147 			    LSB(MSW(sizeof (ql_rpt_lun_lst_t)));
4148 			pkt->cmd.scsi_cdb[8] =
4149 			    MSB(LSW(sizeof (ql_rpt_lun_lst_t)));
4150 			pkt->cmd.scsi_cdb[9] =
4151 			    LSB(LSW(sizeof (ql_rpt_lun_lst_t)));
4152 			pkt->cmd.byte_count =
4153 			    LE_32(sizeof (ql_rpt_lun_lst_t));
4154 			pkt->cmd.dseg_0_address = (uint32_t)
4155 			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
4156 			pkt->cmd.dseg_0_length =
4157 			    LE_32(sizeof (ql_rpt_lun_lst_t));
4158 		}
4159 
4160 		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
4161 		    sizeof (ql_mbx_iocb_t));
4162 
4163 		/* Sync incoming DMA buffer. */
4164 		(void) ddi_dma_sync(dma_mem.dma_handle, 0, dma_mem.size,
4165 		    DDI_DMA_SYNC_FORKERNEL);
4166 		/* Copy incoming DMA data. */
4167 		ddi_rep_get8(dma_mem.acc_handle, (uint8_t *)rpt,
4168 		    (uint8_t *)dma_mem.bp, dma_mem.size, DDI_DEV_AUTOINCR);
4169 
4170 		if (CFG_IST(ha, CFG_CTRL_24258081)) {
4171 			pkt->sts24.entry_status = (uint8_t)
4172 			    (pkt->sts24.entry_status & 0x3c);
4173 			comp_status = (uint16_t)LE_16(pkt->sts24.comp_status);
4174 			scsi_status_h = pkt->sts24.scsi_status_h;
4175 			scsi_status_l = pkt->sts24.scsi_status_l;
4176 			cnt = scsi_status_h & FCP_RSP_LEN_VALID ?
4177 			    LE_32(pkt->sts24.fcp_rsp_data_length) : 0;
4178 			reqs = &pkt->sts24.rsp_sense_data[cnt];
4179 		} else {
4180 			pkt->sts.entry_status = (uint8_t)
4181 			    (pkt->sts.entry_status & 0x7e);
4182 			comp_status = (uint16_t)LE_16(pkt->sts.comp_status);
4183 			scsi_status_h = pkt->sts.scsi_status_h;
4184 			scsi_status_l = pkt->sts.scsi_status_l;
4185 			reqs = &pkt->sts.req_sense_data[0];
4186 		}
4187 		if (rval == QL_SUCCESS && pkt->sts.entry_status != 0) {
4188 			EL(ha, "failed, entry_status=%xh, d_id=%xh\n",
4189 			    pkt->sts.entry_status, tq->d_id.b24);
4190 			rval = QL_FUNCTION_PARAMETER_ERROR;
4191 		}
4192 
4193 		if (rval != QL_SUCCESS || comp_status != CS_COMPLETE ||
4194 		    scsi_status_l & STATUS_CHECK) {
4195 			/* Device underrun, treat as OK. */
4196 			if (rval == QL_SUCCESS &&
4197 			    comp_status == CS_DATA_UNDERRUN &&
4198 			    scsi_status_h & FCP_RESID_UNDER) {
4199 				break;
4200 			}
4201 
4202 			EL(ha, "failed, issue_iocb=%xh, d_id=%xh, cs=%xh, "
4203 			    "ss_h=%xh, ss_l=%xh\n", rval, tq->d_id.b24,
4204 			    comp_status, scsi_status_h, scsi_status_l);
4205 
4206 			if (rval == QL_SUCCESS) {
4207 				if ((comp_status == CS_TIMEOUT) ||
4208 				    (comp_status == CS_PORT_UNAVAILABLE) ||
4209 				    (comp_status == CS_PORT_LOGGED_OUT)) {
4210 					rval = QL_FUNCTION_TIMEOUT;
4211 					break;
4212 				}
4213 				rval = QL_FUNCTION_FAILED;
4214 			} else if (rval == QL_ABORTED) {
4215 				break;
4216 			}
4217 
4218 			if (scsi_status_l & STATUS_CHECK) {
4219 				EL(ha, "STATUS_CHECK Sense Data\n%2xh%3xh"
4220 				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh"
4221 				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh\n", reqs[0],
4222 				    reqs[1], reqs[2], reqs[3], reqs[4],
4223 				    reqs[5], reqs[6], reqs[7], reqs[8],
4224 				    reqs[9], reqs[10], reqs[11], reqs[12],
4225 				    reqs[13], reqs[14], reqs[15], reqs[16],
4226 				    reqs[17]);
4227 			}
4228 		} else {
4229 			break;
4230 		}
4231 		bzero((caddr_t)pkt, pkt_size);
4232 	}
4233 
4234 	if (rval != QL_SUCCESS) {
4235 		EL(ha, "failed=%xh\n", rval);
4236 		rval = 0;
4237 	} else {
4238 		QL_PRINT_9(CE_CONT, "(%d): LUN list\n", ha->instance);
4239 		QL_DUMP_9(rpt, 8, rpt->hdr.len + 8);
4240 		rval = (int)(BE_32(rpt->hdr.len) / 8);
4241 	}
4242 
4243 	kmem_free(pkt, pkt_size);
4244 	ql_free_dma_resource(ha, &dma_mem);
4245 
4246 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
4247 
4248 	return (rval);
4249 }
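
/*
 * Parsing sketch (illustrative only, not part of the driver): a SCSI
 * REPORT LUNS response begins with a 4-byte, big-endian LUN list length
 * followed by 8-byte LUN entries, which is why the LUN count above is
 * BE_32(rpt->hdr.len) / 8.  A minimal stand-alone parser over a raw
 * response buffer, with a hypothetical name, might be:
 *
 *	static uint32_t
 *	report_luns_count(const uint8_t *rsp)
 *	{
 *		uint32_t	list_len;
 *
 *		list_len = (uint32_t)rsp[0] << 24 | (uint32_t)rsp[1] << 16 |
 *		    (uint32_t)rsp[2] << 8 | rsp[3];
 *		return (list_len / 8);
 *	}
 */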
4250 
4251 /*
4252  * ql_inq_scan
4253  *	Get the number of LUNs using the SCSI INQUIRY command.
4254  *
4255  * Input:
4256  *	ha:		adapter state pointer.
4257  *	tq:		target queue pointer.
4258  *	count:		scan for the number of existing LUNs.
4259  *
4260  * Returns:
4261  *	Number of LUNs.
4262  *
4263  * Context:
4264  *	Kernel context.
4265  */
4266 static int
4267 ql_inq_scan(ql_adapter_state_t *ha, ql_tgt_t *tq, int count)
4268 {
4269 	int		lun, cnt, rval;
4270 	ql_mbx_iocb_t	*pkt;
4271 	uint8_t		*inq;
4272 	uint32_t	pkt_size;
4273 
4274 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
4275 
4276 	pkt_size = sizeof (ql_mbx_iocb_t) + INQ_DATA_SIZE;
4277 	pkt = kmem_zalloc(pkt_size, KM_SLEEP);
4278 	if (pkt == NULL) {
4279 		EL(ha, "failed, kmem_zalloc\n");
4280 		return (0);
4281 	}
4282 	inq = (uint8_t *)((caddr_t)pkt + sizeof (ql_mbx_iocb_t));
4283 
4284 	cnt = 0;
4285 	for (lun = 0; lun < MAX_LUNS; lun++) {
4286 
4287 		if (DRIVER_SUSPENDED(ha)) {
4288 			rval = QL_LOOP_DOWN;
4289 			cnt = 0;
4290 			break;
4291 		}
4292 
4293 		rval = ql_inq(ha, tq, lun, pkt, INQ_DATA_SIZE);
4294 		if (rval == QL_SUCCESS) {
4295 			switch (*inq) {
4296 			case DTYPE_DIRECT:
4297 			case DTYPE_PROCESSOR:	/* Appliance. */
4298 			case DTYPE_WORM:
4299 			case DTYPE_RODIRECT:
4300 			case DTYPE_SCANNER:
4301 			case DTYPE_OPTICAL:
4302 			case DTYPE_CHANGER:
4303 			case DTYPE_ESI:
4304 				cnt++;
4305 				break;
4306 			case DTYPE_SEQUENTIAL:
4307 				cnt++;
4308 				tq->flags |= TQF_TAPE_DEVICE;
4309 				break;
4310 			default:
4311 				QL_PRINT_9(CE_CONT, "(%d): failed, "
4312 				    "unsupported device id=%xh, lun=%d, "
4313 				    "type=%xh\n", ha->instance, tq->loop_id,
4314 				    lun, *inq);
4315 				break;
4316 			}
4317 
4318 			if (*inq == DTYPE_ESI || cnt >= count) {
4319 				break;
4320 			}
4321 		} else if (rval == QL_ABORTED || rval == QL_FUNCTION_TIMEOUT) {
4322 			cnt = 0;
4323 			break;
4324 		}
4325 	}
4326 
4327 	kmem_free(pkt, pkt_size);
4328 
4329 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
4330 
4331 	return (cnt);
4332 }
4333 
4334 /*
4335  * ql_inq
4336  *	Issue inquiry command.
4337  *
4338  * Input:
4339  *	ha:		adapter state pointer.
4340  *	tq:		target queue pointer.
4341  *	lun:		LUN number.
4342  *	pkt:		command and buffer pointer.
4343  *	inq_len:	amount of inquiry data.
4344  *
4345  * Returns:
4346  *	ql local function return status code.
4347  *
4348  * Context:
4349  *	Kernel context.
4350  */
4351 static int
4352 ql_inq(ql_adapter_state_t *ha, ql_tgt_t *tq, int lun, ql_mbx_iocb_t *pkt,
4353     uint8_t inq_len)
4354 {
4355 	dma_mem_t	dma_mem;
4356 	int		rval, retries;
4357 	uint32_t	pkt_size, cnt;
4358 	uint16_t	comp_status;
4359 	uint8_t		scsi_status_h, scsi_status_l, *reqs;
4360 	caddr_t		inq_data;
4361 
4362 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
4363 
4364 	if (DRIVER_SUSPENDED(ha)) {
4365 		EL(ha, "failed, loop down\n");
4366 		return (QL_FUNCTION_TIMEOUT);
4367 	}
4368 
4369 	pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + inq_len);
4370 	bzero((caddr_t)pkt, pkt_size);
4371 
4372 	inq_data = (caddr_t)pkt + sizeof (ql_mbx_iocb_t);
4373 
4374 	/* Get DMA memory for the IOCB */
4375 	if (ql_get_dma_mem(ha, &dma_mem, inq_len,
4376 	    LITTLE_ENDIAN_DMA, QL_DMA_RING_ALIGN) != QL_SUCCESS) {
4377 		cmn_err(CE_WARN, "%s(%d): DMA memory "
4378 		    "alloc failed", QL_NAME, ha->instance);
4379 		return (0);
4380 	}
4381 
4382 	for (retries = 0; retries < 4; retries++) {
4383 		if (CFG_IST(ha, CFG_CTRL_24258081)) {
4384 			pkt->cmd24.entry_type = IOCB_CMD_TYPE_7;
4385 			pkt->cmd24.entry_count = 1;
4386 
4387 			/* Set LUN number */
4388 			pkt->cmd24.fcp_lun[2] = LSB(lun);
4389 			pkt->cmd24.fcp_lun[3] = MSB(lun);
4390 
4391 			/* Set N_port handle */
4392 			pkt->cmd24.n_port_hdl = (uint16_t)LE_16(tq->loop_id);
4393 
4394 			/* Set target ID */
4395 			pkt->cmd24.target_id[0] = tq->d_id.b.al_pa;
4396 			pkt->cmd24.target_id[1] = tq->d_id.b.area;
4397 			pkt->cmd24.target_id[2] = tq->d_id.b.domain;
4398 
4399 			/* Set Virtual Port ID */
4400 			pkt->cmd24.vp_index = ha->vp_index;
4401 
4402 			/* Set ISP command timeout. */
4403 			pkt->cmd24.timeout = LE_16(15);
4404 
4405 			/* Load SCSI CDB */
4406 			pkt->cmd24.scsi_cdb[0] = SCMD_INQUIRY;
4407 			pkt->cmd24.scsi_cdb[4] = inq_len;
4408 			for (cnt = 0; cnt < MAX_CMDSZ; cnt += 4) {
4409 				ql_chg_endian((uint8_t *)&pkt->cmd24.scsi_cdb
4410 				    + cnt, 4);
4411 			}
4412 
4413 			/* Set tag queue control flags */
4414 			pkt->cmd24.task = TA_STAG;
4415 
4416 			/* Set transfer direction. */
4417 			pkt->cmd24.control_flags = CF_RD;
4418 
4419 			/* Set data segment count. */
4420 			pkt->cmd24.dseg_count = LE_16(1);
4421 
4422 			/* Load total byte count. */
4423 			pkt->cmd24.total_byte_count = LE_32(inq_len);
4424 
4425 			/* Load data descriptor. */
4426 			pkt->cmd24.dseg_0_address[0] = (uint32_t)
4427 			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
4428 			pkt->cmd24.dseg_0_address[1] = (uint32_t)
4429 			    LE_32(MSD(dma_mem.cookie.dmac_laddress));
4430 			pkt->cmd24.dseg_0_length = LE_32(inq_len);
4431 		} else if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
4432 			pkt->cmd3.entry_type = IOCB_CMD_TYPE_3;
4433 			cnt = CMD_TYPE_3_DATA_SEGMENTS;
4434 
4435 			pkt->cmd3.entry_count = 1;
4436 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
4437 				pkt->cmd3.target_l = LSB(tq->loop_id);
4438 				pkt->cmd3.target_h = MSB(tq->loop_id);
4439 			} else {
4440 				pkt->cmd3.target_h = LSB(tq->loop_id);
4441 			}
4442 			pkt->cmd3.lun_l = LSB(lun);
4443 			pkt->cmd3.lun_h = MSB(lun);
4444 			pkt->cmd3.control_flags_l = CF_DATA_IN | CF_STAG;
4445 			pkt->cmd3.timeout = LE_16(15);
4446 			pkt->cmd3.scsi_cdb[0] = SCMD_INQUIRY;
4447 			pkt->cmd3.scsi_cdb[4] = inq_len;
4448 			pkt->cmd3.dseg_count = LE_16(1);
4449 			pkt->cmd3.byte_count = LE_32(inq_len);
4450 			pkt->cmd3.dseg_0_address[0] = (uint32_t)
4451 			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
4452 			pkt->cmd3.dseg_0_address[1] = (uint32_t)
4453 			    LE_32(MSD(dma_mem.cookie.dmac_laddress));
4454 			pkt->cmd3.dseg_0_length = LE_32(inq_len);
4455 		} else {
4456 			pkt->cmd.entry_type = IOCB_CMD_TYPE_2;
4457 			cnt = CMD_TYPE_2_DATA_SEGMENTS;
4458 
4459 			pkt->cmd.entry_count = 1;
4460 			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
4461 				pkt->cmd.target_l = LSB(tq->loop_id);
4462 				pkt->cmd.target_h = MSB(tq->loop_id);
4463 			} else {
4464 				pkt->cmd.target_h = LSB(tq->loop_id);
4465 			}
4466 			pkt->cmd.lun_l = LSB(lun);
4467 			pkt->cmd.lun_h = MSB(lun);
4468 			pkt->cmd.control_flags_l = CF_DATA_IN | CF_STAG;
4469 			pkt->cmd.timeout = LE_16(15);
4470 			pkt->cmd.scsi_cdb[0] = SCMD_INQUIRY;
4471 			pkt->cmd.scsi_cdb[4] = inq_len;
4472 			pkt->cmd.dseg_count = LE_16(1);
4473 			pkt->cmd.byte_count = LE_32(inq_len);
4474 			pkt->cmd.dseg_0_address = (uint32_t)
4475 			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
4476 			pkt->cmd.dseg_0_length = LE_32(inq_len);
4477 		}
4478 
4479 /*		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, pkt_size); */
4480 		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
4481 		    sizeof (ql_mbx_iocb_t));
4482 
4483 		/* Sync incoming IOCB DMA buffer. */
4484 		(void) ddi_dma_sync(dma_mem.dma_handle, 0, dma_mem.size,
4485 		    DDI_DMA_SYNC_FORKERNEL);
4486 		/* Copy incoming DMA data. */
4487 		ddi_rep_get8(dma_mem.acc_handle, (uint8_t *)inq_data,
4488 		    (uint8_t *)dma_mem.bp, dma_mem.size, DDI_DEV_AUTOINCR);
4489 
4490 		if (CFG_IST(ha, CFG_CTRL_24258081)) {
4491 			pkt->sts24.entry_status = (uint8_t)
4492 			    (pkt->sts24.entry_status & 0x3c);
4493 			comp_status = (uint16_t)LE_16(pkt->sts24.comp_status);
4494 			scsi_status_h = pkt->sts24.scsi_status_h;
4495 			scsi_status_l = pkt->sts24.scsi_status_l;
4496 			cnt = scsi_status_h & FCP_RSP_LEN_VALID ?
4497 			    LE_32(pkt->sts24.fcp_rsp_data_length) : 0;
4498 			reqs = &pkt->sts24.rsp_sense_data[cnt];
4499 		} else {
4500 			pkt->sts.entry_status = (uint8_t)
4501 			    (pkt->sts.entry_status & 0x7e);
4502 			comp_status = (uint16_t)LE_16(pkt->sts.comp_status);
4503 			scsi_status_h = pkt->sts.scsi_status_h;
4504 			scsi_status_l = pkt->sts.scsi_status_l;
4505 			reqs = &pkt->sts.req_sense_data[0];
4506 		}
4507 		if (rval == QL_SUCCESS && pkt->sts.entry_status != 0) {
4508 			EL(ha, "failed, entry_status=%xh, d_id=%xh\n",
4509 			    pkt->sts.entry_status, tq->d_id.b24);
4510 			rval = QL_FUNCTION_PARAMETER_ERROR;
4511 		}
4512 
4513 		if (rval != QL_SUCCESS || comp_status != CS_COMPLETE ||
4514 		    scsi_status_l & STATUS_CHECK) {
4515 			EL(ha, "failed, issue_iocb=%xh, d_id=%xh, cs=%xh, "
4516 			    "ss_h=%xh, ss_l=%xh\n", rval, tq->d_id.b24,
4517 			    comp_status, scsi_status_h, scsi_status_l);
4518 
4519 			if (rval == QL_SUCCESS) {
4520 				if ((comp_status == CS_TIMEOUT) ||
4521 				    (comp_status == CS_PORT_UNAVAILABLE) ||
4522 				    (comp_status == CS_PORT_LOGGED_OUT)) {
4523 					rval = QL_FUNCTION_TIMEOUT;
4524 					break;
4525 				}
4526 				rval = QL_FUNCTION_FAILED;
4527 			}
4528 
4529 			if (scsi_status_l & STATUS_CHECK) {
4530 				EL(ha, "STATUS_CHECK Sense Data\n%2xh%3xh"
4531 				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh"
4532 				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh\n", reqs[0],
4533 				    reqs[1], reqs[2], reqs[3], reqs[4],
4534 				    reqs[5], reqs[6], reqs[7], reqs[8],
4535 				    reqs[9], reqs[10], reqs[11], reqs[12],
4536 				    reqs[13], reqs[14], reqs[15], reqs[16],
4537 				    reqs[17]);
4538 			}
4539 		} else {
4540 			break;
4541 		}
4542 	}
4543 	ql_free_dma_resource(ha, &dma_mem);
4544 
4545 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
4546 
4547 	return (rval);
4548 }
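
/*
 * CDB sketch (illustrative only, not part of the driver): the INQUIRY
 * command built above uses a 6-byte CDB with the opcode in byte 0 and the
 * allocation length in byte 4; the peripheral device type tested by
 * ql_inq_scan() is returned in the first byte of the inquiry data.  A
 * minimal builder, with a hypothetical name, could be:
 *
 *	static void
 *	build_inquiry_cdb(uint8_t cdb[6], uint8_t alloc_len)
 *	{
 *		bzero(cdb, 6);
 *		cdb[0] = SCMD_INQUIRY;
 *		cdb[4] = alloc_len;
 *	}
 */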
4549 
4550 /*
4551  * ql_get_buffer_data
4552  *	Copies data from user space to a kernel buffer.
4553  *
4554  * Input:
4555  *	src:	User source buffer address.
4556  *	dst:	Kernel destination buffer address.
4557  *	size:	Amount of data.
4558  *	mode:	flags.
4559  *
4560  * Returns:
4561  *	Returns number of bytes transferred.
4562  *
4563  * Context:
4564  *	Kernel context.
4565  */
4566 static uint32_t
4567 ql_get_buffer_data(caddr_t src, caddr_t dst, uint32_t size, int mode)
4568 {
4569 	uint32_t	cnt;
4570 
4571 	for (cnt = 0; cnt < size; cnt++) {
4572 		if (ddi_copyin(src++, dst++, 1, mode) != 0) {
4573 			QL_PRINT_2(CE_CONT, "failed, ddi_copyin\n");
4574 			break;
4575 		}
4576 	}
4577 
4578 	return (cnt);
4579 }
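
/*
 * Note (illustrative only, not part of the driver): the byte-at-a-time
 * loop above lets the helper report exactly how many bytes were copied
 * before a fault.  Where an all-or-nothing copy is acceptable, a single
 * call would also work:
 *
 *	if (ddi_copyin(src, dst, size, mode) != 0) {
 *		return (0);
 *	}
 *	return (size);
 */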
4580 
4581 /*
4582  * ql_send_buffer_data
4583  *	Copies data from a kernel buffer to user space.
4584  *
4585  * Input:
4586  *	src:	Kernel source buffer address.
4587  *	dst:	User destination buffer address.
4588  *	size:	Amount of data.
4589  *	mode:	flags.
4590  *
4591  * Returns:
4592  *	Returns number of bytes transferred.
4593  *
4594  * Context:
4595  *	Kernel context.
4596  */
4597 static uint32_t
4598 ql_send_buffer_data(caddr_t src, caddr_t dst, uint32_t size, int mode)
4599 {
4600 	uint32_t	cnt;
4601 
4602 	for (cnt = 0; cnt < size; cnt++) {
4603 		if (ddi_copyout(src++, dst++, 1, mode) != 0) {
4604 			QL_PRINT_2(CE_CONT, "failed, ddi_copyin\n");
4605 			break;
4606 		}
4607 	}
4608 
4609 	return (cnt);
4610 }
4611 
4612 /*
4613  * ql_find_port
4614  *	Locates device queue.
4615  *
4616  * Input:
4617  *	ha:	adapter state pointer.
4618  *	name:	device port name.
4619  *
4620  * Returns:
4621  *	Returns target queue pointer.
4622  *
4623  * Context:
4624  *	Kernel context.
4625  */
4626 static ql_tgt_t *
4627 ql_find_port(ql_adapter_state_t *ha, uint8_t *name, uint16_t type)
4628 {
4629 	ql_link_t	*link;
4630 	ql_tgt_t	*tq;
4631 	uint16_t	index;
4632 
4633 	/* Scan port list for requested target */
4634 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
4635 		for (link = ha->dev[index].first; link != NULL;
4636 		    link = link->next) {
4637 			tq = link->base_address;
4638 
4639 			switch (type) {
4640 			case QLNT_LOOP_ID:
4641 				if (bcmp(name, &tq->loop_id,
4642 				    sizeof (uint16_t)) == 0) {
4643 					return (tq);
4644 				}
4645 				break;
4646 			case QLNT_PORT:
4647 				if (bcmp(name, tq->port_name, 8) == 0) {
4648 					return (tq);
4649 				}
4650 				break;
4651 			case QLNT_NODE:
4652 				if (bcmp(name, tq->node_name, 8) == 0) {
4653 					return (tq);
4654 				}
4655 				break;
4656 			case QLNT_PID:
4657 				if (bcmp(name, tq->d_id.r.d_id,
4658 				    sizeof (tq->d_id.r.d_id)) == 0) {
4659 					return (tq);
4660 				}
4661 				break;
4662 			default:
4663 				EL(ha, "failed, invalid type=%d\n",  type);
4664 				return (NULL);
4665 			}
4666 		}
4667 	}
4668 
4669 	return (NULL);
4670 }
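
/*
 * Usage sketch (illustrative only, not part of the driver): callers pass
 * the raw 8-byte world wide name (or loop ID / port ID) and a QLNT_*
 * selector, e.g. to locate a target queue by port name:
 *
 *	uint8_t		wwpn[EXT_DEF_WWN_NAME_SIZE];
 *	ql_tgt_t	*tq;
 *
 *	tq = ql_find_port(ha, wwpn, QLNT_PORT);
 *	if (tq == NULL) {
 *		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
 *		cmd->ResponseLen = 0;
 *		return;
 *	}
 */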
4671 
4672 /*
4673  * ql_24xx_flash_desc
4674  *	Get flash descriptor table.
4675  *
4676  * Input:
4677  *	ha:		adapter state pointer.
4678  *
4679  * Returns:
4680  *	ql local function return status code.
4681  *
4682  * Context:
4683  *	Kernel context.
4684  */
4685 static int
4686 ql_24xx_flash_desc(ql_adapter_state_t *ha)
4687 {
4688 	uint32_t	cnt;
4689 	uint16_t	chksum, *bp, data;
4690 	int		rval;
4691 	flash_desc_t	*fdesc;
4692 	ql_xioctl_t	*xp = ha->xioctl;
4693 
4694 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
4695 
4696 	if (ha->flash_desc_addr == 0) {
4697 		QL_PRINT_9(CE_CONT, "(%d): desc ptr=0\n", ha->instance);
4698 		return (QL_FUNCTION_FAILED);
4699 	}
4700 
4701 	if ((fdesc = kmem_zalloc(sizeof (flash_desc_t), KM_SLEEP)) == NULL) {
4702 		EL(ha, "kmem_zalloc=null\n");
4703 		return (QL_MEMORY_ALLOC_FAILED);
4704 	}
4705 	rval = ql_dump_fcode(ha, (uint8_t *)fdesc, sizeof (flash_desc_t),
4706 	    ha->flash_desc_addr << 2);
4707 	if (rval != QL_SUCCESS) {
4708 		EL(ha, "read status=%xh\n", rval);
4709 		kmem_free(fdesc, sizeof (flash_desc_t));
4710 		return (rval);
4711 	}
4712 
4713 	chksum = 0;
4714 	bp = (uint16_t *)fdesc;
4715 	for (cnt = 0; cnt < (sizeof (flash_desc_t)) / 2; cnt++) {
4716 		data = *bp++;
4717 		LITTLE_ENDIAN_16(&data);
4718 		chksum += data;
4719 	}
4720 
4721 	LITTLE_ENDIAN_32(&fdesc->flash_valid);
4722 	LITTLE_ENDIAN_16(&fdesc->flash_version);
4723 	LITTLE_ENDIAN_16(&fdesc->flash_len);
4724 	LITTLE_ENDIAN_16(&fdesc->flash_checksum);
4725 	LITTLE_ENDIAN_16(&fdesc->flash_manuf);
4726 	LITTLE_ENDIAN_16(&fdesc->flash_id);
4727 	LITTLE_ENDIAN_32(&fdesc->block_size);
4728 	LITTLE_ENDIAN_32(&fdesc->alt_block_size);
4729 	LITTLE_ENDIAN_32(&fdesc->flash_size);
4730 	LITTLE_ENDIAN_32(&fdesc->write_enable_data);
4731 	LITTLE_ENDIAN_32(&fdesc->read_timeout);
4732 
4733 	/* Flash size in the descriptor table is in units of 1024 bytes. */
4734 	fdesc->flash_size = fdesc->flash_size * 0x400;
4735 
4736 	if (chksum != 0 || fdesc->flash_valid != FLASH_DESC_VAILD ||
4737 	    fdesc->flash_version != FLASH_DESC_VERSION) {
4738 		EL(ha, "invalid descriptor table\n");
4739 		kmem_free(fdesc, sizeof (flash_desc_t));
4740 		return (QL_FUNCTION_FAILED);
4741 	}
4742 
4743 	bcopy(fdesc, &xp->fdesc, sizeof (flash_desc_t));
4744 	kmem_free(fdesc, sizeof (flash_desc_t));
4745 
4746 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
4747 
4748 	return (QL_SUCCESS);
4749 }
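
/*
 * Checksum sketch (illustrative only, not part of the driver): the flash
 * descriptor table embeds a checksum field chosen so that all 16-bit
 * little-endian words of the structure sum to zero, which is what the
 * validation above relies on.  Expressed as a stand-alone helper with a
 * hypothetical name:
 *
 *	static boolean_t
 *	flash_desc_sums_to_zero(const uint16_t *words, uint32_t nwords)
 *	{
 *		uint16_t	sum = 0;
 *		uint16_t	w;
 *
 *		while (nwords--) {
 *			w = *words++;
 *			LITTLE_ENDIAN_16(&w);
 *			sum += w;
 *		}
 *		return (sum == 0 ? B_TRUE : B_FALSE);
 *	}
 */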
4750 
4751 /*
4752  * ql_setup_flash
4753  *	Gets the manufacturer and id number of the flash chip, and
4754  *	sets up the size parameter.
4755  *
4756  * Input:
4757  *	ha:	adapter state pointer.
4758  *
4759  * Returns:
4760  *	int:	ql local function return status code.
4761  *
4762  * Context:
4763  *	Kernel context.
4764  */
4765 static int
4766 ql_setup_flash(ql_adapter_state_t *ha)
4767 {
4768 	ql_xioctl_t	*xp = ha->xioctl;
4769 	int		rval = QL_SUCCESS;
4770 
4771 	if (xp->fdesc.flash_size != 0) {
4772 		return (rval);
4773 	}
4774 
4775 	if (CFG_IST(ha, CFG_CTRL_2200) && !ha->subven_id) {
4776 		return (QL_FUNCTION_FAILED);
4777 	}
4778 
4779 	if (CFG_IST(ha, CFG_CTRL_258081)) {
4780 		/*
4781 		 * Temporarily set ha->xioctl->fdesc.flash_size to the
4782 		 * 25xx flash size so that ql_dump_fcode does not fail.
4783 		 */
4784 		if (CFG_IST(ha, CFG_CTRL_8021)) {
4785 			ha->xioctl->fdesc.flash_size = 0x800000;
4786 		} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
4787 			ha->xioctl->fdesc.flash_size = 0x200000;
4788 		} else {
4789 			ha->xioctl->fdesc.flash_size = 0x400000;
4790 		}
4791 
4792 		if (ql_24xx_flash_desc(ha) == QL_SUCCESS) {
4793 			EL(ha, "flash desc table ok, exit\n");
4794 			return (rval);
4795 		}
4796 		if (CFG_IST(ha, CFG_CTRL_8021)) {
4797 			xp->fdesc.flash_manuf = WINBOND_FLASH;
4798 			xp->fdesc.flash_id = WINBOND_FLASHID;
4799 			xp->fdesc.flash_len = 0x17;
4800 		} else {
4801 			(void) ql_24xx_flash_id(ha);
4802 		}
4803 
4804 	} else if (CFG_IST(ha, CFG_CTRL_2422)) {
4805 		(void) ql_24xx_flash_id(ha);
4806 	} else {
4807 		ql_flash_enable(ha);
4808 
4809 		ql_write_flash_byte(ha, 0x5555, 0xaa);
4810 		ql_write_flash_byte(ha, 0x2aaa, 0x55);
4811 		ql_write_flash_byte(ha, 0x5555, 0x90);
4812 		xp->fdesc.flash_manuf = (uint8_t)ql_read_flash_byte(ha, 0x0000);
4813 
4814 		if (CFG_IST(ha, CFG_SBUS_CARD)) {
4815 			ql_write_flash_byte(ha, 0xaaaa, 0xaa);
4816 			ql_write_flash_byte(ha, 0x5555, 0x55);
4817 			ql_write_flash_byte(ha, 0xaaaa, 0x90);
4818 			xp->fdesc.flash_id = (uint16_t)
4819 			    ql_read_flash_byte(ha, 0x0002);
4820 		} else {
4821 			ql_write_flash_byte(ha, 0x5555, 0xaa);
4822 			ql_write_flash_byte(ha, 0x2aaa, 0x55);
4823 			ql_write_flash_byte(ha, 0x5555, 0x90);
4824 			xp->fdesc.flash_id = (uint16_t)
4825 			    ql_read_flash_byte(ha, 0x0001);
4826 		}
4827 
4828 		ql_write_flash_byte(ha, 0x5555, 0xaa);
4829 		ql_write_flash_byte(ha, 0x2aaa, 0x55);
4830 		ql_write_flash_byte(ha, 0x5555, 0xf0);
4831 
4832 		ql_flash_disable(ha);
4833 	}
4834 
4835 	/* Default flash descriptor table. */
4836 	xp->fdesc.write_statusreg_cmd = 1;
4837 	xp->fdesc.write_enable_bits = 0;
4838 	xp->fdesc.unprotect_sector_cmd = 0;
4839 	xp->fdesc.protect_sector_cmd = 0;
4840 	xp->fdesc.write_disable_bits = 0x9c;
4841 	xp->fdesc.block_size = 0x10000;
4842 	xp->fdesc.erase_cmd = 0xd8;
4843 
4844 	switch (xp->fdesc.flash_manuf) {
4845 	case AMD_FLASH:
4846 		switch (xp->fdesc.flash_id) {
4847 		case SPAN_FLASHID_2048K:
4848 			xp->fdesc.flash_size = 0x200000;
4849 			break;
4850 		case AMD_FLASHID_1024K:
4851 			xp->fdesc.flash_size = 0x100000;
4852 			break;
4853 		case AMD_FLASHID_512K:
4854 		case AMD_FLASHID_512Kt:
4855 		case AMD_FLASHID_512Kb:
4856 			if (CFG_IST(ha, CFG_SBUS_CARD)) {
4857 				xp->fdesc.flash_size = QL_SBUS_FCODE_SIZE;
4858 			} else {
4859 				xp->fdesc.flash_size = 0x80000;
4860 			}
4861 			break;
4862 		case AMD_FLASHID_128K:
4863 			xp->fdesc.flash_size = 0x20000;
4864 			break;
4865 		default:
4866 			rval = QL_FUNCTION_FAILED;
4867 			break;
4868 		}
4869 		break;
4870 	case ST_FLASH:
4871 		switch (xp->fdesc.flash_id) {
4872 		case ST_FLASHID_128K:
4873 			xp->fdesc.flash_size = 0x20000;
4874 			break;
4875 		case ST_FLASHID_512K:
4876 			xp->fdesc.flash_size = 0x80000;
4877 			break;
4878 		case ST_FLASHID_M25PXX:
4879 			if (xp->fdesc.flash_len == 0x14) {
4880 				xp->fdesc.flash_size = 0x100000;
4881 			} else if (xp->fdesc.flash_len == 0x15) {
4882 				xp->fdesc.flash_size = 0x200000;
4883 			} else {
4884 				rval = QL_FUNCTION_FAILED;
4885 			}
4886 			break;
4887 		default:
4888 			rval = QL_FUNCTION_FAILED;
4889 			break;
4890 		}
4891 		break;
4892 	case SST_FLASH:
4893 		switch (xp->fdesc.flash_id) {
4894 		case SST_FLASHID_128K:
4895 			xp->fdesc.flash_size = 0x20000;
4896 			break;
4897 		case SST_FLASHID_1024K_A:
4898 			xp->fdesc.flash_size = 0x100000;
4899 			xp->fdesc.block_size = 0x8000;
4900 			xp->fdesc.erase_cmd = 0x52;
4901 			break;
4902 		case SST_FLASHID_1024K:
4903 		case SST_FLASHID_1024K_B:
4904 			xp->fdesc.flash_size = 0x100000;
4905 			break;
4906 		case SST_FLASHID_2048K:
4907 			xp->fdesc.flash_size = 0x200000;
4908 			break;
4909 		default:
4910 			rval = QL_FUNCTION_FAILED;
4911 			break;
4912 		}
4913 		break;
4914 	case MXIC_FLASH:
4915 		switch (xp->fdesc.flash_id) {
4916 		case MXIC_FLASHID_512K:
4917 			xp->fdesc.flash_size = 0x80000;
4918 			break;
4919 		case MXIC_FLASHID_1024K:
4920 			xp->fdesc.flash_size = 0x100000;
4921 			break;
4922 		case MXIC_FLASHID_25LXX:
4923 			if (xp->fdesc.flash_len == 0x14) {
4924 				xp->fdesc.flash_size = 0x100000;
4925 			} else if (xp->fdesc.flash_len == 0x15) {
4926 				xp->fdesc.flash_size = 0x200000;
4927 			} else {
4928 				rval = QL_FUNCTION_FAILED;
4929 			}
4930 			break;
4931 		default:
4932 			rval = QL_FUNCTION_FAILED;
4933 			break;
4934 		}
4935 		break;
4936 	case ATMEL_FLASH:
4937 		switch (xp->fdesc.flash_id) {
4938 		case ATMEL_FLASHID_1024K:
4939 			xp->fdesc.flash_size = 0x100000;
4940 			xp->fdesc.write_disable_bits = 0xbc;
4941 			xp->fdesc.unprotect_sector_cmd = 0x39;
4942 			xp->fdesc.protect_sector_cmd = 0x36;
4943 			break;
4944 		default:
4945 			rval = QL_FUNCTION_FAILED;
4946 			break;
4947 		}
4948 		break;
4949 	case WINBOND_FLASH:
4950 		switch (xp->fdesc.flash_id) {
4951 		case WINBOND_FLASHID:
4952 			if (xp->fdesc.flash_len == 0x15) {
4953 				xp->fdesc.flash_size = 0x200000;
4954 			} else if (xp->fdesc.flash_len == 0x16) {
4955 				xp->fdesc.flash_size = 0x400000;
4956 			} else if (xp->fdesc.flash_len == 0x17) {
4957 				xp->fdesc.flash_size = 0x800000;
4958 			} else {
4959 				rval = QL_FUNCTION_FAILED;
4960 			}
4961 			break;
4962 		default:
4963 			rval = QL_FUNCTION_FAILED;
4964 			break;
4965 		}
4966 		break;
4967 	case INTEL_FLASH:
4968 		switch (xp->fdesc.flash_id) {
4969 		case INTEL_FLASHID:
4970 			if (xp->fdesc.flash_len == 0x11) {
4971 				xp->fdesc.flash_size = 0x200000;
4972 			} else if (xp->fdesc.flash_len == 0x12) {
4973 				xp->fdesc.flash_size = 0x400000;
4974 			} else if (xp->fdesc.flash_len == 0x13) {
4975 				xp->fdesc.flash_size = 0x800000;
4976 			} else {
4977 				rval = QL_FUNCTION_FAILED;
4978 			}
4979 			break;
4980 		default:
4981 			rval = QL_FUNCTION_FAILED;
4982 			break;
4983 		}
4984 		break;
4985 	default:
4986 		rval = QL_FUNCTION_FAILED;
4987 		break;
4988 	}
4989 
4990 	/* Try flash table later. */
4991 	if (rval != QL_SUCCESS && CFG_IST(ha, CFG_CTRL_24258081)) {
4992 		EL(ha, "no default id\n");
4993 		return (QL_SUCCESS);
4994 	}
4995 
4996 	/*
4997 	 * Hack for non-standard 2312 and 6312 boards. Hardware people need
4998 	 * to use either the original 128k flash chip or something larger.
4999 	 * For driver purposes, we treat it as a 128k flash chip.
5000 	 */
5001 	if ((ha->device_id == 0x2312 || ha->device_id == 0x6312 ||
5002 	    ha->device_id == 0x2322 || ha->device_id == 0x6322) &&
5003 	    (xp->fdesc.flash_size > 0x20000) &&
5004 	    (CFG_IST(ha, CFG_SBUS_CARD) ==  0)) {
5005 		EL(ha, "chip exceeds max size: %xh, using 128k\n",
5006 		    xp->fdesc.flash_size);
5007 		xp->fdesc.flash_size = 0x20000;
5008 	}
5009 
5010 	if (rval == QL_SUCCESS) {
5011 		EL(ha, "man_id=%xh, flash_id=%xh, size=%xh\n",
5012 		    xp->fdesc.flash_manuf, xp->fdesc.flash_id,
5013 		    xp->fdesc.flash_size);
5014 	} else {
5015 		EL(ha, "unsupported mfr / type: man_id=%xh, flash_id=%xh\n",
5016 		    xp->fdesc.flash_manuf, xp->fdesc.flash_id);
5017 	}
5018 
5019 	return (rval);
5020 }
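
/*
 * Sizing note (illustrative only, not part of the driver): for the serial
 * parts sized above by flash_len (ST M25Pxx, MXIC 25Lxx, Winbond), the
 * length byte appears to be the base-2 logarithm of the device size, so
 * those table entries are equivalent to:
 *
 *	xp->fdesc.flash_size = (uint32_t)1 << xp->fdesc.flash_len;
 *
 * e.g. 0x14 -> 0x100000 (1 MB), 0x15 -> 0x200000 (2 MB) and
 * 0x17 -> 0x800000 (8 MB).  The Intel entries above use a different
 * density encoding, so this shortcut does not apply to them.
 */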
5021 
5022 /*
5023  * ql_flash_fcode_load
5024  *	Loads fcode data into flash from application.
5025  *
5026  * Input:
5027  *	ha:	adapter state pointer.
5028  *	bp:	user buffer address.
5029  *	size:	user buffer size.
5030  *	mode:	flags
5031  *
5032  * Returns:
5033  *
5034  * Context:
5035  *	Kernel context.
5036  */
5037 static int
5038 ql_flash_fcode_load(ql_adapter_state_t *ha, void *bp, uint32_t bsize,
5039     int mode)
5040 {
5041 	uint8_t		*bfp;
5042 	ql_xioctl_t	*xp = ha->xioctl;
5043 	int		rval = 0;
5044 
5045 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5046 
5047 	if (bsize > xp->fdesc.flash_size) {
5048 		EL(ha, "failed, bufsize: %xh, flash size: %xh\n", bsize,
5049 		    xp->fdesc.flash_size);
5050 		return (ENOMEM);
5051 	}
5052 
5053 	if ((bfp = (uint8_t *)kmem_zalloc(bsize, KM_SLEEP)) == NULL) {
5054 		EL(ha, "failed, kmem_zalloc\n");
5055 		rval = ENOMEM;
5056 	} else  {
5057 		if (ddi_copyin(bp, bfp, bsize, mode) != 0) {
5058 			EL(ha, "failed, ddi_copyin\n");
5059 			rval = EFAULT;
5060 		} else if (ql_load_fcode(ha, bfp, bsize, 0) != QL_SUCCESS) {
5061 			EL(ha, "failed, load_fcode\n");
5062 			rval = EFAULT;
5063 		} else {
5064 			/* Reset caches on all adapter instances. */
5065 			ql_update_flash_caches(ha);
5066 			rval = 0;
5067 		}
5068 		kmem_free(bfp, bsize);
5069 	}
5070 
5071 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5072 
5073 	return (rval);
5074 }
5075 
5076 /*
5077  * ql_load_fcode
5078  *	Loads fcode in to flash.
5079  *
5080  * Input:
5081  *	ha:	adapter state pointer.
5082  *	dp:	data pointer.
5083  *	size:	data length.
5084  *	addr:	flash byte address.
5085  *
5086  * Returns:
5087  *	ql local function return status code.
5088  *
5089  * Context:
5090  *	Kernel context.
5091  */
5092 int
5093 ql_load_fcode(ql_adapter_state_t *ha, uint8_t *dp, uint32_t size, uint32_t addr)
5094 {
5095 	uint32_t	cnt;
5096 	int		rval;
5097 
5098 	if (CFG_IST(ha, CFG_CTRL_24258081)) {
5099 		return (ql_24xx_load_flash(ha, dp, size, addr));
5100 	}
5101 
5102 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5103 
5104 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
5105 		/*
5106 		 * SBus cards have an additional check to make
5107 		 * sure we don't brick the HBA.
5108 		 */
5109 		if (dp[0] != 0xf1) {
5110 			EL(ha, "failed, incorrect fcode for sbus\n");
5111 			return (QL_FUNCTION_PARAMETER_ERROR);
5112 		}
5113 	}
5114 
5115 	GLOBAL_HW_LOCK();
5116 
5117 	/* Enable Flash Read/Write. */
5118 	ql_flash_enable(ha);
5119 
5120 	/* Erase flash prior to write. */
5121 	rval = ql_erase_flash(ha, 0);
5122 
5123 	if (rval == QL_SUCCESS) {
5124 		/* Write fcode data to flash. */
5125 		for (cnt = 0; cnt < (uint32_t)size; cnt++) {
5126 			/* Allow other system activity. */
5127 			if (cnt % 0x1000 == 0) {
5128 				drv_usecwait(1);
5129 			}
5130 			rval = ql_program_flash_address(ha, addr++, *dp++);
5131 			if (rval != QL_SUCCESS)
5132 				break;
5133 		}
5134 	}
5135 
5136 	ql_flash_disable(ha);
5137 
5138 	GLOBAL_HW_UNLOCK();
5139 
5140 	if (rval != QL_SUCCESS) {
5141 		EL(ha, "failed, rval=%xh\n", rval);
5142 	} else {
5143 		/*EMPTY*/
5144 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5145 	}
5146 	return (rval);
5147 }
5148 
5149 /*
5150  * ql_flash_fcode_dump
5151  *	Dumps FLASH to application.
5152  *
5153  * Input:
5154  *	ha:	adapter state pointer.
5155  *	bp:	user buffer address.
5156  *	bsize:	user buffer size
5157  *	faddr:	flash byte address
5158  *	mode:	flags
5159  *
5160  * Returns:
5161  *
5162  * Context:
5163  *	Kernel context.
5164  */
5165 static int
5166 ql_flash_fcode_dump(ql_adapter_state_t *ha, void *bp, uint32_t bsize,
5167     uint32_t faddr, int mode)
5168 {
5169 	uint8_t		*bfp;
5170 	int		rval;
5171 	ql_xioctl_t	*xp = ha->xioctl;
5172 
5173 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5174 
5175 	/* adjust max read size to flash size */
5176 	if (bsize > xp->fdesc.flash_size) {
5177 		EL(ha, "adjusting req=%xh, max=%xh\n", bsize,
5178 		    xp->fdesc.flash_size);
5179 		bsize = xp->fdesc.flash_size;
5180 	}
5181 
5182 	if ((bfp = (uint8_t *)kmem_zalloc(bsize, KM_SLEEP)) == NULL) {
5183 		EL(ha, "failed, kmem_zalloc\n");
5184 		rval = ENOMEM;
5185 	} else {
5186 		/* Dump Flash fcode. */
5187 		rval = ql_dump_fcode(ha, bfp, bsize, faddr);
5188 
5189 		if (rval != QL_SUCCESS) {
5190 			EL(ha, "failed, dump_fcode = %x\n", rval);
5191 			rval = EFAULT;
5192 		} else if (ddi_copyout(bfp, bp, bsize, mode) != 0) {
5193 			EL(ha, "failed, ddi_copyout\n");
5194 			rval = EFAULT;
5195 		} else {
5196 			rval = 0;
5197 		}
5198 		kmem_free(bfp, bsize);
5199 	}
5200 
5201 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5202 
5203 	return (rval);
5204 }
5205 
5206 /*
5207  * ql_dump_fcode
5208  *	Dumps fcode from flash.
5209  *
5210  * Input:
5211  *	ha:		adapter state pointer.
5212  *	dp:		data pointer.
5213  *	size:		data length in bytes.
5214  *	startpos:	starting position in flash (byte address).
5215  *
5216  * Returns:
5217  *	ql local function return status code.
5218  *
5219  * Context:
5220  *	Kernel context.
5221  *
5222  */
5223 int
5224 ql_dump_fcode(ql_adapter_state_t *ha, uint8_t *dp, uint32_t size,
5225     uint32_t startpos)
5226 {
5227 	uint32_t	cnt, data, addr;
5228 	uint8_t		bp[4], *src;
5229 	int		fp_rval, rval = QL_SUCCESS;
5230 	dma_mem_t	mem;
5231 
5232 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5233 
5234 	/* make sure startpos+size doesn't exceed flash */
5235 	if (size + startpos > ha->xioctl->fdesc.flash_size) {
5236 		EL(ha, "exceeded flash range, sz=%xh, stp=%xh, flsz=%xh\n",
5237 		    size, startpos, ha->xioctl->fdesc.flash_size);
5238 		return (QL_FUNCTION_PARAMETER_ERROR);
5239 	}
5240 
5241 	if (CFG_IST(ha, CFG_CTRL_24258081)) {
5242 		/* check start addr is 32 bit aligned for 24xx */
5243 		if ((startpos & 0x3) != 0) {
5244 			rval = ql_24xx_read_flash(ha,
5245 			    ha->flash_data_addr | startpos >> 2, &data);
5246 			if (rval != QL_SUCCESS) {
5247 				EL(ha, "failed2, rval = %xh\n", rval);
5248 				return (rval);
5249 			}
5250 			bp[0] = LSB(LSW(data));
5251 			bp[1] = MSB(LSW(data));
5252 			bp[2] = LSB(MSW(data));
5253 			bp[3] = MSB(MSW(data));
5254 			while (size && startpos & 0x3) {
5255 				*dp++ = bp[startpos & 0x3];
5256 				startpos++;
5257 				size--;
5258 			}
5259 			if (size == 0) {
5260 				QL_PRINT_9(CE_CONT, "(%d): done2\n",
5261 				    ha->instance);
5262 				return (rval);
5263 			}
5264 		}
5265 
5266 		/* adjust 24xx start addr for 32 bit words */
5267 		addr = startpos / 4 | ha->flash_data_addr;
5268 	}
5269 
5270 	bzero(&mem, sizeof (dma_mem_t));
5271 	/* Check whether fast page read is supported. */
5272 	if ((ha->pha->task_daemon_flags & FIRMWARE_UP) &&
5273 	    (CFG_IST(ha, CFG_CTRL_2581))) {
5274 		fp_rval = QL_SUCCESS;
5275 		/* Setup DMA buffer. */
5276 		rval = ql_get_dma_mem(ha, &mem, size,
5277 		    LITTLE_ENDIAN_DMA, QL_DMA_DATA_ALIGN);
5278 		if (rval != QL_SUCCESS) {
5279 			EL(ha, "failed, ql_get_dma_mem=%xh\n",
5280 			    rval);
5281 			return (ENOMEM);
5282 		}
5283 	} else {
5284 		fp_rval = QL_NOT_SUPPORTED;
5285 	}
5286 
5287 	GLOBAL_HW_LOCK();
5288 
5289 	/* Enable Flash Read/Write. */
5290 	if (CFG_IST(ha, CFG_CTRL_24258081) == 0) {
5291 		ql_flash_enable(ha);
5292 	}
5293 
5294 	/* Read fcode data from flash. */
5295 	while (size) {
5296 		/* Allow other system activity. */
5297 		if (size % 0x1000 == 0) {
5298 			ql_delay(ha, 100000);
5299 		}
5300 		if (CFG_IST(ha, CFG_CTRL_24258081)) {
5301 			if (fp_rval == QL_SUCCESS && (addr & 0x3f) == 0) {
5302 				cnt = (size + 3) >> 2;
5303 				fp_rval = ql_rd_risc_ram(ha, addr,
5304 				    mem.cookie.dmac_laddress, cnt);
5305 				if (fp_rval == QL_SUCCESS) {
5306 					for (src = mem.bp; size; size--) {
5307 						*dp++ = *src++;
5308 					}
5309 					addr += cnt;
5310 					continue;
5311 				}
5312 			}
5313 			rval = ql_24xx_read_flash(ha, addr++,
5314 			    &data);
5315 			if (rval != QL_SUCCESS) {
5316 				break;
5317 			}
5318 			bp[0] = LSB(LSW(data));
5319 			bp[1] = MSB(LSW(data));
5320 			bp[2] = LSB(MSW(data));
5321 			bp[3] = MSB(MSW(data));
5322 			for (cnt = 0; size && cnt < 4; size--) {
5323 				*dp++ = bp[cnt++];
5324 			}
5325 		} else {
5326 			*dp++ = (uint8_t)ql_read_flash_byte(ha, startpos++);
5327 			size--;
5328 		}
5329 	}
5330 
5331 	if (CFG_IST(ha, CFG_CTRL_24258081) == 0) {
5332 		ql_flash_disable(ha);
5333 	}
5334 
5335 	GLOBAL_HW_UNLOCK();
5336 
5337 	if (mem.dma_handle != NULL) {
5338 		ql_free_dma_resource(ha, &mem);
5339 	}
5340 
5341 	if (rval != QL_SUCCESS) {
5342 		EL(ha, "failed, rval = %xh\n", rval);
5343 	} else {
5344 		/*EMPTY*/
5345 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5346 	}
5347 	return (rval);
5348 }
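
/*
 * Addressing sketch (illustrative only, not part of the driver): on 24xx
 * and later parts the flash is accessed as 32-bit words, so a byte offset
 * is converted to a word address and OR'ed into the controller's flash
 * data window, as done above.  A stand-alone helper with a hypothetical
 * name would be:
 *
 *	static uint32_t
 *	flash_word_addr(ql_adapter_state_t *ha, uint32_t byte_off)
 *	{
 *		return ((byte_off >> 2) | ha->flash_data_addr);
 *	}
 *
 * The byte within the returned 32-bit word is (byte_off & 0x3), which is
 * why an unaligned startpos is satisfied by reading the containing word
 * and skipping the leading bytes.
 */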
5349 
5350 /*
5351  * ql_program_flash_address
5352  *	Program flash address.
5353  *
5354  * Input:
5355  *	ha:	adapter state pointer.
5356  *	addr:	flash byte address.
5357  *	data:	data to be written to flash.
5358  *
5359  * Returns:
5360  *	ql local function return status code.
5361  *
5362  * Context:
5363  *	Kernel context.
5364  */
5365 static int
5366 ql_program_flash_address(ql_adapter_state_t *ha, uint32_t addr,
5367     uint8_t data)
5368 {
5369 	int	rval;
5370 
5371 	/* Write Program Command Sequence */
5372 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
5373 		ql_write_flash_byte(ha, 0x5555, 0xa0);
5374 		ql_write_flash_byte(ha, addr, data);
5375 	} else {
5376 		ql_write_flash_byte(ha, 0x5555, 0xaa);
5377 		ql_write_flash_byte(ha, 0x2aaa, 0x55);
5378 		ql_write_flash_byte(ha, 0x5555, 0xa0);
5379 		ql_write_flash_byte(ha, addr, data);
5380 	}
5381 
5382 	/* Wait for write to complete. */
5383 	rval = ql_poll_flash(ha, addr, data);
5384 
5385 	if (rval != QL_SUCCESS) {
5386 		EL(ha, "failed, rval=%xh\n", rval);
5387 	}
5388 	return (rval);
5389 }
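
/*
 * Polling sketch (illustrative only, not part of the driver):
 * ql_poll_flash() is not shown in this file; one common way parallel
 * flash parts signal byte-program completion is DQ7 data polling, where
 * bit 7 of a read-back stays inverted until the operation finishes.  A
 * generic sketch under that assumption, with a hypothetical name:
 *
 *	static int
 *	poll_flash_dq7(ql_adapter_state_t *ha, uint32_t addr, uint8_t data)
 *	{
 *		uint32_t	cnt;
 *
 *		for (cnt = 0; cnt < 1000000; cnt++) {
 *			if (((uint8_t)ql_read_flash_byte(ha, addr) & 0x80) ==
 *			    (data & 0x80)) {
 *				return (QL_SUCCESS);
 *			}
 *			drv_usecwait(1);
 *		}
 *		return (QL_FUNCTION_TIMEOUT);
 *	}
 */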
5390 
5391 /*
5392  * ql_set_rnid_parameters
5393  *	Set RNID parameters.
5394  *
5395  * Input:
5396  *	ha:	adapter state pointer.
5397  *	cmd:	User space CT arguments pointer.
5398  *	mode:	flags.
5399  */
5400 static void
5401 ql_set_rnid_parameters(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5402 {
5403 	EXT_SET_RNID_REQ	tmp_set;
5404 	EXT_RNID_DATA		*tmp_buf;
5405 	int			rval = 0;
5406 
5407 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5408 
5409 	if (DRIVER_SUSPENDED(ha)) {
5410 		EL(ha, "failed, LOOP_NOT_READY\n");
5411 		cmd->Status = EXT_STATUS_BUSY;
5412 		cmd->ResponseLen = 0;
5413 		return;
5414 	}
5415 
5416 	cmd->ResponseLen = 0; /* NO response to caller. */
5417 	if (cmd->RequestLen != sizeof (EXT_SET_RNID_REQ)) {
5418 		/* parameter error */
5419 		EL(ha, "failed, RequestLen < EXT_SET_RNID_REQ, Len=%xh\n",
5420 		    cmd->RequestLen);
5421 		cmd->Status = EXT_STATUS_INVALID_PARAM;
5422 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
5423 		cmd->ResponseLen = 0;
5424 		return;
5425 	}
5426 
5427 	rval = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, &tmp_set,
5428 	    cmd->RequestLen, mode);
5429 	if (rval != 0) {
5430 		EL(ha, "failed, ddi_copyin\n");
5431 		cmd->Status = EXT_STATUS_COPY_ERR;
5432 		cmd->ResponseLen = 0;
5433 		return;
5434 	}
5435 
5436 	/* Allocate memory for command. */
5437 	tmp_buf = kmem_zalloc(sizeof (EXT_RNID_DATA), KM_SLEEP);
5438 	if (tmp_buf == NULL) {
5439 		EL(ha, "failed, kmem_zalloc\n");
5440 		cmd->Status = EXT_STATUS_NO_MEMORY;
5441 		cmd->ResponseLen = 0;
5442 		return;
5443 	}
5444 
5445 	rval = ql_get_rnid_params(ha, sizeof (EXT_RNID_DATA),
5446 	    (caddr_t)tmp_buf);
5447 	if (rval != QL_SUCCESS) {
5448 		/* error */
5449 		EL(ha, "failed, get_rnid_params_mbx=%xh\n", rval);
5450 		kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
5451 		cmd->Status = EXT_STATUS_ERR;
5452 		cmd->ResponseLen = 0;
5453 		return;
5454 	}
5455 
5456 	/* Now set the requested params. */
5457 	bcopy(tmp_set.IPVersion, tmp_buf->IPVersion, 2);
5458 	bcopy(tmp_set.UDPPortNumber, tmp_buf->UDPPortNumber, 2);
5459 	bcopy(tmp_set.IPAddress, tmp_buf->IPAddress, 16);
5460 
5461 	rval = ql_set_rnid_params(ha, sizeof (EXT_RNID_DATA),
5462 	    (caddr_t)tmp_buf);
5463 	if (rval != QL_SUCCESS) {
5464 		/* error */
5465 		EL(ha, "failed, set_rnid_params_mbx=%xh\n", rval);
5466 		cmd->Status = EXT_STATUS_ERR;
5467 		cmd->ResponseLen = 0;
5468 	}
5469 
5470 	kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
5471 
5472 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5473 }
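/*
 * Note that this is a read-modify-write update: the current RNID data is
 * read back from the firmware, only the caller-supplied IPVersion,
 * UDPPortNumber and IPAddress fields are overwritten, and the whole
 * EXT_RNID_DATA block is written back so all other fields are preserved.
 */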
5474 
5475 /*
5476  * ql_get_rnid_parameters
5477  *	Get RNID parameters.
5478  *
5479  * Input:
5480  *	ha:	adapter state pointer.
5481  *	cmd:	User space CT arguments pointer.
5482  *	mode:	flags.
5483  */
5484 static void
5485 ql_get_rnid_parameters(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5486 {
5487 	EXT_RNID_DATA	*tmp_buf;
5488 	uint32_t	rval;
5489 
5490 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5491 
5492 	if (DRIVER_SUSPENDED(ha)) {
5493 		EL(ha, "failed, LOOP_NOT_READY\n");
5494 		cmd->Status = EXT_STATUS_BUSY;
5495 		cmd->ResponseLen = 0;
5496 		return;
5497 	}
5498 
5499 	/* Allocate memory for command. */
5500 	tmp_buf = kmem_zalloc(sizeof (EXT_RNID_DATA), KM_SLEEP);
5501 	if (tmp_buf == NULL) {
5502 		EL(ha, "failed, kmem_zalloc\n");
5503 		cmd->Status = EXT_STATUS_NO_MEMORY;
5504 		cmd->ResponseLen = 0;
5505 		return;
5506 	}
5507 
5508 	/* Send command */
5509 	rval = ql_get_rnid_params(ha, sizeof (EXT_RNID_DATA),
5510 	    (caddr_t)tmp_buf);
5511 	if (rval != QL_SUCCESS) {
5512 		/* error */
5513 		EL(ha, "failed, get_rnid_params_mbx=%xh\n", rval);
5514 		kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
5515 		cmd->Status = EXT_STATUS_ERR;
5516 		cmd->ResponseLen = 0;
5517 		return;
5518 	}
5519 
5520 	/* Copy the response */
5521 	if (ql_send_buffer_data((caddr_t)tmp_buf,
5522 	    (caddr_t)(uintptr_t)cmd->ResponseAdr,
5523 	    sizeof (EXT_RNID_DATA), mode) != sizeof (EXT_RNID_DATA)) {
5524 		EL(ha, "failed, ddi_copyout\n");
5525 		cmd->Status = EXT_STATUS_COPY_ERR;
5526 		cmd->ResponseLen = 0;
5527 	} else {
5528 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5529 		cmd->ResponseLen = sizeof (EXT_RNID_DATA);
5530 	}
5531 
5532 	kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
5533 }
5534 
5535 /*
5536  * ql_reset_statistics
5537  *	Performs EXT_SC_RST_STATISTICS subcommand of EXT_CC_SET_DATA.
5538  *
5539  * Input:
5540  *	ha:	adapter state pointer.
5541  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5542  *
5543  * Returns:
5544  *	ql local function return status code; request status indicated in cmd->Status.
5545  *
5546  * Context:
5547  *	Kernel context.
5548  */
5549 static int
5550 ql_reset_statistics(ql_adapter_state_t *ha, EXT_IOCTL *cmd)
5551 {
5552 	ql_xioctl_t		*xp = ha->xioctl;
5553 	int			rval = 0;
5554 
5555 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5556 
5557 	if (DRIVER_SUSPENDED(ha)) {
5558 		EL(ha, "failed, LOOP_NOT_READY\n");
5559 		cmd->Status = EXT_STATUS_BUSY;
5560 		cmd->ResponseLen = 0;
5561 		return (QL_FUNCTION_SUSPENDED);
5562 	}
5563 
5564 	rval = ql_reset_link_status(ha);
5565 	if (rval != QL_SUCCESS) {
5566 		EL(ha, "failed, reset_link_status_mbx=%xh\n", rval);
5567 		cmd->Status = EXT_STATUS_MAILBOX;
5568 		cmd->DetailStatus = rval;
5569 		cmd->ResponseLen = 0;
5570 	}
5571 
5572 	TASK_DAEMON_LOCK(ha);
5573 	xp->IosRequested = 0;
5574 	xp->BytesRequested = 0;
5575 	xp->IOInputRequests = 0;
5576 	xp->IOOutputRequests = 0;
5577 	xp->IOControlRequests = 0;
5578 	xp->IOInputMByteCnt = 0;
5579 	xp->IOOutputMByteCnt = 0;
5580 	xp->IOOutputByteCnt = 0;
5581 	xp->IOInputByteCnt = 0;
5582 	TASK_DAEMON_UNLOCK(ha);
5583 
5584 	INTR_LOCK(ha);
5585 	xp->ControllerErrorCount = 0;
5586 	xp->DeviceErrorCount = 0;
5587 	xp->TotalLipResets = 0;
5588 	xp->TotalInterrupts = 0;
5589 	INTR_UNLOCK(ha);
5590 
5591 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5592 
5593 	return (rval);
5594 }
5595 
5596 /*
5597  * ql_get_statistics
5598  *	Performs EXT_SC_GET_STATISTICS subcommand of EXT_CC_GET_DATA.
5599  *
5600  * Input:
5601  *	ha:	adapter state pointer.
5602  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5603  *	mode:	flags.
5604  *
5605  * Returns:
5606  *	None, request status indicated in cmd->Status.
5607  *
5608  * Context:
5609  *	Kernel context.
5610  */
5611 static void
5612 ql_get_statistics(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5613 {
5614 	EXT_HBA_PORT_STAT	ps = {0};
5615 	ql_link_stats_t		*ls;
5616 	int			rval;
5617 	ql_xioctl_t		*xp = ha->xioctl;
5618 	int			retry = 10;
5619 
5620 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5621 
5622 	while (ha->task_daemon_flags &
5623 	    (ABORT_ISP_ACTIVE | LOOP_RESYNC_ACTIVE | DRIVER_STALL)) {
5624 		ql_delay(ha, 10000000);	/* 10 second delay */
5625 
5626 		retry--;
5627 
5628 		if (retry == 0) { /* effectively 100 seconds */
5629 			EL(ha, "failed, LOOP_NOT_READY\n");
5630 			cmd->Status = EXT_STATUS_BUSY;
5631 			cmd->ResponseLen = 0;
5632 			return;
5633 		}
5634 	}
5635 
5636 	/* Allocate memory for command. */
5637 	ls = kmem_zalloc(sizeof (ql_link_stats_t), KM_SLEEP);
5638 	if (ls == NULL) {
5639 		EL(ha, "failed, kmem_zalloc\n");
5640 		cmd->Status = EXT_STATUS_NO_MEMORY;
5641 		cmd->ResponseLen = 0;
5642 		return;
5643 	}
5644 
5645 	/*
5646 	 * I think these are supposed to be port statistics;
5647 	 * the loop ID or port ID should be in cmd->Instance.
5648 	 */
5649 	rval = ql_get_status_counts(ha, (uint16_t)
5650 	    (ha->task_daemon_flags & LOOP_DOWN ? 0xFF : ha->loop_id),
5651 	    sizeof (ql_link_stats_t), (caddr_t)ls, 0);
5652 	if (rval != QL_SUCCESS) {
5653 		EL(ha, "failed, get_link_status=%xh, id=%xh\n", rval,
5654 		    ha->loop_id);
5655 		cmd->Status = EXT_STATUS_MAILBOX;
5656 		cmd->DetailStatus = rval;
5657 		cmd->ResponseLen = 0;
5658 	} else {
5659 		ps.ControllerErrorCount = xp->ControllerErrorCount;
5660 		ps.DeviceErrorCount = xp->DeviceErrorCount;
5661 		ps.IoCount = (uint32_t)(xp->IOInputRequests +
5662 		    xp->IOOutputRequests + xp->IOControlRequests);
5663 		ps.MBytesCount = (uint32_t)(xp->IOInputMByteCnt +
5664 		    xp->IOOutputMByteCnt);
5665 		ps.LipResetCount = xp->TotalLipResets;
5666 		ps.InterruptCount = xp->TotalInterrupts;
5667 		ps.LinkFailureCount = LE_32(ls->link_fail_cnt);
5668 		ps.LossOfSyncCount = LE_32(ls->sync_loss_cnt);
5669 		ps.LossOfSignalsCount = LE_32(ls->signal_loss_cnt);
5670 		ps.PrimitiveSeqProtocolErrorCount = LE_32(ls->prot_err_cnt);
5671 		ps.InvalidTransmissionWordCount = LE_32(ls->inv_xmit_cnt);
5672 		ps.InvalidCRCCount = LE_32(ls->inv_crc_cnt);
5673 
5674 		rval = ddi_copyout((void *)&ps,
5675 		    (void *)(uintptr_t)cmd->ResponseAdr,
5676 		    sizeof (EXT_HBA_PORT_STAT), mode);
5677 		if (rval != 0) {
5678 			EL(ha, "failed, ddi_copyout\n");
5679 			cmd->Status = EXT_STATUS_COPY_ERR;
5680 			cmd->ResponseLen = 0;
5681 		} else {
5682 			cmd->ResponseLen = sizeof (EXT_HBA_PORT_STAT);
5683 		}
5684 	}
5685 
5686 	kmem_free(ls, sizeof (ql_link_stats_t));
5687 
5688 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5689 }
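/*
 * The EXT_HBA_PORT_STAT returned above mixes two sources: the I/O, error
 * and interrupt counters maintained by the driver in ha->xioctl, and the
 * little-endian link error counters returned by the firmware in
 * ql_link_stats_t (hence the LE_32() conversions).
 */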
5690 
5691 /*
5692  * ql_get_statistics_fc
5693  *	Performs EXT_SC_GET_FC_STATISTICS subcommand of EXT_CC_GET_DATA.
5694  *
5695  * Input:
5696  *	ha:	adapter state pointer.
5697  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5698  *	mode:	flags.
5699  *
5700  * Returns:
5701  *	None, request status indicated in cmd->Status.
5702  *
5703  * Context:
5704  *	Kernel context.
5705  */
5706 static void
5707 ql_get_statistics_fc(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5708 {
5709 	EXT_HBA_PORT_STAT	ps = {0};
5710 	ql_link_stats_t		*ls;
5711 	int			rval;
5712 	uint16_t		qlnt;
5713 	EXT_DEST_ADDR		pextdestaddr;
5714 	uint8_t			*name;
5715 	ql_tgt_t		*tq = NULL;
5716 	int			retry = 10;
5717 
5718 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5719 
5720 	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
5721 	    (void *)&pextdestaddr, sizeof (EXT_DEST_ADDR), mode) != 0) {
5722 		EL(ha, "failed, ddi_copyin\n");
5723 		cmd->Status = EXT_STATUS_COPY_ERR;
5724 		cmd->ResponseLen = 0;
5725 		return;
5726 	}
5727 
5728 	qlnt = QLNT_PORT;
5729 	name = pextdestaddr.DestAddr.WWPN;
5730 
5731 	QL_PRINT_9(CE_CONT, "(%d): wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
5732 	    ha->instance, name[0], name[1], name[2], name[3], name[4],
5733 	    name[5], name[6], name[7]);
5734 
5735 	tq = ql_find_port(ha, name, qlnt);
5736 
5737 	if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
5738 		EL(ha, "failed, fc_port not found\n");
5739 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
5740 		cmd->ResponseLen = 0;
5741 		return;
5742 	}
5743 
5744 	while (ha->task_daemon_flags &
5745 	    (ABORT_ISP_ACTIVE | LOOP_RESYNC_ACTIVE  | DRIVER_STALL)) {
5746 		ql_delay(ha, 10000000);	/* 10 second delay */
5747 
5748 		retry--;
5749 
5750 		if (retry == 0) { /* effectively 100 seconds */
5751 			EL(ha, "failed, LOOP_NOT_READY\n");
5752 			cmd->Status = EXT_STATUS_BUSY;
5753 			cmd->ResponseLen = 0;
5754 			return;
5755 		}
5756 	}
5757 
5758 	/* Allocate memory for command. */
5759 	ls = kmem_zalloc(sizeof (ql_link_stats_t), KM_SLEEP);
5760 	if (ls == NULL) {
5761 		EL(ha, "failed, kmem_zalloc\n");
5762 		cmd->Status = EXT_STATUS_NO_MEMORY;
5763 		cmd->ResponseLen = 0;
5764 		return;
5765 	}
5766 
5767 	rval = ql_get_link_status(ha, tq->loop_id, sizeof (ql_link_stats_t),
5768 	    (caddr_t)ls, 0);
5769 	if (rval != QL_SUCCESS) {
5770 		EL(ha, "failed, get_link_status=%xh, d_id=%xh\n", rval,
5771 		    tq->d_id.b24);
5772 		cmd->Status = EXT_STATUS_MAILBOX;
5773 		cmd->DetailStatus = rval;
5774 		cmd->ResponseLen = 0;
5775 	} else {
5776 		ps.LinkFailureCount = LE_32(ls->link_fail_cnt);
5777 		ps.LossOfSyncCount = LE_32(ls->sync_loss_cnt);
5778 		ps.LossOfSignalsCount = LE_32(ls->signal_loss_cnt);
5779 		ps.PrimitiveSeqProtocolErrorCount = LE_32(ls->prot_err_cnt);
5780 		ps.InvalidTransmissionWordCount = LE_32(ls->inv_xmit_cnt);
5781 		ps.InvalidCRCCount = LE_32(ls->inv_crc_cnt);
5782 
5783 		rval = ddi_copyout((void *)&ps,
5784 		    (void *)(uintptr_t)cmd->ResponseAdr,
5785 		    sizeof (EXT_HBA_PORT_STAT), mode);
5786 
5787 		if (rval != 0) {
5788 			EL(ha, "failed, ddi_copyout\n");
5789 			cmd->Status = EXT_STATUS_COPY_ERR;
5790 			cmd->ResponseLen = 0;
5791 		} else {
5792 			cmd->ResponseLen = sizeof (EXT_HBA_PORT_STAT);
5793 		}
5794 	}
5795 
5796 	kmem_free(ls, sizeof (ql_link_stats_t));
5797 
5798 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5799 }
5800 
5801 /*
5802  * ql_get_statistics_fc4
5803  *	Performs EXT_SC_GET_FC_STATISTICS subcommand of EXT_CC_GET_DATA.
5804  *
5805  * Input:
5806  *	ha:	adapter state pointer.
5807  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5808  *	mode:	flags.
5809  *
5810  * Returns:
5811  *	None, request status indicated in cmd->Status.
5812  *
5813  * Context:
5814  *	Kernel context.
5815  */
5816 static void
5817 ql_get_statistics_fc4(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5818 {
5819 	uint32_t		rval;
5820 	EXT_HBA_FC4STATISTICS	fc4stats = {0};
5821 	ql_xioctl_t		*xp = ha->xioctl;
5822 
5823 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5824 
5825 	fc4stats.InputRequests = xp->IOInputRequests;
5826 	fc4stats.OutputRequests = xp->IOOutputRequests;
5827 	fc4stats.ControlRequests = xp->IOControlRequests;
5828 	fc4stats.InputMegabytes = xp->IOInputMByteCnt;
5829 	fc4stats.OutputMegabytes = xp->IOOutputMByteCnt;
5830 
5831 	rval = ddi_copyout((void *)&fc4stats,
5832 	    (void *)(uintptr_t)cmd->ResponseAdr,
5833 	    sizeof (EXT_HBA_FC4STATISTICS), mode);
5834 
5835 	if (rval != 0) {
5836 		EL(ha, "failed, ddi_copyout\n");
5837 		cmd->Status = EXT_STATUS_COPY_ERR;
5838 		cmd->ResponseLen = 0;
5839 	} else {
5840 		cmd->ResponseLen = sizeof (EXT_HBA_FC4STATISTICS);
5841 	}
5842 
5843 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5844 }
5845 
5846 /*
5847  * ql_set_led_state
5848  *	Performs EXT_SET_BEACON_STATE subcommand of EXT_CC_SET_DATA.
5849  *
5850  * Input:
5851  *	ha:	adapter state pointer.
5852  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5853  *	mode:	flags.
5854  *
5855  * Returns:
5856  *	None, request status indicated in cmd->Status.
5857  *
5858  * Context:
5859  *	Kernel context.
5860  */
5861 static void
5862 ql_set_led_state(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5863 {
5864 	EXT_BEACON_CONTROL	bstate;
5865 	uint32_t		rval;
5866 	ql_xioctl_t		*xp = ha->xioctl;
5867 
5868 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5869 
5870 	if (cmd->RequestLen < sizeof (EXT_BEACON_CONTROL)) {
5871 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
5872 		cmd->DetailStatus = sizeof (EXT_BEACON_CONTROL);
5873 		EL(ha, "done - failed, RequestLen < EXT_BEACON_CONTROL,"
5874 		    " Len=%xh\n", cmd->RequestLen);
5875 		cmd->ResponseLen = 0;
5876 		return;
5877 	}
5878 
5879 	if (ha->device_id < 0x2300) {
5880 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
5881 		cmd->DetailStatus = 0;
5882 		EL(ha, "done - failed, Invalid function for HBA model\n");
5883 		cmd->ResponseLen = 0;
5884 		return;
5885 	}
5886 
5887 	rval = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, &bstate,
5888 	    cmd->RequestLen, mode);
5889 
5890 	if (rval != 0) {
5891 		cmd->Status = EXT_STATUS_COPY_ERR;
5892 		EL(ha, "done -  failed, ddi_copyin\n");
5893 		return;
5894 	}
5895 
5896 	switch (bstate.State) {
5897 	case EXT_DEF_GRN_BLINK_OFF:	/* turn beacon off */
5898 		if (xp->ledstate.BeaconState == BEACON_OFF) {
5899 			/* not quite an error -- LED state is already off */
5900 			cmd->Status = EXT_STATUS_OK;
5901 			EL(ha, "LED off request -- LED is already off\n");
5902 			break;
5903 		}
5904 
5905 		xp->ledstate.BeaconState = BEACON_OFF;
5906 		xp->ledstate.LEDflags = LED_ALL_OFF;
5907 
5908 		if ((rval = ql_wrapup_led(ha)) != QL_SUCCESS) {
5909 			cmd->Status = EXT_STATUS_MAILBOX;
5910 		} else {
5911 			cmd->Status = EXT_STATUS_OK;
5912 		}
5913 		break;
5914 
5915 	case EXT_DEF_GRN_BLINK_ON:	/* turn beacon on */
5916 		if (xp->ledstate.BeaconState == BEACON_ON) {
5917 			/* not quite an error -- LED state is already on */
5918 			cmd->Status = EXT_STATUS_OK;
5919 			EL(ha, "LED on request  - LED is already on\n");
5920 			break;
5921 		}
5922 
5923 		if ((rval = ql_setup_led(ha)) != QL_SUCCESS) {
5924 			cmd->Status = EXT_STATUS_MAILBOX;
5925 			break;
5926 		}
5927 
5928 		if (CFG_IST(ha, CFG_CTRL_24258081)) {
5929 			xp->ledstate.LEDflags = LED_YELLOW_24 | LED_AMBER_24;
5930 		} else {
5931 			xp->ledstate.LEDflags = LED_GREEN;
5932 		}
5933 		xp->ledstate.BeaconState = BEACON_ON;
5934 
5935 		cmd->Status = EXT_STATUS_OK;
5936 		break;
5937 	default:
5938 		cmd->Status = EXT_STATUS_ERR;
5939 		EL(ha, "failed, unknown state request %xh\n", bstate.State);
5940 		break;
5941 	}
5942 
5943 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5944 }
5945 
5946 /*
5947  * ql_get_led_state
5948  *	Performs EXT_GET_BEACON_STATE subcommand of EXT_CC_GET_DATA.
5949  *
5950  * Input:
5951  *	ha:	adapter state pointer.
5952  *	cmd:	Local EXT_IOCTL cmd struct pointer.
5953  *	mode:	flags.
5954  *
5955  * Returns:
5956  *	None, request status indicated in cmd->Status.
5957  *
5958  * Context:
5959  *	Kernel context.
5960  */
5961 static void
5962 ql_get_led_state(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5963 {
5964 	EXT_BEACON_CONTROL	bstate = {0};
5965 	uint32_t		rval;
5966 	ql_xioctl_t		*xp = ha->xioctl;
5967 
5968 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5969 
5970 	if (cmd->ResponseLen < sizeof (EXT_BEACON_CONTROL)) {
5971 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
5972 		cmd->DetailStatus = sizeof (EXT_BEACON_CONTROL);
5973 		EL(ha, "done - failed, ResponseLen < EXT_BEACON_CONTROL,"
5974 		    "Len=%xh\n", cmd->ResponseLen);
5975 		cmd->ResponseLen = 0;
5976 		return;
5977 	}
5978 
5979 	if (ha->device_id < 0x2300) {
5980 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
5981 		cmd->DetailStatus = 0;
5982 		EL(ha, "done - failed, Invalid function for HBA model\n");
5983 		cmd->ResponseLen = 0;
5984 		return;
5985 	}
5986 
5987 	if (ha->task_daemon_flags & ABORT_ISP_ACTIVE) {
5988 		cmd->Status = EXT_STATUS_BUSY;
5989 		EL(ha, "done -  failed, isp abort active\n");
5990 		cmd->ResponseLen = 0;
5991 		return;
5992 	}
5993 
5994 	/* inform the user of the current beacon state (off or on) */
5995 	bstate.State = xp->ledstate.BeaconState;
5996 
5997 	rval = ddi_copyout((void *)&bstate,
5998 	    (void *)(uintptr_t)cmd->ResponseAdr,
5999 	    sizeof (EXT_BEACON_CONTROL), mode);
6000 
6001 	if (rval != 0) {
6002 		EL(ha, "failed, ddi_copyout\n");
6003 		cmd->Status = EXT_STATUS_COPY_ERR;
6004 		cmd->ResponseLen = 0;
6005 	} else {
6006 		cmd->Status = EXT_STATUS_OK;
6007 		cmd->ResponseLen = sizeof (EXT_BEACON_CONTROL);
6008 	}
6009 
6010 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
6011 }
6012 
6013 /*
6014  * ql_blink_led
6015  *	Determine the next state of the LED and drive it
6016  *
6017  * Input:
6018  *	ha:	adapter state pointer.
6019  *
6020  * Context:
6021  *	Interrupt context.
6022  */
6023 void
6024 ql_blink_led(ql_adapter_state_t *ha)
6025 {
6026 	uint32_t		nextstate;
6027 	ql_xioctl_t		*xp = ha->xioctl;
6028 
6029 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
6030 
6031 	if (xp->ledstate.BeaconState == BEACON_ON) {
6032 		/* determine the next led state */
6033 		if (CFG_IST(ha, CFG_CTRL_24258081)) {
6034 			nextstate = (xp->ledstate.LEDflags) &
6035 			    (~(RD32_IO_REG(ha, gpiod)));
6036 		} else {
6037 			nextstate = (xp->ledstate.LEDflags) &
6038 			    (~(RD16_IO_REG(ha, gpiod)));
6039 		}
6040 
6041 		/* turn the led on or off */
6042 		ql_drive_led(ha, nextstate);
6043 	}
6044 
6045 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
6046 }
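/*
 * Masking the requested LEDflags with the complement of the current GPIO
 * data means any LED that is presently lit is turned off on the next pass
 * and vice versa, so calling this routine periodically from the interrupt
 * path produces the blinking beacon effect while BeaconState is BEACON_ON.
 */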
6047 
6048 /*
6049  * ql_drive_led
6050  *	Drive the LEDs as determined by LEDflags
6051  *
6052  * Input:
6053  *	ha:		adapter state pointer.
6054  *	LEDflags:	LED flags
6055  *
6056  * Context:
6057  *	Kernel/Interrupt context.
6058  */
6059 static void
6060 ql_drive_led(ql_adapter_state_t *ha, uint32_t LEDflags)
6061 {
6062 
6063 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
6064 
6065 	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
6066 
6067 		uint16_t	gpio_enable, gpio_data;
6068 
6069 		/* setup to send new data */
6070 		gpio_enable = (uint16_t)RD16_IO_REG(ha, gpioe);
6071 		gpio_enable = (uint16_t)(gpio_enable | LED_MASK);
6072 		WRT16_IO_REG(ha, gpioe, gpio_enable);
6073 
6074 		/* read current data and clear out old led data */
6075 		gpio_data = (uint16_t)RD16_IO_REG(ha, gpiod);
6076 		gpio_data = (uint16_t)(gpio_data & ~LED_MASK);
6077 
6078 		/* set in the new led data. */
6079 		gpio_data = (uint16_t)(gpio_data | LEDflags);
6080 
6081 		/* write out the new led data */
6082 		WRT16_IO_REG(ha, gpiod, gpio_data);
6083 
6084 	} else if (CFG_IST(ha, CFG_CTRL_24258081)) {
6085 
6086 		uint32_t	gpio_data;
6087 
6088 		/* setup to send new data */
6089 		gpio_data = RD32_IO_REG(ha, gpiod);
6090 		gpio_data |= LED_MASK_UPDATE_24;
6091 		WRT32_IO_REG(ha, gpiod, gpio_data);
6092 
6093 		/* read current data and clear out old led data */
6094 		gpio_data = RD32_IO_REG(ha, gpiod);
6095 		gpio_data &= ~LED_MASK_COLORS_24;
6096 
6097 		/* set in the new led data */
6098 		gpio_data |= LEDflags;
6099 
6100 		/* write out the new led data */
6101 		WRT32_IO_REG(ha, gpiod, gpio_data);
6102 
6103 	} else {
6104 		EL(ha, "unsupported HBA: %xh", ha->device_id);
6105 	}
6106 
6107 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
6108 }
6109 
6110 /*
6111  * ql_setup_led
6112  *	Setup LED for driver control
6113  *
6114  * Input:
6115  *	ha:	adapter state pointer.
6116  *
6117  * Context:
6118  *	Kernel/Interrupt context.
6119  */
6120 static uint32_t
6121 ql_setup_led(ql_adapter_state_t *ha)
6122 {
6123 	uint32_t	rval;
6124 	ql_mbx_data_t	mr;
6125 
6126 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
6127 
6128 	/* decouple the LED control from the fw */
6129 	rval = ql_get_firmware_option(ha, &mr);
6130 	if (rval != QL_SUCCESS) {
6131 		EL(ha, "failed, get_firmware_option=%xh\n", rval);
6132 		return (rval);
6133 	}
6134 
6135 	/* set the appropriate options */
6136 	mr.mb[1] = (uint16_t)(mr.mb[1] | FO1_DISABLE_GPIO);
6137 
6138 	/* send it back to the firmware */
6139 	rval = ql_set_firmware_option(ha, &mr);
6140 	if (rval != QL_SUCCESS) {
6141 		EL(ha, "failed, set_firmware_option=%xh\n", rval);
6142 		return (rval);
6143 	}
6144 
6145 	/* initially, turn the LEDs off */
6146 	ql_drive_led(ha, LED_ALL_OFF);
6147 
6148 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
6149 
6150 	return (rval);
6151 }
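/*
 * ql_setup_led() and ql_wrapup_led() bracket driver LED control with the
 * same read-modify-write of firmware option word 1: setting
 * FO1_DISABLE_GPIO here stops the firmware from driving the GPIO pins,
 * and ql_wrapup_led() below clears the bit to hand the LEDs back to the
 * firmware.
 */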
6152 
6153 /*
6154  * ql_wrapup_led
6155  *	Return LED control to the firmware
6156  *
6157  * Input:
6158  *	ha:	adapter state pointer.
6159  *
6160  * Context:
6161  *	Kernel/Interrupt context.
6162  */
6163 static uint32_t
6164 ql_wrapup_led(ql_adapter_state_t *ha)
6165 {
6166 	uint32_t	rval;
6167 	ql_mbx_data_t	mr;
6168 
6169 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
6170 
6171 	/* Turn all LEDs off */
6172 	ql_drive_led(ha, LED_ALL_OFF);
6173 
6174 	if (CFG_IST(ha, CFG_CTRL_24258081)) {
6175 
6176 		uint32_t	gpio_data;
6177 
6178 		/* disable the LED update mask */
6179 		gpio_data = RD32_IO_REG(ha, gpiod);
6180 		gpio_data &= ~LED_MASK_UPDATE_24;
6181 
6182 		/* write out the data */
6183 		WRT32_IO_REG(ha, gpiod, gpio_data);
6184 	}
6185 
6186 	/* give LED control back to the f/w */
6187 	rval = ql_get_firmware_option(ha, &mr);
6188 	if (rval != QL_SUCCESS) {
6189 		EL(ha, "failed, get_firmware_option=%xh\n", rval);
6190 		return (rval);
6191 	}
6192 
6193 	mr.mb[1] = (uint16_t)(mr.mb[1] & ~FO1_DISABLE_GPIO);
6194 
6195 	rval = ql_set_firmware_option(ha, &mr);
6196 	if (rval != QL_SUCCESS) {
6197 		EL(ha, "failed, set_firmware_option=%xh\n", rval);
6198 		return (rval);
6199 	}
6200 
6201 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
6202 
6203 	return (rval);
6204 }
6205 
6206 /*
6207  * ql_get_port_summary
6208  *	Performs EXT_SC_GET_PORT_SUMMARY subcommand of EXT_CC_GET_DATA.
6209  *
6210  *	The EXT_IOCTL->RequestAdr points to a single
6211  *	UINT32 which identifies the device type.
6212  *
6213  * Input:
6214  *	ha:	adapter state pointer.
6215  *	cmd:	Local EXT_IOCTL cmd struct pointer.
6216  *	mode:	flags.
6217  *
6218  * Returns:
6219  *	None, request status indicated in cmd->Status.
6220  *
6221  * Context:
6222  *	Kernel context.
6223  */
6224 static void
6225 ql_get_port_summary(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
6226 {
6227 	EXT_DEVICEDATA		dd = {0};
6228 	EXT_DEVICEDATA		*uddp;
6229 	ql_link_t		*link;
6230 	ql_tgt_t		*tq;
6231 	uint32_t		rlen, dev_type, index;
6232 	int			rval = 0;
6233 	EXT_DEVICEDATAENTRY	*uddep, *ddep;
6234 
6235 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
6236 
6237 	ddep = &dd.EntryList[0];
6238 
6239 	/*
6240 	 * Get the type of device the requestor is looking for.
6241 	 *
6242 	 * We ignore this for now.
6243 	 */
6244 	rval = ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
6245 	    (void *)&dev_type, sizeof (dev_type), mode);
6246 	if (rval != 0) {
6247 		cmd->Status = EXT_STATUS_COPY_ERR;
6248 		cmd->ResponseLen = 0;
6249 		EL(ha, "failed, ddi_copyin\n");
6250 		return;
6251 	}
6252 	/*
6253 	 * Count the number of entries to be returned. Count devices
6254 	 * that are offline, but have been persistently bound.
6255 	 */
6256 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
6257 		for (link = ha->dev[index].first; link != NULL;
6258 		    link = link->next) {
6259 			tq = link->base_address;
6260 			if (tq->flags & TQF_INITIATOR_DEVICE ||
6261 			    !VALID_TARGET_ID(ha, tq->loop_id)) {
6262 				continue;	/* Skip this one */
6263 			}
6264 			dd.TotalDevices++;
6265 		}
6266 	}
6267 	/*
6268 	 * Compute the number of entries that can be returned
6269 	 * based upon the size of caller's response buffer.
6270 	 */
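	/*
	 * EXT_DEVICEDATA already embeds one EXT_DEVICEDATAENTRY, so the
	 * required length is the base structure plus (TotalDevices - 1)
	 * additional entries.
	 */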
6271 	dd.ReturnListEntryCount = 0;
6272 	if (dd.TotalDevices == 0) {
6273 		rlen = sizeof (EXT_DEVICEDATA) - sizeof (EXT_DEVICEDATAENTRY);
6274 	} else {
6275 		rlen = (uint32_t)(sizeof (EXT_DEVICEDATA) +
6276 		    (sizeof (EXT_DEVICEDATAENTRY) * (dd.TotalDevices - 1)));
6277 	}
6278 	if (rlen > cmd->ResponseLen) {
6279 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
6280 		cmd->DetailStatus = rlen;
6281 		EL(ha, "failed, rlen > ResponseLen, rlen=%d, Len=%d\n",
6282 		    rlen, cmd->ResponseLen);
6283 		cmd->ResponseLen = 0;
6284 		return;
6285 	}
6286 	cmd->ResponseLen = 0;
6287 	uddp = (EXT_DEVICEDATA *)(uintptr_t)cmd->ResponseAdr;
6288 	uddep = &uddp->EntryList[0];
6289 	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
6290 		for (link = ha->dev[index].first; link != NULL;
6291 		    link = link->next) {
6292 			tq = link->base_address;
6293 			if (tq->flags & TQF_INITIATOR_DEVICE ||
6294 			    !VALID_TARGET_ID(ha, tq->loop_id)) {
6295 				continue;	/* Skip this one */
6296 			}
6297 
6298 			bzero((void *)ddep, sizeof (EXT_DEVICEDATAENTRY));
6299 
6300 			bcopy(tq->node_name, ddep->NodeWWN, 8);
6301 			bcopy(tq->port_name, ddep->PortWWN, 8);
6302 
6303 			ddep->PortID[0] = tq->d_id.b.domain;
6304 			ddep->PortID[1] = tq->d_id.b.area;
6305 			ddep->PortID[2] = tq->d_id.b.al_pa;
6306 
6307 			bcopy(tq->port_name,
6308 			    (caddr_t)&ddep->TargetAddress.Target, 8);
6309 
6310 			ddep->DeviceFlags = tq->flags;
6311 			ddep->LoopID = tq->loop_id;
6312 			QL_PRINT_9(CE_CONT, "(%d): Tgt=%lld, loop=%xh, "
6313 			    "wwnn=%02x%02x%02x%02x%02x%02x%02x%02x, "
6314 			    "wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
6315 			    ha->instance, ddep->TargetAddress.Target,
6316 			    ddep->LoopID, ddep->NodeWWN[0], ddep->NodeWWN[1],
6317 			    ddep->NodeWWN[2], ddep->NodeWWN[3],
6318 			    ddep->NodeWWN[4], ddep->NodeWWN[5],
6319 			    ddep->NodeWWN[6], ddep->NodeWWN[7],
6320 			    ddep->PortWWN[0], ddep->PortWWN[1],
6321 			    ddep->PortWWN[2], ddep->PortWWN[3],
6322 			    ddep->PortWWN[4], ddep->PortWWN[5],
6323 			    ddep->PortWWN[6], ddep->PortWWN[7]);
6324 			rval = ddi_copyout((void *)ddep, (void *)uddep,
6325 			    sizeof (EXT_DEVICEDATAENTRY), mode);
6326 
6327 			if (rval != 0) {
6328 				cmd->Status = EXT_STATUS_COPY_ERR;
6329 				cmd->ResponseLen = 0;
6330 				EL(ha, "failed, ddi_copyout\n");
6331 				break;
6332 			}
6333 			dd.ReturnListEntryCount++;
6334 			uddep++;
6335 			cmd->ResponseLen += (uint32_t)
6336 			    sizeof (EXT_DEVICEDATAENTRY);
6337 		}
6338 	}
6339 	rval = ddi_copyout((void *)&dd, (void *)uddp,
6340 	    sizeof (EXT_DEVICEDATA) - sizeof (EXT_DEVICEDATAENTRY), mode);
6341 
6342 	if (rval != 0) {
6343 		cmd->Status = EXT_STATUS_COPY_ERR;
6344 		cmd->ResponseLen = 0;
6345 		EL(ha, "failed, ddi_copyout-2\n");
6346 	} else {
6347 		cmd->ResponseLen += (uint32_t)sizeof (EXT_DEVICEDATAENTRY);
6348 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
6349 	}
6350 }
6351 
6352 /*
6353  * ql_get_target_id
6354  *	Performs EXT_SC_GET_TARGET_ID subcommand of EXT_CC_GET_DATA.
6355  *
6356  * Input:
6357  *	ha:	adapter state pointer.
6358  *	cmd:	Local EXT_IOCTL cmd struct pointer.
6359  *	mode:	flags.
6360  *
6361  * Returns:
6362  *	None, request status indicated in cmd->Status.
6363  *
6364  * Context:
6365  *	Kernel context.
6366  */
6367 static void
6368 ql_get_target_id(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
6369 {
6370 	uint32_t		rval;
6371 	uint16_t		qlnt;
6372 	EXT_DEST_ADDR		extdestaddr = {0};
6373 	uint8_t			*name;
6374 	uint8_t			wwpn[EXT_DEF_WWN_NAME_SIZE];
6375 	ql_tgt_t		*tq;
6376 
6377 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
6378 
6379 	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
6380 	    (void*)wwpn, sizeof (EXT_DEST_ADDR), mode) != 0) {
6381 		EL(ha, "failed, ddi_copyin\n");
6382 		cmd->Status = EXT_STATUS_COPY_ERR;
6383 		cmd->ResponseLen = 0;
6384 		return;
6385 	}
6386 
6387 	qlnt = QLNT_PORT;
6388 	name = wwpn;
6389 	QL_PRINT_9(CE_CONT, "(%d): wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
6390 	    ha->instance, name[0], name[1], name[2], name[3], name[4],
6391 	    name[5], name[6], name[7]);
6392 
6393 	tq = ql_find_port(ha, name, qlnt);
6394 	if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
6395 		EL(ha, "failed, fc_port not found\n");
6396 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
6397 		cmd->ResponseLen = 0;
6398 		return;
6399 	}
6400 
6401 	bcopy(tq->port_name, (caddr_t)&extdestaddr.DestAddr.ScsiAddr.Target, 8);
6402 
6403 	rval = ddi_copyout((void *)&extdestaddr,
6404 	    (void *)(uintptr_t)cmd->ResponseAdr, sizeof (EXT_DEST_ADDR), mode);
6405 	if (rval != 0) {
6406 		EL(ha, "failed, ddi_copyout\n");
6407 		cmd->Status = EXT_STATUS_COPY_ERR;
6408 		cmd->ResponseLen = 0;
6409 	}
6410 
6411 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
6412 }
6413 
6414 /*
6415  * ql_setup_fcache
6416  *	Populates selected flash sections into the cache
6417  *
6418  * Input:
6419  *	ha = adapter state pointer.
6420  *
6421  * Returns:
6422  *	ql local function return status code.
6423  *
6424  * Context:
6425  *	Kernel context.
6426  *
6427  * Note:
6428  *	Driver must be in the stalled state prior to entering, or
6429  *	add code to this function prior to calling ql_setup_flash().
6430  */
6431 int
6432 ql_setup_fcache(ql_adapter_state_t *ha)
6433 {
6434 	int		rval;
6435 	uint32_t	freadpos = 0;
6436 	uint32_t	fw_done = 0;
6437 	ql_fcache_t	*head = NULL;
6438 	ql_fcache_t	*tail = NULL;
6439 	ql_fcache_t	*ftmp;
6440 
6441 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6442 
6443 	CACHE_LOCK(ha);
6444 
6445 	/* If we have already populated it, return. */
6446 	if (ha->fcache != NULL) {
6447 		CACHE_UNLOCK(ha);
6448 		EL(ha, "buffer already populated\n");
6449 		return (QL_SUCCESS);
6450 	}
6451 
6452 	ql_flash_nvram_defaults(ha);
6453 
6454 	if ((rval = ql_setup_flash(ha)) != QL_SUCCESS) {
6455 		CACHE_UNLOCK(ha);
6456 		EL(ha, "unable to setup flash; rval=%xh\n", rval);
6457 		return (rval);
6458 	}
6459 
6460 	while (freadpos != 0xffffffff) {
6461 		/* Allocate & populate this node */
6462 		if ((ftmp = ql_setup_fnode(ha)) == NULL) {
6463 			EL(ha, "node alloc failed\n");
6464 			rval = QL_FUNCTION_FAILED;
6465 			break;
6466 		}
6467 
6468 		/* link in the new node */
6469 		if (head == NULL) {
6470 			head = tail = ftmp;
6471 		} else {
6472 			tail->next = ftmp;
6473 			tail = ftmp;
6474 		}
6475 
6476 		/* Do the firmware node first for 24xx/25xx's */
6477 		if (fw_done == 0) {
6478 			if (CFG_IST(ha, CFG_CTRL_24258081)) {
6479 				freadpos = ha->flash_fw_addr << 2;
6480 			}
6481 			fw_done = 1;
6482 		}
6483 
6484 		if ((rval = ql_dump_fcode(ha, ftmp->buf, FBUFSIZE,
6485 		    freadpos)) != QL_SUCCESS) {
6486 			EL(ha, "failed, 24xx dump_fcode"
6487 			    " pos=%xh rval=%xh\n", freadpos, rval);
6488 			rval = QL_FUNCTION_FAILED;
6489 			break;
6490 		}
6491 
6492 		/* checkout the pci data / format */
6493 		if (ql_check_pci(ha, ftmp, &freadpos)) {
6494 			EL(ha, "flash header incorrect\n");
6495 			rval = QL_FUNCTION_FAILED;
6496 			break;
6497 		}
6498 	}
6499 
6500 	if (rval != QL_SUCCESS) {
6501 		/* release all resources we have */
6502 		ftmp = head;
6503 		while (ftmp != NULL) {
6504 			tail = ftmp->next;
6505 			kmem_free(ftmp->buf, FBUFSIZE);
6506 			kmem_free(ftmp, sizeof (ql_fcache_t));
6507 			ftmp = tail;
6508 		}
6509 
6510 		EL(ha, "failed, done\n");
6511 	} else {
6512 		ha->fcache = head;
6513 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6514 	}
6515 	CACHE_UNLOCK(ha);
6516 
6517 	return (rval);
6518 }
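/*
 * The population loop above walks the option-ROM image chain in flash:
 * for CFG_CTRL_24258081-class parts the firmware region (ha->flash_fw_addr
 * shifted to a byte address) is read first, and ql_check_pci() then
 * advances freadpos from one PCI image to the next until it marks the
 * last image by setting the position to 0xffffffff.
 */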
6519 
6520 /*
6521  * ql_update_fcache
6522  *	re-populates updated flash into the fcache. If
6523  *	fcache does not exist (e.g., flash was empty/invalid on
6524  *	boot), this routine will create and then populate it.
6525  *
6526  * Input:
6527  *	ha	= adapter state pointer.
6528  *	*bfp	= Pointer to flash buffer.
6529  *	bsize	= Size of flash buffer.
6530  *
6531  * Returns:
6532  *
6533  * Context:
6534  *	Kernel context.
6535  */
6536 void
6537 ql_update_fcache(ql_adapter_state_t *ha, uint8_t *bfp, uint32_t bsize)
6538 {
6539 	int		rval = QL_SUCCESS;
6540 	uint32_t	freadpos = 0;
6541 	uint32_t	fw_done = 0;
6542 	ql_fcache_t	*head = NULL;
6543 	ql_fcache_t	*tail = NULL;
6544 	ql_fcache_t	*ftmp;
6545 
6546 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6547 
6548 	while (freadpos != 0xffffffff) {
6549 
6550 		/* Allocate & populate this node */
6551 
6552 		if ((ftmp = ql_setup_fnode(ha)) == NULL) {
6553 			EL(ha, "node alloc failed\n");
6554 			rval = QL_FUNCTION_FAILED;
6555 			break;
6556 		}
6557 
6558 		/* link in the new node */
6559 		if (head == NULL) {
6560 			head = tail = ftmp;
6561 		} else {
6562 			tail->next = ftmp;
6563 			tail = ftmp;
6564 		}
6565 
6566 		/* Do the firmware node first for 24xx's */
6567 		if (fw_done == 0) {
6568 			if (CFG_IST(ha, CFG_CTRL_24258081)) {
6569 				freadpos = ha->flash_fw_addr << 2;
6570 			}
6571 			fw_done = 1;
6572 		}
6573 
6574 		/* read in first FBUFSIZE bytes of this flash section */
6575 		if (freadpos+FBUFSIZE > bsize) {
6576 			EL(ha, "passed buffer too small; fr=%xh, bsize=%xh\n",
6577 			    freadpos, bsize);
6578 			rval = QL_FUNCTION_FAILED;
6579 			break;
6580 		}
6581 		bcopy(bfp+freadpos, ftmp->buf, FBUFSIZE);
6582 
6583 		/* checkout the pci data / format */
6584 		if (ql_check_pci(ha, ftmp, &freadpos)) {
6585 			EL(ha, "flash header incorrect\n");
6586 			rval = QL_FUNCTION_FAILED;
6587 			break;
6588 		}
6589 	}
6590 
6591 	if (rval != QL_SUCCESS) {
6592 		/*
6593 		 * release all resources we have
6594 		 */
6595 		ql_fcache_rel(head);
6596 		EL(ha, "failed, done\n");
6597 	} else {
6598 		/*
6599 		 * Release previous fcache resources and update with new
6600 		 */
6601 		CACHE_LOCK(ha);
6602 		ql_fcache_rel(ha->fcache);
6603 		ha->fcache = head;
6604 		CACHE_UNLOCK(ha);
6605 
6606 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6607 	}
6608 }
6609 
6610 /*
6611  * ql_setup_fnode
6612  *	Allocates fcache node
6613  *
6614  * Input:
6615  *	ha = adapter state pointer.
6616  *
6617  * Returns:
6618  *	Pointer to the allocated fcache node (NULL = failed).
6619  *
6620  * Context:
6621  *	Kernel context.
6622  *
6623  * Note:
6624  *	Driver must be in the stalled state prior to entering, or
6625  *	add code to this function prior to calling ql_setup_flash().
6626  */
6627 static ql_fcache_t *
6628 ql_setup_fnode(ql_adapter_state_t *ha)
6629 {
6630 	ql_fcache_t	*fnode = NULL;
6631 
6632 	if ((fnode = (ql_fcache_t *)(kmem_zalloc(sizeof (ql_fcache_t),
6633 	    KM_SLEEP))) == NULL) {
6634 		EL(ha, "fnode alloc failed\n");
6635 		fnode = NULL;
6636 	} else if ((fnode->buf = (uint8_t *)(kmem_zalloc(FBUFSIZE,
6637 	    KM_SLEEP))) == NULL) {
6638 		EL(ha, "buf alloc failed\n");
6639 		kmem_free(fnode, sizeof (ql_fcache_t));
6640 		fnode = NULL;
6641 	} else {
6642 		fnode->buflen = FBUFSIZE;
6643 	}
6644 
6645 	return (fnode);
6646 }
6647 
6648 /*
6649  * ql_fcache_rel
6650  *	Releases the fcache resources
6651  *
6652  * Input:
6653  *	ha	= adapter state pointer.
6654  *	head	= Pointer to fcache linked list
6655  *
6656  * Returns:
6657  *
6658  * Context:
6659  *	Kernel context.
6660  *
6661  */
6662 void
6663 ql_fcache_rel(ql_fcache_t *head)
6664 {
6665 	ql_fcache_t	*ftmp = head;
6666 	ql_fcache_t	*tail;
6667 
6668 	/* release all resources we have */
6669 	while (ftmp != NULL) {
6670 		tail = ftmp->next;
6671 		kmem_free(ftmp->buf, FBUFSIZE);
6672 		kmem_free(ftmp, sizeof (ql_fcache_t));
6673 		ftmp = tail;
6674 	}
6675 }
6676 
6677 /*
6678  * ql_update_flash_caches
6679  *	Updates driver flash caches
6680  *
6681  * Input:
6682  *	ha:	adapter state pointer.
6683  *
6684  * Context:
6685  *	Kernel context.
6686  */
6687 static void
6688 ql_update_flash_caches(ql_adapter_state_t *ha)
6689 {
6690 	uint32_t		len;
6691 	ql_link_t		*link;
6692 	ql_adapter_state_t	*ha2;
6693 
6694 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6695 
6696 	/* Get base path length. */
6697 	for (len = (uint32_t)strlen(ha->devpath); len; len--) {
6698 		if (ha->devpath[len] == ',' ||
6699 		    ha->devpath[len] == '@') {
6700 			break;
6701 		}
6702 	}
6703 
6704 	/* Reset fcache on all adapter instances. */
6705 	for (link = ql_hba.first; link != NULL; link = link->next) {
6706 		ha2 = link->base_address;
6707 
6708 		if (strncmp(ha->devpath, ha2->devpath, len) != 0) {
6709 			continue;
6710 		}
6711 
6712 		CACHE_LOCK(ha2);
6713 		ql_fcache_rel(ha2->fcache);
6714 		ha2->fcache = NULL;
6715 
6716 		if (CFG_IST(ha, CFG_CTRL_24258081)) {
6717 			if (ha2->vcache != NULL) {
6718 				kmem_free(ha2->vcache, QL_24XX_VPD_SIZE);
6719 				ha2->vcache = NULL;
6720 			}
6721 		}
6722 		CACHE_UNLOCK(ha2);
6723 
6724 		(void) ql_setup_fcache(ha2);
6725 	}
6726 
6727 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6728 }
6729 
6730 /*
6731  * ql_get_fbuf
6732  *	Search the fcache list for the type specified
6733  *
6734  * Input:
6735  *	fptr	= Pointer to fcache linked list
6736  *	ftype	= Type of image to be returned.
6737  *
6738  * Returns:
6739  *	Pointer to ql_fcache_t.
6740  *	NULL means not found.
6741  *
6742  * Context:
6743  *	Kernel context.
6744  *
6745  *
6746  */
6747 ql_fcache_t *
6748 ql_get_fbuf(ql_fcache_t *fptr, uint32_t ftype)
6749 {
6750 	while (fptr != NULL) {
6751 		/* does this image meet criteria? */
6752 		if (ftype & fptr->type) {
6753 			break;
6754 		}
6755 		fptr = fptr->next;
6756 	}
6757 	return (fptr);
6758 }
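/*
 * Illustrative lookup (a sketch only): once ql_setup_fcache() has
 * populated the list, a cached image can be fetched by type, e.g.
 *
 *	ql_fcache_t	*fw;
 *
 *	if ((fw = ql_get_fbuf(ha->fcache, FTYPE_FW)) != NULL) {
 *		... fw->buf, fw->buflen and fw->verstr are valid ...
 *	}
 *
 * The match is a bitwise AND, so ftype may carry several FTYPE_* bits and
 * the first node matching any of them is returned.
 */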
6759 
6760 /*
6761  * ql_check_pci
6762  *
6763  *	checks the passed buffer for a valid pci signature and
6764  *	expected (and in range) pci length values.
6765  *
6766  *	For firmware type, a pci header is added since the image in
6767  *	the flash does not have one (!!!).
6768  *
6769  *	On successful pci check, nextpos adjusted to next pci header.
6770  *
6771  * Returns:
6772  *	-1 --> last pci image
6773  *	0 --> pci header valid
6774  *	1 --> pci header invalid.
6775  *
6776  * Context:
6777  *	Kernel context.
6778  */
6779 static int
6780 ql_check_pci(ql_adapter_state_t *ha, ql_fcache_t *fcache, uint32_t *nextpos)
6781 {
6782 	pci_header_t	*pcih;
6783 	pci_data_t	*pcid;
6784 	uint32_t	doff;
6785 	uint8_t		*pciinfo;
6786 
6787 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
6788 
6789 	if (fcache != NULL) {
6790 		pciinfo = fcache->buf;
6791 	} else {
6792 		EL(ha, "failed, null fcache ptr passed\n");
6793 		return (1);
6794 	}
6795 
6796 	if (pciinfo == NULL) {
6797 		EL(ha, "failed, null pciinfo ptr passed\n");
6798 		return (1);
6799 	}
6800 
6801 	if (CFG_IST(ha, CFG_SBUS_CARD)) {
6802 		caddr_t	bufp;
6803 		uint_t	len;
6804 
6805 		if (pciinfo[0] != SBUS_CODE_FCODE) {
6806 			EL(ha, "failed, unable to detect sbus fcode\n");
6807 			return (1);
6808 		}
6809 		fcache->type = FTYPE_FCODE;
6810 
6811 		/*LINTED [Solaris DDI_DEV_T_ANY Lint error]*/
6812 		if (ddi_getlongprop(DDI_DEV_T_ANY, ha->dip,
6813 		    PROP_LEN_AND_VAL_ALLOC | DDI_PROP_DONTPASS |
6814 		    DDI_PROP_CANSLEEP, "version", (caddr_t)&bufp,
6815 		    (int *)&len) == DDI_PROP_SUCCESS) {
6816 
6817 			(void) snprintf(fcache->verstr,
6818 			    FCHBA_OPTION_ROM_VERSION_LEN, "%s", bufp);
6819 			kmem_free(bufp, len);
6820 		}
6821 
6822 		*nextpos = 0xffffffff;
6823 
6824 		QL_PRINT_9(CE_CONT, "(%d): CFG_SBUS_CARD, done\n",
6825 		    ha->instance);
6826 
6827 		return (0);
6828 	}
6829 
6830 	if (*nextpos == ha->flash_fw_addr << 2) {
6831 
6832 		pci_header_t	fwh = {0};
6833 		pci_data_t	fwd = {0};
6834 		uint8_t		*buf, *bufp;
6835 
6836 		/*
6837 		 * Build a pci header for the firmware module
6838 		 */
6839 		if ((buf = (uint8_t *)(kmem_zalloc(FBUFSIZE, KM_SLEEP))) ==
6840 		    NULL) {
6841 			EL(ha, "failed, unable to allocate buffer\n");
6842 			return (1);
6843 		}
6844 
6845 		fwh.signature[0] = PCI_HEADER0;
6846 		fwh.signature[1] = PCI_HEADER1;
6847 		fwh.dataoffset[0] = LSB(sizeof (pci_header_t));
6848 		fwh.dataoffset[1] = MSB(sizeof (pci_header_t));
6849 
6850 		fwd.signature[0] = 'P';
6851 		fwd.signature[1] = 'C';
6852 		fwd.signature[2] = 'I';
6853 		fwd.signature[3] = 'R';
6854 		fwd.codetype = PCI_CODE_FW;
6855 		fwd.pcidatalen[0] = LSB(sizeof (pci_data_t));
6856 		fwd.pcidatalen[1] = MSB(sizeof (pci_data_t));
6857 
6858 		bufp = buf;
6859 		bcopy(&fwh, bufp, sizeof (pci_header_t));
6860 		bufp += sizeof (pci_header_t);
6861 		bcopy(&fwd, bufp, sizeof (pci_data_t));
6862 		bufp += sizeof (pci_data_t);
6863 
6864 		bcopy(fcache->buf, bufp, (FBUFSIZE - sizeof (pci_header_t) -
6865 		    sizeof (pci_data_t)));
6866 		bcopy(buf, fcache->buf, FBUFSIZE);
6867 
6868 		fcache->type = FTYPE_FW;
6869 
6870 		(void) snprintf(fcache->verstr, FCHBA_OPTION_ROM_VERSION_LEN,
6871 		    "%d.%02d.%02d", fcache->buf[19], fcache->buf[23],
6872 		    fcache->buf[27]);
6873 
6874 		if (CFG_IST(ha, CFG_CTRL_81XX)) {
6875 			*nextpos = 0x200000;
6876 		} else if (CFG_IST(ha, CFG_CTRL_8021)) {
6877 			*nextpos = 0x80000;
6878 		} else {
6879 			*nextpos = 0;
6880 		}
6881 		kmem_free(buf, FBUFSIZE);
6882 
6883 		QL_PRINT_9(CE_CONT, "(%d): FTYPE_FW, done\n", ha->instance);
6884 
6885 		return (0);
6886 	}
6887 
6888 	/* get to the pci header image length */
6889 	pcih = (pci_header_t *)pciinfo;
6890 
6891 	doff = pcih->dataoffset[0] | (pcih->dataoffset[1] << 8);
6892 
6893 	/* some header section sanity check */
6894 	if (pcih->signature[0] != PCI_HEADER0 ||
6895 	    pcih->signature[1] != PCI_HEADER1 || doff > 50) {
6896 		EL(ha, "buffer format error: s0=%xh, s1=%xh, off=%xh\n",
6897 		    pcih->signature[0], pcih->signature[1], doff);
6898 		return (1);
6899 	}
6900 
6901 	pcid = (pci_data_t *)(pciinfo + doff);
6902 
6903 	/* a slight sanity check of the data section */
6904 	if (pcid->signature[0] != 'P' || pcid->signature[1] != 'C' ||
6905 	    pcid->signature[2] != 'I' || pcid->signature[3] != 'R') {
6906 		EL(ha, "failed, data sig mismatch!\n");
6907 		return (1);
6908 	}
6909 
6910 	if (pcid->indicator == PCI_IND_LAST_IMAGE) {
6911 		QL_PRINT_9(CE_CONT, "(%d): last image\n", ha->instance);
6912 		if (CFG_IST(ha, CFG_CTRL_24258081)) {
6913 			ql_flash_layout_table(ha, *nextpos +
6914 			    (pcid->imagelength[0] | (pcid->imagelength[1] <<
6915 			    8)) * PCI_SECTOR_SIZE);
6916 			(void) ql_24xx_flash_desc(ha);
6917 		}
6918 		*nextpos = 0xffffffff;
6919 	} else {
6920 		/* adjust the next flash read start position */
6921 		*nextpos += (pcid->imagelength[0] |
6922 		    (pcid->imagelength[1] << 8)) * PCI_SECTOR_SIZE;
6923 	}
6924 
6925 	switch (pcid->codetype) {
6926 	case PCI_CODE_X86PC:
6927 		fcache->type = FTYPE_BIOS;
6928 		break;
6929 	case PCI_CODE_FCODE:
6930 		fcache->type = FTYPE_FCODE;
6931 		break;
6932 	case PCI_CODE_EFI:
6933 		fcache->type = FTYPE_EFI;
6934 		break;
6935 	case PCI_CODE_HPPA:
6936 		fcache->type = FTYPE_HPPA;
6937 		break;
6938 	default:
6939 		fcache->type = FTYPE_UNKNOWN;
6940 		break;
6941 	}
6942 
6943 	(void) snprintf(fcache->verstr, FCHBA_OPTION_ROM_VERSION_LEN,
6944 	    "%d.%02d", pcid->revisionlevel[1], pcid->revisionlevel[0]);
6945 
6946 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
6947 
6948 	return (0);
6949 }
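/*
 * The traversal above follows the standard PCI option-ROM layout: each
 * image begins with a header whose signature bytes (PCI_HEADER0/
 * PCI_HEADER1, 0x55/0xAA in the PCI spec) and dataoffset locate the
 * "PCIR" data structure, imagelength is expressed in PCI_SECTOR_SIZE
 * units, and PCI_IND_LAST_IMAGE marks the final image.  The firmware
 * region is the exception; it has no ROM header in flash, so a synthetic
 * header/data pair is prepended to the cached copy and the version string
 * is taken from fixed offsets within the firmware image.
 */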
6950 
6951 /*
6952  * ql_flash_layout_table
6953  *	Obtains flash addresses from table
6954  *
6955  * Input:
6956  *	ha:		adapter state pointer.
6957  *	flt_paddr:	flash layout pointer address.
6958  *
6959  * Context:
6960  *	Kernel context.
6961  */
6962 static void
6963 ql_flash_layout_table(ql_adapter_state_t *ha, uint32_t flt_paddr)
6964 {
6965 	ql_flt_ptr_t	*fptr;
6966 	uint8_t		*bp;
6967 	int		rval;
6968 	uint32_t	len, faddr, cnt;
6969 	uint16_t	chksum, w16;
6970 
6971 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
6972 
6973 	/* Process flash layout table header */
6974 	len = sizeof (ql_flt_ptr_t);
6975 	if ((bp = kmem_zalloc(len, KM_SLEEP)) == NULL) {
6976 		EL(ha, "kmem_zalloc=null\n");
6977 		return;
6978 	}
6979 
6980 	/* Process pointer to flash layout table */
6981 	if ((rval = ql_dump_fcode(ha, bp, len, flt_paddr)) != QL_SUCCESS) {
6982 		EL(ha, "fptr dump_flash pos=%xh, status=%xh\n", flt_paddr,
6983 		    rval);
6984 		kmem_free(bp, len);
6985 		return;
6986 	}
6987 	fptr = (ql_flt_ptr_t *)bp;
6988 
6989 	/* Verify pointer to flash layout table. */
6990 	for (chksum = 0, cnt = 0; cnt < len; cnt += 2) {
6991 		w16 = (uint16_t)CHAR_TO_SHORT(bp[cnt], bp[cnt + 1]);
6992 		chksum += w16;
6993 	}
6994 	if (chksum != 0 || fptr->sig[0] != 'Q' || fptr->sig[1] != 'F' ||
6995 	    fptr->sig[2] != 'L' || fptr->sig[3] != 'T') {
6996 		EL(ha, "ptr chksum=%xh, sig=%c%c%c%c\n", chksum, fptr->sig[0],
6997 		    fptr->sig[1], fptr->sig[2], fptr->sig[3]);
6998 		kmem_free(bp, len);
6999 		return;
7000 	}
7001 	faddr = CHAR_TO_LONG(fptr->addr[0], fptr->addr[1], fptr->addr[2],
7002 	    fptr->addr[3]);
7003 
7004 	kmem_free(bp, len);
7005 
7006 	ql_process_flt(ha, faddr);
7007 
7008 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7009 }
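/*
 * Both the FLT pointer above and the flash layout table processed by
 * ql_process_flt() are validated the same way: the bytes are summed as
 * 16-bit words and the total must be zero (the stored checksum word is
 * chosen so the words cancel), and the pointer must carry the ASCII
 * signature 'Q', 'F', 'L', 'T'.
 */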
7010 
7011 /*
7012  * ql_process_flt
7013  *	Obtains flash addresses from flash layout table
7014  *
7015  * Input:
7016  *	ha:	adapter state pointer.
7017  *	faddr:	flash layout table byte address.
7018  *
7019  * Context:
7020  *	Kernel context.
7021  */
7022 static void
7023 ql_process_flt(ql_adapter_state_t *ha, uint32_t faddr)
7024 {
7025 	ql_flt_hdr_t	*fhdr;
7026 	ql_flt_region_t	*frgn;
7027 	uint8_t		*bp, *eaddr, nv_rg, vpd_rg;
7028 	int		rval;
7029 	uint32_t	len, cnt, fe_addr;
7030 	uint16_t	chksum, w16;
7031 
7032 	QL_PRINT_9(CE_CONT, "(%d): started faddr=%xh\n", ha->instance, faddr);
7033 
7034 	/* Process flash layout table header */
7035 	if ((bp = kmem_zalloc(FLASH_LAYOUT_TABLE_SIZE, KM_SLEEP)) == NULL) {
7036 		EL(ha, "kmem_zalloc=null\n");
7037 		return;
7038 	}
7039 	fhdr = (ql_flt_hdr_t *)bp;
7040 
7041 	/* Process flash layout table. */
7042 	if ((rval = ql_dump_fcode(ha, bp, FLASH_LAYOUT_TABLE_SIZE, faddr)) !=
7043 	    QL_SUCCESS) {
7044 		EL(ha, "fhdr dump_flash pos=%xh, status=%xh\n", faddr, rval);
7045 		kmem_free(bp, FLASH_LAYOUT_TABLE_SIZE);
7046 		return;
7047 	}
7048 
7049 	/* Verify flash layout table. */
7050 	len = (uint32_t)(CHAR_TO_SHORT(fhdr->len[0], fhdr->len[1]) +
7051 	    sizeof (ql_flt_hdr_t) + sizeof (ql_flt_region_t));
7052 	if (len > FLASH_LAYOUT_TABLE_SIZE) {
7053 		chksum = 0xffff;
7054 	} else {
7055 		for (chksum = 0, cnt = 0; cnt < len; cnt += 2) {
7056 			w16 = (uint16_t)CHAR_TO_SHORT(bp[cnt], bp[cnt + 1]);
7057 			chksum += w16;
7058 		}
7059 	}
7060 	w16 = CHAR_TO_SHORT(fhdr->version[0], fhdr->version[1]);
7061 	if (chksum != 0 || w16 != 1) {
7062 		EL(ha, "table chksum=%xh, version=%d\n", chksum, w16);
7063 		kmem_free(bp, FLASH_LAYOUT_TABLE_SIZE);
7064 		return;
7065 	}
7066 	eaddr = bp + len;
7067 
7068 	/* Process Function/Port Configuration Map. */
7069 	nv_rg = vpd_rg = 0;
7070 	if (CFG_IST(ha, CFG_CTRL_8021)) {
7071 		uint16_t	i;
7072 		uint8_t		*mbp = eaddr;
7073 		ql_fp_cfg_map_t	*cmp = (ql_fp_cfg_map_t *)mbp;
7074 
7075 		len = (uint32_t)(CHAR_TO_SHORT(cmp->hdr.len[0],
7076 		    cmp->hdr.len[1]));
7077 		if (len > FLASH_LAYOUT_TABLE_SIZE) {
7078 			chksum = 0xffff;
7079 		} else {
7080 			for (chksum = 0, cnt = 0; cnt < len; cnt += 2) {
7081 				w16 = (uint16_t)CHAR_TO_SHORT(mbp[cnt],
7082 				    mbp[cnt + 1]);
7083 				chksum += w16;
7084 			}
7085 		}
7086 		w16 = CHAR_TO_SHORT(cmp->hdr.version[0], cmp->hdr.version[1]);
7087 		if (chksum != 0 || w16 != 1 ||
7088 		    cmp->hdr.Signature[0] != 'F' ||
7089 		    cmp->hdr.Signature[1] != 'P' ||
7090 		    cmp->hdr.Signature[2] != 'C' ||
7091 		    cmp->hdr.Signature[3] != 'M') {
7092 			EL(ha, "cfg_map chksum=%xh, version=%d, "
7093 			    "sig=%c%c%c%c\n", chksum, w16,
7094 			    cmp->hdr.Signature[0], cmp->hdr.Signature[1],
7095 			    cmp->hdr.Signature[2], cmp->hdr.Signature[3]);
7096 		} else {
7097 			cnt = (uint16_t)
7098 			    (CHAR_TO_SHORT(cmp->hdr.NumberEntries[0],
7099 			    cmp->hdr.NumberEntries[1]));
7100 			/* Locate entry for function. */
7101 			for (i = 0; i < cnt; i++) {
7102 				if (cmp->cfg[i].FunctionType == FT_FC &&
7103 				    cmp->cfg[i].FunctionNumber[0] ==
7104 				    ha->function_number &&
7105 				    cmp->cfg[i].FunctionNumber[1] == 0) {
7106 					nv_rg = cmp->cfg[i].ConfigRegion;
7107 					vpd_rg = cmp->cfg[i].VpdRegion;
7108 					break;
7109 				}
7110 			}
7111 
7112 			if (nv_rg == 0 || vpd_rg == 0) {
7113 				EL(ha, "cfg_map nv_rg=%d, vpd_rg=%d\n", nv_rg,
7114 				    vpd_rg);
7115 				nv_rg = vpd_rg = 0;
7116 			}
7117 		}
7118 	}
7119 
7120 	/* Process flash layout table regions */
7121 	for (frgn = (ql_flt_region_t *)(bp + sizeof (ql_flt_hdr_t));
7122 	    (uint8_t *)frgn < eaddr; frgn++) {
7123 		faddr = CHAR_TO_LONG(frgn->beg_addr[0], frgn->beg_addr[1],
7124 		    frgn->beg_addr[2], frgn->beg_addr[3]);
7125 		faddr >>= 2;
7126 		fe_addr = CHAR_TO_LONG(frgn->end_addr[0], frgn->end_addr[1],
7127 		    frgn->end_addr[2], frgn->end_addr[3]);
7128 		fe_addr >>= 2;
7129 
7130 		switch (frgn->region) {
7131 		case FLASH_8021_BOOTLOADER_REGION:
7132 			ha->bootloader_addr = faddr;
7133 			ha->bootloader_size = (fe_addr - faddr) + 1;
7134 			QL_PRINT_9(CE_CONT, "(%d): bootloader_addr=%xh, "
7135 			    "size=%xh\n", ha->instance, faddr,
7136 			    ha->bootloader_size);
7137 			break;
7138 		case FLASH_FW_REGION:
7139 		case FLASH_8021_FW_REGION:
7140 			ha->flash_fw_addr = faddr;
7141 			ha->flash_fw_size = (fe_addr - faddr) + 1;
7142 			QL_PRINT_9(CE_CONT, "(%d): flash_fw_addr=%xh, "
7143 			    "size=%xh\n", ha->instance, faddr,
7144 			    ha->flash_fw_size);
7145 			break;
7146 		case FLASH_GOLDEN_FW_REGION:
7147 		case FLASH_8021_GOLDEN_FW_REGION:
7148 			ha->flash_golden_fw_addr = faddr;
7149 			QL_PRINT_9(CE_CONT, "(%d): flash_golden_fw_addr=%xh\n",
7150 			    ha->instance, faddr);
7151 			break;
7152 		case FLASH_8021_VPD_REGION:
7153 			if (!vpd_rg || vpd_rg == FLASH_8021_VPD_REGION) {
7154 				ha->flash_vpd_addr = faddr;
7155 				QL_PRINT_9(CE_CONT, "(%d): 8021_flash_vpd_"
7156 				    "addr=%xh\n", ha->instance, faddr);
7157 			}
7158 			break;
7159 		case FLASH_VPD_0_REGION:
7160 			if (vpd_rg) {
7161 				if (vpd_rg == FLASH_VPD_0_REGION) {
7162 					ha->flash_vpd_addr = faddr;
7163 					QL_PRINT_9(CE_CONT, "(%d): vpd_rg  "
7164 					    "flash_vpd_addr=%xh\n",
7165 					    ha->instance, faddr);
7166 				}
7167 			} else if (!(ha->flags & FUNCTION_1) &&
7168 			    !(CFG_IST(ha, CFG_CTRL_8021))) {
7169 				ha->flash_vpd_addr = faddr;
7170 				QL_PRINT_9(CE_CONT, "(%d): flash_vpd_addr=%xh"
7171 				    "\n", ha->instance, faddr);
7172 			}
7173 			break;
7174 		case FLASH_NVRAM_0_REGION:
7175 			if (nv_rg) {
7176 				if (nv_rg == FLASH_NVRAM_0_REGION) {
7177 					ADAPTER_STATE_LOCK(ha);
7178 					ha->flags &= ~FUNCTION_1;
7179 					ADAPTER_STATE_UNLOCK(ha);
7180 					ha->flash_nvram_addr = faddr;
7181 					QL_PRINT_9(CE_CONT, "(%d): nv_rg "
7182 					    "flash_nvram_addr=%xh\n",
7183 					    ha->instance, faddr);
7184 				}
7185 			} else if (!(ha->flags & FUNCTION_1)) {
7186 				ha->flash_nvram_addr = faddr;
7187 				QL_PRINT_9(CE_CONT, "(%d): flash_nvram_addr="
7188 				    "%xh\n", ha->instance, faddr);
7189 			}
7190 			break;
7191 		case FLASH_VPD_1_REGION:
7192 			if (vpd_rg) {
7193 				if (vpd_rg == FLASH_VPD_1_REGION) {
7194 					ha->flash_vpd_addr = faddr;
7195 					QL_PRINT_9(CE_CONT, "(%d): vpd_rg "
7196 					    "flash_vpd_addr=%xh\n",
7197 					    ha->instance, faddr);
7198 				}
7199 			} else if (ha->flags & FUNCTION_1 &&
7200 			    !(CFG_IST(ha, CFG_CTRL_8021))) {
7201 				ha->flash_vpd_addr = faddr;
7202 				QL_PRINT_9(CE_CONT, "(%d): flash_vpd_addr=%xh"
7203 				    "\n", ha->instance, faddr);
7204 			}
7205 			break;
7206 		case FLASH_NVRAM_1_REGION:
7207 			if (nv_rg) {
7208 				if (nv_rg == FLASH_NVRAM_1_REGION) {
7209 					ADAPTER_STATE_LOCK(ha);
7210 					ha->flags |= FUNCTION_1;
7211 					ADAPTER_STATE_UNLOCK(ha);
7212 					ha->flash_nvram_addr = faddr;
7213 					QL_PRINT_9(CE_CONT, "(%d): nv_rg "
7214 					    "flash_nvram_addr=%xh\n",
7215 					    ha->instance, faddr);
7216 				}
7217 			} else if (ha->flags & FUNCTION_1) {
7218 				ha->flash_nvram_addr = faddr;
7219 				QL_PRINT_9(CE_CONT, "(%d): flash_nvram_addr="
7220 				    "%xh\n", ha->instance, faddr);
7221 			}
7222 			break;
7223 		case FLASH_DESC_TABLE_REGION:
7224 			if (!(CFG_IST(ha, CFG_CTRL_8021))) {
7225 				ha->flash_desc_addr = faddr;
7226 				QL_PRINT_9(CE_CONT, "(%d): flash_desc_addr="
7227 				    "%xh\n", ha->instance, faddr);
7228 			}
7229 			break;
7230 		case FLASH_ERROR_LOG_0_REGION:
7231 			if (!(ha->flags & FUNCTION_1)) {
7232 				ha->flash_errlog_start = faddr;
7233 				QL_PRINT_9(CE_CONT, "(%d): flash_errlog_addr="
7234 				    "%xh\n", ha->instance, faddr);
7235 			}
7236 			break;
7237 		case FLASH_ERROR_LOG_1_REGION:
7238 			if (ha->flags & FUNCTION_1) {
7239 				ha->flash_errlog_start = faddr;
7240 				QL_PRINT_9(CE_CONT, "(%d): flash_errlog_addr="
7241 				    "%xh\n", ha->instance, faddr);
7242 			}
7243 			break;
7244 		default:
7245 			break;
7246 		}
7247 	}
7248 	kmem_free(bp, FLASH_LAYOUT_TABLE_SIZE);
7249 
7250 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7251 }
7252 
7253 /*
7254  * ql_flash_nvram_defaults
7255  *	Flash default addresses.
7256  *
7257  * Input:
7258  *	ha:		adapter state pointer.
7259  *
7260  * Returns:
7261  *	None.
7262  *
7263  * Context:
7264  *	Kernel context.
7265  */
7266 static void
7267 ql_flash_nvram_defaults(ql_adapter_state_t *ha)
7268 {
7269 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7270 
7271 	if (ha->flags & FUNCTION_1) {
7272 		if (CFG_IST(ha, CFG_CTRL_2300)) {
7273 			ha->flash_nvram_addr = NVRAM_2300_FUNC1_ADDR;
7274 			ha->flash_fw_addr = FLASH_2300_FIRMWARE_ADDR;
7275 		} else if (CFG_IST(ha, CFG_CTRL_2422)) {
7276 			ha->flash_data_addr = FLASH_24_25_DATA_ADDR;
7277 			ha->flash_nvram_addr = NVRAM_2400_FUNC1_ADDR;
7278 			ha->flash_vpd_addr = VPD_2400_FUNC1_ADDR;
7279 			ha->flash_errlog_start = FLASH_2400_ERRLOG_START_ADDR_1;
7280 			ha->flash_desc_addr = FLASH_2400_DESCRIPTOR_TABLE;
7281 			ha->flash_fw_addr = FLASH_2400_FIRMWARE_ADDR;
7282 		} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
7283 			ha->flash_data_addr = FLASH_24_25_DATA_ADDR;
7284 			ha->flash_nvram_addr = NVRAM_2500_FUNC1_ADDR;
7285 			ha->flash_vpd_addr = VPD_2500_FUNC1_ADDR;
7286 			ha->flash_errlog_start = FLASH_2500_ERRLOG_START_ADDR_1;
7287 			ha->flash_desc_addr = FLASH_2500_DESCRIPTOR_TABLE;
7288 			ha->flash_fw_addr = FLASH_2500_FIRMWARE_ADDR;
7289 		} else if (CFG_IST(ha, CFG_CTRL_81XX)) {
7290 			ha->flash_data_addr = FLASH_8100_DATA_ADDR;
7291 			ha->flash_nvram_addr = NVRAM_8100_FUNC1_ADDR;
7292 			ha->flash_vpd_addr = VPD_8100_FUNC1_ADDR;
7293 			ha->flash_errlog_start = FLASH_8100_ERRLOG_START_ADDR_1;
7294 			ha->flash_desc_addr = FLASH_8100_DESCRIPTOR_TABLE;
7295 			ha->flash_fw_addr = FLASH_8100_FIRMWARE_ADDR;
7296 		} else if (CFG_IST(ha, CFG_CTRL_8021)) {
7297 			ha->flash_data_addr = 0;
7298 			ha->flash_nvram_addr = NVRAM_8021_FUNC1_ADDR;
7299 			ha->flash_vpd_addr = VPD_8021_FUNC1_ADDR;
7300 			ha->flash_errlog_start = 0;
7301 			ha->flash_desc_addr = FLASH_8021_DESCRIPTOR_TABLE;
7302 			ha->flash_fw_addr = FLASH_8021_FIRMWARE_ADDR;
7303 			ha->flash_fw_size = FLASH_8021_FIRMWARE_SIZE;
7304 			ha->bootloader_addr = FLASH_8021_BOOTLOADER_ADDR;
7305 			ha->bootloader_size = FLASH_8021_BOOTLOADER_SIZE;
7306 		}
7307 	} else {
7308 		if (CFG_IST(ha, CFG_CTRL_2200)) {
7309 			ha->flash_nvram_addr = NVRAM_2200_FUNC0_ADDR;
7310 			ha->flash_fw_addr = FLASH_2200_FIRMWARE_ADDR;
7311 		} else if (CFG_IST(ha, CFG_CTRL_2300) ||
7312 		    (CFG_IST(ha, CFG_CTRL_6322))) {
7313 			ha->flash_nvram_addr = NVRAM_2300_FUNC0_ADDR;
7314 			ha->flash_fw_addr = FLASH_2300_FIRMWARE_ADDR;
7315 		} else if (CFG_IST(ha, CFG_CTRL_2422)) {
7316 			ha->flash_data_addr = FLASH_24_25_DATA_ADDR;
7317 			ha->flash_nvram_addr = NVRAM_2400_FUNC0_ADDR;
7318 			ha->flash_vpd_addr = VPD_2400_FUNC0_ADDR;
7319 			ha->flash_errlog_start = FLASH_2400_ERRLOG_START_ADDR_0;
7320 			ha->flash_desc_addr = FLASH_2400_DESCRIPTOR_TABLE;
7321 			ha->flash_fw_addr = FLASH_2400_FIRMWARE_ADDR;
7322 		} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
7323 			ha->flash_data_addr = FLASH_24_25_DATA_ADDR;
7324 			ha->flash_nvram_addr = NVRAM_2500_FUNC0_ADDR;
7325 			ha->flash_vpd_addr = VPD_2500_FUNC0_ADDR;
7326 			ha->flash_errlog_start = FLASH_2500_ERRLOG_START_ADDR_0;
7327 			ha->flash_desc_addr = FLASH_2500_DESCRIPTOR_TABLE;
7328 			ha->flash_fw_addr = FLASH_2500_FIRMWARE_ADDR;
7329 		} else if (CFG_IST(ha, CFG_CTRL_81XX)) {
7330 			ha->flash_data_addr = FLASH_8100_DATA_ADDR;
7331 			ha->flash_nvram_addr = NVRAM_8100_FUNC0_ADDR;
7332 			ha->flash_vpd_addr = VPD_8100_FUNC0_ADDR;
7333 			ha->flash_errlog_start = FLASH_8100_ERRLOG_START_ADDR_0;
7334 			ha->flash_desc_addr = FLASH_8100_DESCRIPTOR_TABLE;
7335 			ha->flash_fw_addr = FLASH_8100_FIRMWARE_ADDR;
7336 		} else if (CFG_IST(ha, CFG_CTRL_8021)) {
7337 			ha->flash_data_addr = 0;
7338 			ha->flash_nvram_addr = NVRAM_8021_FUNC0_ADDR;
7339 			ha->flash_vpd_addr = VPD_8021_FUNC0_ADDR;
7340 			ha->flash_errlog_start = 0;
7341 			ha->flash_desc_addr = FLASH_8021_DESCRIPTOR_TABLE;
7342 			ha->flash_fw_addr = FLASH_8021_FIRMWARE_ADDR;
7343 			ha->flash_fw_size = FLASH_8021_FIRMWARE_SIZE;
7344 			ha->bootloader_addr = FLASH_8021_BOOTLOADER_ADDR;
7345 			ha->bootloader_size = FLASH_8021_BOOTLOADER_SIZE;
7346 		} else {
7347 			EL(ha, "unassigned flash fn0 addr: %x\n",
7348 			    ha->device_id);
7349 		}
7350 	}
7351 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7352 }
7353 
7354 /*
7355  * ql_get_sfp
7356  *	Returns sfp data to sdmapi caller
7357  *
7358  * Input:
7359  *	ha:	adapter state pointer.
7360  *	cmd:	Local EXT_IOCTL cmd struct pointer.
7361  *	mode:	flags.
7362  *
7363  * Returns:
7364  *	None, request status indicated in cmd->Status.
7365  *
7366  * Context:
7367  *	Kernel context.
7368  */
7369 static void
7370 ql_get_sfp(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7371 {
7372 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7373 
7374 	if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
7375 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7376 		EL(ha, "failed, invalid request for HBA\n");
7377 		return;
7378 	}
7379 
7380 	if (cmd->ResponseLen < QL_24XX_SFP_SIZE) {
7381 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
7382 		cmd->DetailStatus = QL_24XX_SFP_SIZE;
7383 		EL(ha, "failed, ResponseLen < SFP len, len passed=%xh\n",
7384 		    cmd->ResponseLen);
7385 		return;
7386 	}
7387 
7388 	/* Dump SFP data in user buffer */
7389 	if ((ql_dump_sfp(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
7390 	    mode)) != 0) {
7391 		cmd->Status = EXT_STATUS_COPY_ERR;
7392 		EL(ha, "failed, copy error\n");
7393 	} else {
7394 		cmd->Status = EXT_STATUS_OK;
7395 	}
7396 
7397 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7398 }
7399 
7400 /*
7401  * ql_dump_sfp
7402  *	Dumps SFP.
7403  *
7404  * Input:
7405  *	ha:	adapter state pointer.
7406  *	bp:	buffer address.
7407  *	mode:	flags
7408  *
7409  * Returns:
7410  *	0 = success, ENOMEM or EFAULT = failure.
7411  * Context:
7412  *	Kernel context.
7413  */
7414 static int
7415 ql_dump_sfp(ql_adapter_state_t *ha, void *bp, int mode)
7416 {
7417 	dma_mem_t	mem;
7418 	uint32_t	cnt;
7419 	int		rval2, rval = 0;
7420 	uint32_t	dxfer;
7421 
7422 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7423 
7424 	/* Get memory for SFP. */
7425 
7426 	if ((rval2 = ql_get_dma_mem(ha, &mem, 64, LITTLE_ENDIAN_DMA,
7427 	    QL_DMA_DATA_ALIGN)) != QL_SUCCESS) {
7428 		EL(ha, "failed, ql_get_dma_mem=%xh\n", rval2);
7429 		return (ENOMEM);
7430 	}
7431 
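	/*
	 * Sketch of the loop below (assuming QL_24XX_SFP_SIZE is 512 and the
	 * DMA chunk size is the 64 bytes requested above): transceiver data
	 * is fetched in mem.size pieces, using two-wire device address 0xA0
	 * for offsets 0x000-0x0ff (SFF-8472 serial ID page) and 0xA2 for
	 * offsets 0x100-0x1ff (diagnostics page), with the low byte of cnt
	 * used as the starting offset within the selected page.
	 */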
7432 	for (cnt = 0; cnt < QL_24XX_SFP_SIZE; cnt += mem.size) {
7433 		rval2 = ql_read_sfp(ha, &mem,
7434 		    (uint16_t)(cnt < 256 ? 0xA0 : 0xA2),
7435 		    (uint16_t)(cnt & 0xff));
7436 		if (rval2 != QL_SUCCESS) {
7437 			EL(ha, "failed, read_sfp=%xh\n", rval2);
7438 			rval = EFAULT;
7439 			break;
7440 		}
7441 
7442 		/* copy the data back */
7443 		if ((dxfer = ql_send_buffer_data(mem.bp, bp, mem.size,
7444 		    mode)) != mem.size) {
7445 			/* ddi copy error */
7446 			EL(ha, "failed, ddi copy; byte cnt = %xh", dxfer);
7447 			rval = EFAULT;
7448 			break;
7449 		}
7450 
7451 		/* adjust the buffer pointer */
7452 		bp = (caddr_t)bp + mem.size;
7453 	}
7454 
7455 	ql_free_phys(ha, &mem);
7456 
7457 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7458 
7459 	return (rval);
7460 }
7461 
7462 /*
7463  * ql_port_param
7464  *	Retrieves or sets the firmware port speed settings
7465  *
7466  * Input:
7467  *	ha:	adapter state pointer.
7468  *	cmd:	Local EXT_IOCTL cmd struct pointer.
7469  *	mode:	flags.
7470  *
7471  * Returns:
7472  *	None, request status indicated in cmd->Status.
7473  *
7474  * Context:
7475  *	Kernel context.
7476  *
7477  */
7478 static void
7479 ql_port_param(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7480 {
7481 	uint8_t			*name;
7482 	ql_tgt_t		*tq;
7483 	EXT_PORT_PARAM		port_param = {0};
7484 	uint32_t		rval = QL_SUCCESS;
7485 	uint32_t		idma_rate;
7486 
7487 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7488 
7489 	if (CFG_IST(ha, CFG_CTRL_242581) == 0) {
7490 		EL(ha, "invalid request for this HBA\n");
7491 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7492 		cmd->ResponseLen = 0;
7493 		return;
7494 	}
7495 
7496 	if (LOOP_NOT_READY(ha)) {
7497 		EL(ha, "failed, loop not ready\n");
7498 		cmd->Status = EXT_STATUS_DEVICE_OFFLINE;
7499 		cmd->ResponseLen = 0;
7500 		return;
7501 	}
7502 
7503 	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
7504 	    (void*)&port_param, sizeof (EXT_PORT_PARAM), mode) != 0) {
7505 		EL(ha, "failed, ddi_copyin\n");
7506 		cmd->Status = EXT_STATUS_COPY_ERR;
7507 		cmd->ResponseLen = 0;
7508 		return;
7509 	}
7510 
7511 	if (port_param.FCScsiAddr.DestType != EXT_DEF_DESTTYPE_WWPN) {
7512 		EL(ha, "Unsupported dest lookup type: %xh\n",
7513 		    port_param.FCScsiAddr.DestType);
7514 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
7515 		cmd->ResponseLen = 0;
7516 		return;
7517 	}
7518 
7519 	name = port_param.FCScsiAddr.DestAddr.WWPN;
7520 
7521 	QL_PRINT_9(CE_CONT, "(%d): wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
7522 	    ha->instance, name[0], name[1], name[2], name[3], name[4],
7523 	    name[5], name[6], name[7]);
7524 
7525 	tq = ql_find_port(ha, name, (uint16_t)QLNT_PORT);
7526 	if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
7527 		EL(ha, "failed, fc_port not found\n");
7528 		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
7529 		cmd->ResponseLen = 0;
7530 		return;
7531 	}
7532 
7533 	cmd->Status = EXT_STATUS_OK;
7534 	cmd->DetailStatus = EXT_STATUS_OK;
7535 
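	/*
	 * The GET and SET cases below translate between the application's
	 * EXT_DEF_PORTSPEED_* values and the firmware's IIDMA_RATE_* codes
	 * (a sketch, not an exhaustive list), e.g.
	 * EXT_DEF_PORTSPEED_4GBIT <-> IIDMA_RATE_4GB; an unknown firmware
	 * rate is reported as EXT_DEF_PORTSPEED_UNKNOWN.
	 */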
7536 	switch (port_param.Mode) {
7537 	case EXT_IIDMA_MODE_GET:
7538 		/*
7539 		 * Report the firmware's port rate for the wwpn
7540 		 */
7541 		rval = ql_iidma_rate(ha, tq->loop_id, &idma_rate,
7542 		    port_param.Mode);
7543 
7544 		if (rval != QL_SUCCESS) {
7545 			EL(ha, "iidma get failed: %xh\n", rval);
7546 			cmd->Status = EXT_STATUS_MAILBOX;
7547 			cmd->DetailStatus = rval;
7548 			cmd->ResponseLen = 0;
7549 		} else {
7550 			switch (idma_rate) {
7551 			case IIDMA_RATE_1GB:
7552 				port_param.Speed =
7553 				    EXT_DEF_PORTSPEED_1GBIT;
7554 				break;
7555 			case IIDMA_RATE_2GB:
7556 				port_param.Speed =
7557 				    EXT_DEF_PORTSPEED_2GBIT;
7558 				break;
7559 			case IIDMA_RATE_4GB:
7560 				port_param.Speed =
7561 				    EXT_DEF_PORTSPEED_4GBIT;
7562 				break;
7563 			case IIDMA_RATE_8GB:
7564 				port_param.Speed =
7565 				    EXT_DEF_PORTSPEED_8GBIT;
7566 				break;
7567 			case IIDMA_RATE_10GB:
7568 				port_param.Speed =
7569 				    EXT_DEF_PORTSPEED_10GBIT;
7570 				break;
7571 			default:
7572 				port_param.Speed =
7573 				    EXT_DEF_PORTSPEED_UNKNOWN;
7574 				EL(ha, "failed, Port speed rate=%xh\n",
7575 				    idma_rate);
7576 				break;
7577 			}
7578 
7579 			/* Copy back the data */
7580 			rval = ddi_copyout((void *)&port_param,
7581 			    (void *)(uintptr_t)cmd->ResponseAdr,
7582 			    sizeof (EXT_PORT_PARAM), mode);
7583 
7584 			if (rval != 0) {
7585 				cmd->Status = EXT_STATUS_COPY_ERR;
7586 				cmd->ResponseLen = 0;
7587 				EL(ha, "failed, ddi_copyout\n");
7588 			} else {
7589 				cmd->ResponseLen = (uint32_t)
7590 				    sizeof (EXT_PORT_PARAM);
7591 			}
7592 		}
7593 		break;
7594 
7595 	case EXT_IIDMA_MODE_SET:
7596 		/*
7597 		 * Set the firmware's port rate for the wwpn
7598 		 */
7599 		switch (port_param.Speed) {
7600 		case EXT_DEF_PORTSPEED_1GBIT:
7601 			idma_rate = IIDMA_RATE_1GB;
7602 			break;
7603 		case EXT_DEF_PORTSPEED_2GBIT:
7604 			idma_rate = IIDMA_RATE_2GB;
7605 			break;
7606 		case EXT_DEF_PORTSPEED_4GBIT:
7607 			idma_rate = IIDMA_RATE_4GB;
7608 			break;
7609 		case EXT_DEF_PORTSPEED_8GBIT:
7610 			idma_rate = IIDMA_RATE_8GB;
7611 			break;
7612 		case EXT_DEF_PORTSPEED_10GBIT:
7613 			idma_rate = IIDMA_RATE_10GB;
7614 			break;
7615 		default:
7616 			EL(ha, "invalid set iidma rate: %x\n",
7617 			    port_param.Speed);
7618 			cmd->Status = EXT_STATUS_INVALID_PARAM;
7619 			cmd->ResponseLen = 0;
7620 			rval = QL_PARAMETER_ERROR;
7621 			break;
7622 		}
7623 
7624 		if (rval == QL_SUCCESS) {
7625 			rval = ql_iidma_rate(ha, tq->loop_id, &idma_rate,
7626 			    port_param.Mode);
7627 			if (rval != QL_SUCCESS) {
7628 				EL(ha, "iidma set failed: %xh\n", rval);
7629 				cmd->Status = EXT_STATUS_MAILBOX;
7630 				cmd->DetailStatus = rval;
7631 				cmd->ResponseLen = 0;
7632 			}
7633 		}
7634 		break;
7635 	default:
7636 		EL(ha, "invalid mode specified: %x\n", port_param.Mode);
7637 		cmd->Status = EXT_STATUS_INVALID_PARAM;
7638 		cmd->ResponseLen = 0;
7639 		cmd->DetailStatus = 0;
7640 		break;
7641 	}
7642 
7643 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7644 }
7645 
7646 /*
7647  * ql_get_fwexttrace
7648  *	Dumps f/w extended trace buffer
7649  *
7650  * Input:
7651  *	ha:	adapter state pointer.
7652  *	cmd:	Local EXT_IOCTL cmd struct pointer.
7653  *	mode:	flags.
7654  *
7655  * Returns:
7656  *	None, request status indicated in cmd->Status.
7657  * Context:
7658  *	Kernel context.
7659  */
7660 /* ARGSUSED */
7661 static void
7662 ql_get_fwexttrace(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7663 {
7664 	int	rval;
7665 	caddr_t	payload;
7666 
7667 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7668 
7669 	if (CFG_IST(ha, CFG_CTRL_24258081) == 0) {
7670 		EL(ha, "invalid request for this HBA\n");
7671 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7672 		cmd->ResponseLen = 0;
7673 		return;
7674 	}
7675 
7676 	if ((CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) == 0) ||
7677 	    (ha->fwexttracebuf.bp == NULL)) {
7678 		EL(ha, "f/w extended trace is not enabled\n");
7679 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7680 		cmd->ResponseLen = 0;
7681 		return;
7682 	}
7683 
7684 	if (cmd->ResponseLen < FWEXTSIZE) {
7685 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
7686 		cmd->DetailStatus = FWEXTSIZE;
7687 		EL(ha, "failed, ResponseLen (%xh) < %xh (FWEXTSIZE)\n",
7688 		    cmd->ResponseLen, FWEXTSIZE);
7689 		cmd->ResponseLen = 0;
7690 		return;
7691 	}
7692 
7693 	/* Time Stamp */
7694 	rval = ql_fw_etrace(ha, &ha->fwexttracebuf, FTO_INSERT_TIME_STAMP);
7695 	if (rval != QL_SUCCESS) {
7696 		EL(ha, "f/w extended trace insert"
7697 		    "time stamp failed: %xh\n", rval);
7698 		cmd->Status = EXT_STATUS_ERR;
7699 		cmd->ResponseLen = 0;
7700 		return;
7701 	}
7702 
7703 	/* Disable Tracing */
7704 	rval = ql_fw_etrace(ha, &ha->fwexttracebuf, FTO_EXT_TRACE_DISABLE);
7705 	if (rval != QL_SUCCESS) {
7706 		EL(ha, "f/w extended trace disable failed: %xh\n", rval);
7707 		cmd->Status = EXT_STATUS_ERR;
7708 		cmd->ResponseLen = 0;
7709 		return;
7710 	}
7711 
7712 	/* Allocate payload buffer */
7713 	payload = kmem_zalloc(FWEXTSIZE, KM_SLEEP);
7714 	if (payload == NULL) {
7715 		EL(ha, "failed, kmem_zalloc\n");
7716 		cmd->Status = EXT_STATUS_NO_MEMORY;
7717 		cmd->ResponseLen = 0;
7718 		return;
7719 	}
7720 
7721 	/* Sync DMA buffer. */
7722 	(void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
7723 	    FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);
7724 
7725 	/* Copy trace buffer data. */
7726 	ddi_rep_get8(ha->fwexttracebuf.acc_handle, (uint8_t *)payload,
7727 	    (uint8_t *)ha->fwexttracebuf.bp, FWEXTSIZE,
7728 	    DDI_DEV_AUTOINCR);
7729 
7730 	/* Send payload to application. */
7731 	if (ql_send_buffer_data(payload, (caddr_t)(uintptr_t)cmd->ResponseAdr,
7732 	    cmd->ResponseLen, mode) != cmd->ResponseLen) {
7733 		EL(ha, "failed, send_buffer_data\n");
7734 		cmd->Status = EXT_STATUS_COPY_ERR;
7735 		cmd->ResponseLen = 0;
7736 	} else {
7737 		cmd->Status = EXT_STATUS_OK;
7738 	}
7739 
7740 	kmem_free(payload, FWEXTSIZE);
7741 
7742 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7743 }
7744 
7745 /*
7746  * ql_get_fwfcetrace
7747  *	Dumps f/w fibre channel event trace buffer
7748  *
7749  * Input:
7750  *	ha:	adapter state pointer.
7751  *	cmd:	Local EXT_IOCTL cmd struct pointer.
7752  *	mode:	flags.
7753  *
7754  * Returns:
7755  *	None, request status indicated in cmd->Status.
7756  * Context:
7757  *	Kernel context.
7758  */
7759 /* ARGSUSED */
7760 static void
7761 ql_get_fwfcetrace(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7762 {
7763 	int	rval;
7764 	caddr_t	payload;
7765 
7766 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7767 
7768 	if (CFG_IST(ha, CFG_CTRL_24258081) == 0) {
7769 		EL(ha, "invalid request for this HBA\n");
7770 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7771 		cmd->ResponseLen = 0;
7772 		return;
7773 	}
7774 
7775 	if ((CFG_IST(ha, CFG_ENABLE_FWFCETRACE) == 0) ||
7776 	    (ha->fwfcetracebuf.bp == NULL)) {
7777 		EL(ha, "f/w FCE trace is not enabled\n");
7778 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7779 		cmd->ResponseLen = 0;
7780 		return;
7781 	}
7782 
7783 	if (cmd->ResponseLen < FWFCESIZE) {
7784 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
7785 		cmd->DetailStatus = FWFCESIZE;
7786 		EL(ha, "failed, ResponseLen (%xh) < %xh (FWFCESIZE)\n",
7787 		    cmd->ResponseLen, FWFCESIZE);
7788 		cmd->ResponseLen = 0;
7789 		return;
7790 	}
7791 
7792 	/* Disable Tracing */
7793 	rval = ql_fw_etrace(ha, &ha->fwfcetracebuf, FTO_FCE_TRACE_DISABLE);
7794 	if (rval != QL_SUCCESS) {
7795 		EL(ha, "f/w FCE trace disable failed: %xh\n", rval);
7796 		cmd->Status = EXT_STATUS_ERR;
7797 		cmd->ResponseLen = 0;
7798 		return;
7799 	}
7800 
7801 	/* Allocate payload buffer */
7802 	payload = kmem_zalloc(FWFCESIZE, KM_SLEEP);
7803 	if (payload == NULL) {
7804 		EL(ha, "failed, kmem_zalloc\n");
7805 		cmd->Status = EXT_STATUS_NO_MEMORY;
7806 		cmd->ResponseLen = 0;
7807 		return;
7808 	}
7809 
7810 	/* Sync DMA buffer. */
7811 	(void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
7812 	    FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);
7813 
7814 	/* Copy trace buffer data. */
7815 	ddi_rep_get8(ha->fwfcetracebuf.acc_handle, (uint8_t *)payload,
7816 	    (uint8_t *)ha->fwfcetracebuf.bp, FWFCESIZE,
7817 	    DDI_DEV_AUTOINCR);
7818 
7819 	/* Send payload to application. */
7820 	if (ql_send_buffer_data(payload, (caddr_t)(uintptr_t)cmd->ResponseAdr,
7821 	    cmd->ResponseLen, mode) != cmd->ResponseLen) {
7822 		EL(ha, "failed, send_buffer_data\n");
7823 		cmd->Status = EXT_STATUS_COPY_ERR;
7824 		cmd->ResponseLen = 0;
7825 	} else {
7826 		cmd->Status = EXT_STATUS_OK;
7827 	}
7828 
7829 	kmem_free(payload, FWFCESIZE);
7830 
7831 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7832 }
7833 
7834 /*
7835  * ql_get_pci_data
7836  *	Retrieves pci config space data
7837  *
7838  * Input:
7839  *	ha:	adapter state pointer.
7840  *	cmd:	Local EXT_IOCTL cmd struct pointer.
7841  *	mode:	flags.
7842  *
7843  * Returns:
7844  *	None, request status indicated in cmd->Status.
7845  *
7846  * Context:
7847  *	Kernel context.
7848  *
7849  */
7850 static void
7851 ql_get_pci_data(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7852 {
7853 	uint8_t		cap_ptr;
7854 	uint8_t		cap_id;
7855 	uint32_t	buf_size = 256;
7856 
7857 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7858 
7859 	/*
7860 	 * First check the "Capabilities List" bit of the status register.
7861 	 */
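	/*
	 * A conventional PCI function exposes 256 bytes of configuration
	 * space; if the capability walk below finds a PCI Express
	 * capability, the 4096-byte extended configuration space is dumped
	 * instead.  One step of the walk looks like:
	 *	cap_id  = ql_pci_config_get8(ha, cap_ptr);
	 *	cap_ptr = ql_pci_config_get8(ha, cap_ptr + PCI_CAP_NEXT_PTR);
	 */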
7862 	if (ql_pci_config_get16(ha, PCI_CONF_STAT) & PCI_STAT_CAP) {
7863 		/*
7864 		 * Now get the capability pointer
7865 		 */
7866 		cap_ptr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR);
7867 		while (cap_ptr != PCI_CAP_NEXT_PTR_NULL) {
7868 			/*
7869 			 * Check for the pcie capability.
7870 			 */
7871 			cap_id = (uint8_t)ql_pci_config_get8(ha, cap_ptr);
7872 			if (cap_id == PCI_CAP_ID_PCI_E) {
7873 				buf_size = 4096;
7874 				break;
7875 			}
7876 			cap_ptr = (uint8_t)ql_pci_config_get8(ha,
7877 			    (cap_ptr + PCI_CAP_NEXT_PTR));
7878 		}
7879 	}
7880 
7881 	if (cmd->ResponseLen < buf_size) {
7882 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
7883 		cmd->DetailStatus = buf_size;
7884 		EL(ha, "failed ResponseLen < buf_size, len passed=%xh\n",
7885 		    cmd->ResponseLen);
7886 		return;
7887 	}
7888 
7889 	/* Dump PCI config data. */
7890 	if ((ql_pci_dump(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
7891 	    buf_size, mode)) != 0) {
7892 		cmd->Status = EXT_STATUS_COPY_ERR;
7893 		cmd->DetailStatus = 0;
7894 		EL(ha, "failed, copy err pci_dump\n");
7895 	} else {
7896 		cmd->Status = EXT_STATUS_OK;
7897 		cmd->DetailStatus = buf_size;
7898 	}
7899 
7900 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7901 }
7902 
7903 /*
7904  * ql_pci_dump
7905  *	Dumps PCI config data to application buffer.
7906  *
7907  * Input:
7908  *	ha = adapter state pointer.
7909  *	bp = user buffer address.
7910  *
7911  * Returns:
7912  *	0 = success, ENOMEM or EFAULT = failure.
7913  * Context:
7914  *	Kernel context.
7915  */
7916 int
7917 ql_pci_dump(ql_adapter_state_t *ha, uint32_t *bp, uint32_t pci_size, int mode)
7918 {
7919 	uint32_t	pci_os;
7920 	uint32_t	*ptr32, *org_ptr32;
7921 
7922 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7923 
7924 	ptr32 = kmem_zalloc(pci_size, KM_SLEEP);
7925 	if (ptr32 == NULL) {
7926 		EL(ha, "failed kmem_zalloc\n");
7927 		return (ENOMEM);
7928 	}
7929 
7930 	/* store the initial value of ptr32 */
7931 	org_ptr32 = ptr32;
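	/*
	 * Read the configuration space one dword at a time and normalize
	 * each word to little-endian so the application sees the same byte
	 * order regardless of host endianness (LITTLE_ENDIAN_32() is
	 * assumed to be a no-op on little-endian hosts).
	 */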
7932 	for (pci_os = 0; pci_os < pci_size; pci_os += 4) {
7933 		*ptr32 = (uint32_t)ql_pci_config_get32(ha, pci_os);
7934 		LITTLE_ENDIAN_32(ptr32);
7935 		ptr32++;
7936 	}
7937 
7938 	if (ddi_copyout((void *)org_ptr32, (void *)bp, pci_size, mode) !=
7939 	    0) {
7940 		EL(ha, "failed ddi_copyout\n");
7941 		kmem_free(org_ptr32, pci_size);
7942 		return (EFAULT);
7943 	}
7944 
7945 	QL_DUMP_9(org_ptr32, 8, pci_size);
7946 
7947 	kmem_free(org_ptr32, pci_size);
7948 
7949 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7950 
7951 	return (0);
7952 }
7953 
7954 /*
7955  * ql_menlo_reset
7956  *	Reset Menlo
7957  *
7958  * Input:
7959  *	ha:	adapter state pointer.
7960  *	cmd:	Local EXT_IOCTL cmd struct pointer.
7961  *	mode:	flags.
7962  *
7963  * Returns:
7964  *	None, request status indicated in cmd->Status.
7965  * Context:
7966  *	Kernel context.
7967  */
7968 static void
7969 ql_menlo_reset(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7970 {
7971 	EXT_MENLO_RESET	rst;
7972 	ql_mbx_data_t	mr;
7973 	int		rval;
7974 
7975 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7976 
7977 	if ((CFG_IST(ha, CFG_CTRL_MENLO)) == 0) {
7978 		EL(ha, "failed, invalid request for HBA\n");
7979 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
7980 		cmd->ResponseLen = 0;
7981 		return;
7982 	}
7983 
7984 	/*
7985 	 * TODO: only vp_index 0 can do this (?)
7986 	 */
7987 
7988 	/*  Verify the size of request structure. */
7989 	if (cmd->RequestLen < sizeof (EXT_MENLO_RESET)) {
7990 		/* Return error */
7991 		EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen,
7992 		    sizeof (EXT_MENLO_RESET));
7993 		cmd->Status = EXT_STATUS_INVALID_PARAM;
7994 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
7995 		cmd->ResponseLen = 0;
7996 		return;
7997 	}
7998 
7999 	/* Get reset request. */
8000 	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
8001 	    (void *)&rst, sizeof (EXT_MENLO_RESET), mode) != 0) {
8002 		EL(ha, "failed, ddi_copyin\n");
8003 		cmd->Status = EXT_STATUS_COPY_ERR;
8004 		cmd->ResponseLen = 0;
8005 		return;
8006 	}
8007 
8008 	/* Wait for I/O to stop and daemon to stall. */
8009 	if (ql_suspend_hba(ha, 0) != QL_SUCCESS) {
8010 		EL(ha, "ql_stall_driver failed\n");
8011 		ql_restart_hba(ha);
8012 		cmd->Status = EXT_STATUS_BUSY;
8013 		cmd->ResponseLen = 0;
8014 		return;
8015 	}
8016 
8017 	rval = ql_reset_menlo(ha, &mr, rst.Flags);
8018 	if (rval != QL_SUCCESS) {
8019 		EL(ha, "failed, status=%xh\n", rval);
8020 		cmd->Status = EXT_STATUS_MAILBOX;
8021 		cmd->DetailStatus = rval;
8022 		cmd->ResponseLen = 0;
8023 	} else if (mr.mb[1] != 0) {
8024 		EL(ha, "failed, substatus=%d\n", mr.mb[1]);
8025 		cmd->Status = EXT_STATUS_ERR;
8026 		cmd->DetailStatus = mr.mb[1];
8027 		cmd->ResponseLen = 0;
8028 	}
8029 
8030 	ql_restart_hba(ha);
8031 
8032 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8033 }
8034 
8035 /*
8036  * ql_menlo_get_fw_version
8037  *	Get Menlo firmware version.
8038  *
8039  * Input:
8040  *	ha:	adapter state pointer.
8041  *	cmd:	Local EXT_IOCTL cmd struct pointer.
8042  *	mode:	flags.
8043  *
8044  * Returns:
8045  *	None, request status indicated in cmd->Status.
8046  * Context:
8047  *	Kernel context.
8048  */
8049 static void
8050 ql_menlo_get_fw_version(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8051 {
8052 	int				rval;
8053 	ql_mbx_iocb_t			*pkt;
8054 	EXT_MENLO_GET_FW_VERSION	ver = {0};
8055 
8056 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
8057 
8058 	if ((CFG_IST(ha, CFG_CTRL_MENLO)) == 0) {
8059 		EL(ha, "failed, invalid request for HBA\n");
8060 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
8061 		cmd->ResponseLen = 0;
8062 		return;
8063 	}
8064 
8065 	if (cmd->ResponseLen < sizeof (EXT_MENLO_GET_FW_VERSION)) {
8066 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
8067 		cmd->DetailStatus = sizeof (EXT_MENLO_GET_FW_VERSION);
8068 		EL(ha, "ResponseLen=%d < %d\n", cmd->ResponseLen,
8069 		    sizeof (EXT_MENLO_GET_FW_VERSION));
8070 		cmd->ResponseLen = 0;
8071 		return;
8072 	}
8073 
8074 	/* Allocate packet. */
8075 	pkt = kmem_zalloc(sizeof (ql_mbx_iocb_t), KM_SLEEP);
8076 	if (pkt == NULL) {
8077 		EL(ha, "failed, kmem_zalloc\n");
8078 		cmd->Status = EXT_STATUS_NO_MEMORY;
8079 		cmd->ResponseLen = 0;
8080 		return;
8081 	}
8082 
8083 	pkt->mvfy.entry_type = VERIFY_MENLO_TYPE;
8084 	pkt->mvfy.entry_count = 1;
8085 	pkt->mvfy.options_status = LE_16(VMF_DO_NOT_UPDATE_FW);
8086 
8087 	rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, sizeof (ql_mbx_iocb_t));
8088 	LITTLE_ENDIAN_16(&pkt->mvfy.options_status);
8089 	LITTLE_ENDIAN_16(&pkt->mvfy.failure_code);
8090 	ver.FwVersion = LE_32(pkt->mvfy.fw_version);
8091 
8092 	if (rval != QL_SUCCESS || (pkt->mvfy.entry_status & 0x3c) != 0 ||
8093 	    pkt->mvfy.options_status != CS_COMPLETE) {
8094 		/* Command error */
8095 		EL(ha, "failed, status=%xh, es=%xh, cs=%xh, fc=%xh\n", rval,
8096 		    pkt->mvfy.entry_status & 0x3c, pkt->mvfy.options_status,
8097 		    pkt->mvfy.failure_code);
8098 		cmd->Status = EXT_STATUS_ERR;
8099 		cmd->DetailStatus = rval != QL_SUCCESS ? rval :
8100 		    QL_FUNCTION_FAILED;
8101 		cmd->ResponseLen = 0;
8102 	} else if (ddi_copyout((void *)&ver,
8103 	    (void *)(uintptr_t)cmd->ResponseAdr,
8104 	    sizeof (EXT_MENLO_GET_FW_VERSION), mode) != 0) {
8105 		EL(ha, "failed, ddi_copyout\n");
8106 		cmd->Status = EXT_STATUS_COPY_ERR;
8107 		cmd->ResponseLen = 0;
8108 	} else {
8109 		cmd->ResponseLen = sizeof (EXT_MENLO_GET_FW_VERSION);
8110 	}
8111 
8112 	kmem_free(pkt, sizeof (ql_mbx_iocb_t));
8113 
8114 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8115 }
8116 
8117 /*
8118  * ql_menlo_update_fw
8119  *	Update Menlo firmware.
8120  *
8121  * Input:
8122  *	ha:	adapter state pointer.
8123  *	cmd:	Local EXT_IOCTL cmd struct pointer.
8124  *	mode:	flags.
8125  *
8126  * Returns:
8127  *	None, request status indicated in cmd->Status.
8128  * Context:
8129  *	Kernel context.
8130  */
8131 static void
8132 ql_menlo_update_fw(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8133 {
8134 	ql_mbx_iocb_t		*pkt;
8135 	dma_mem_t		*dma_mem;
8136 	EXT_MENLO_UPDATE_FW	fw;
8137 	uint32_t		*ptr32;
8138 	int			rval;
8139 
8140 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
8141 
8142 	if ((CFG_IST(ha, CFG_CTRL_MENLO)) == 0) {
8143 		EL(ha, "failed, invalid request for HBA\n");
8144 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
8145 		cmd->ResponseLen = 0;
8146 		return;
8147 	}
8148 
8149 	/*
8150 	 * TODO: only vp_index 0 can do this (?)
8151 	 */
8152 
8153 	/*  Verify the size of request structure. */
8154 	if (cmd->RequestLen < sizeof (EXT_MENLO_UPDATE_FW)) {
8155 		/* Return error */
8156 		EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen,
8157 		    sizeof (EXT_MENLO_UPDATE_FW));
8158 		cmd->Status = EXT_STATUS_INVALID_PARAM;
8159 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
8160 		cmd->ResponseLen = 0;
8161 		return;
8162 	}
8163 
8164 	/* Get update fw request. */
8165 	if (ddi_copyin((caddr_t)(uintptr_t)cmd->RequestAdr, (caddr_t)&fw,
8166 	    sizeof (EXT_MENLO_UPDATE_FW), mode) != 0) {
8167 		EL(ha, "failed, ddi_copyin\n");
8168 		cmd->Status = EXT_STATUS_COPY_ERR;
8169 		cmd->ResponseLen = 0;
8170 		return;
8171 	}
8172 
8173 	/* Wait for I/O to stop and daemon to stall. */
8174 	if (ql_suspend_hba(ha, 0) != QL_SUCCESS) {
8175 		EL(ha, "ql_stall_driver failed\n");
8176 		ql_restart_hba(ha);
8177 		cmd->Status = EXT_STATUS_BUSY;
8178 		cmd->ResponseLen = 0;
8179 		return;
8180 	}
8181 
8182 	/* Allocate packet. */
8183 	dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP);
8184 	if (dma_mem == NULL) {
8185 		EL(ha, "failed, kmem_zalloc\n");
8186 		cmd->Status = EXT_STATUS_NO_MEMORY;
8187 		cmd->ResponseLen = 0;
8188 		return;
8189 	}
8190 	pkt = kmem_zalloc(sizeof (ql_mbx_iocb_t), KM_SLEEP);
8191 	if (pkt == NULL) {
8192 		EL(ha, "failed, kmem_zalloc\n");
8193 		kmem_free(dma_mem, sizeof (dma_mem_t));
8194 		ql_restart_hba(ha);
8195 		cmd->Status = EXT_STATUS_NO_MEMORY;
8196 		cmd->ResponseLen = 0;
8197 		return;
8198 	}
8199 
8200 	/* Get DMA memory for the IOCB */
8201 	if (ql_get_dma_mem(ha, dma_mem, fw.TotalByteCount, LITTLE_ENDIAN_DMA,
8202 	    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
8203 		cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
8204 		    "alloc failed", QL_NAME, ha->instance);
8205 		kmem_free(pkt, sizeof (ql_mbx_iocb_t));
8206 		kmem_free(dma_mem, sizeof (dma_mem_t));
8207 		ql_restart_hba(ha);
8208 		cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
8209 		cmd->ResponseLen = 0;
8210 		return;
8211 	}
8212 
8213 	/* Get firmware data. */
8214 	if (ql_get_buffer_data((caddr_t)(uintptr_t)fw.pFwDataBytes, dma_mem->bp,
8215 	    fw.TotalByteCount, mode) != fw.TotalByteCount) {
8216 		EL(ha, "failed, get_buffer_data\n");
8217 		ql_free_dma_resource(ha, dma_mem);
8218 		kmem_free(pkt, sizeof (ql_mbx_iocb_t));
8219 		kmem_free(dma_mem, sizeof (dma_mem_t));
8220 		ql_restart_hba(ha);
8221 		cmd->Status = EXT_STATUS_COPY_ERR;
8222 		cmd->ResponseLen = 0;
8223 		return;
8224 	}
8225 
8226 	/* Sync DMA buffer. */
8227 	(void) ddi_dma_sync(dma_mem->dma_handle, 0, dma_mem->size,
8228 	    DDI_DMA_SYNC_FORDEV);
8229 
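	/*
	 * Build the VERIFY_MENLO IOCB: the firmware image just copied into
	 * dma_mem is described by a single data segment (dseg_0), and the
	 * image's own version word (taken from dword 2 of the image, per
	 * the code below) is echoed back in fw_version.
	 */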
8230 	pkt->mvfy.entry_type = VERIFY_MENLO_TYPE;
8231 	pkt->mvfy.entry_count = 1;
8232 	pkt->mvfy.options_status = (uint16_t)LE_16(fw.Flags);
8233 	ptr32 = dma_mem->bp;
8234 	pkt->mvfy.fw_version = LE_32(ptr32[2]);
8235 	pkt->mvfy.fw_size = LE_32(fw.TotalByteCount);
8236 	pkt->mvfy.fw_sequence_size = LE_32(fw.TotalByteCount);
8237 	pkt->mvfy.dseg_count = LE_16(1);
8238 	pkt->mvfy.dseg_0_address[0] = (uint32_t)
8239 	    LE_32(LSD(dma_mem->cookie.dmac_laddress));
8240 	pkt->mvfy.dseg_0_address[1] = (uint32_t)
8241 	    LE_32(MSD(dma_mem->cookie.dmac_laddress));
8242 	pkt->mvfy.dseg_0_length = LE_32(fw.TotalByteCount);
8243 
8244 	rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, sizeof (ql_mbx_iocb_t));
8245 	LITTLE_ENDIAN_16(&pkt->mvfy.options_status);
8246 	LITTLE_ENDIAN_16(&pkt->mvfy.failure_code);
8247 
8248 	if (rval != QL_SUCCESS || (pkt->mvfy.entry_status & 0x3c) != 0 ||
8249 	    pkt->mvfy.options_status != CS_COMPLETE) {
8250 		/* Command error */
8251 		EL(ha, "failed, status=%xh, es=%xh, cs=%xh, fc=%xh\n", rval,
8252 		    pkt->mvfy.entry_status & 0x3c, pkt->mvfy.options_status,
8253 		    pkt->mvfy.failure_code);
8254 		cmd->Status = EXT_STATUS_ERR;
8255 		cmd->DetailStatus = rval != QL_SUCCESS ? rval :
8256 		    QL_FUNCTION_FAILED;
8257 		cmd->ResponseLen = 0;
8258 	}
8259 
8260 	ql_free_dma_resource(ha, dma_mem);
8261 	kmem_free(pkt, sizeof (ql_mbx_iocb_t));
8262 	kmem_free(dma_mem, sizeof (dma_mem_t));
8263 	ql_restart_hba(ha);
8264 
8265 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8266 }
8267 
8268 /*
8269  * ql_menlo_manage_info
8270  *	Get Menlo manage info.
8271  *
8272  * Input:
8273  *	ha:	adapter state pointer.
8274  *	cmd:	Local EXT_IOCTL cmd struct pointer.
8275  *	mode:	flags.
8276  *
8277  * Returns:
8278  *	None, request status indicated in cmd->Status.
8279  * Context:
8280  *	Kernel context.
8281  */
8282 static void
8283 ql_menlo_manage_info(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8284 {
8285 	ql_mbx_iocb_t		*pkt;
8286 	dma_mem_t		*dma_mem = NULL;
8287 	EXT_MENLO_MANAGE_INFO	info;
8288 	int			rval;
8289 
8290 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
8291 
8292 
8293 	/* The call is only supported for Schultz right now */
8294 	if (CFG_IST(ha, CFG_CTRL_8081)) {
8295 		ql_get_xgmac_statistics(ha, cmd, mode);
8296 		QL_PRINT_9(CE_CONT, "(%d): CFG_CTRL_81XX done\n",
8297 		    ha->instance);
8298 		return;
8299 	}
8300 
8301 	if (!CFG_IST(ha, CFG_CTRL_8081) || !CFG_IST(ha, CFG_CTRL_MENLO)) {
8302 		EL(ha, "failed, invalid request for HBA\n");
8303 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
8304 		cmd->ResponseLen = 0;
8305 		return;
8306 	}
8307 
8308 	/*  Verify the size of request structure. */
8309 	if (cmd->RequestLen < sizeof (EXT_MENLO_MANAGE_INFO)) {
8310 		/* Return error */
8311 		EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen,
8312 		    sizeof (EXT_MENLO_MANAGE_INFO));
8313 		cmd->Status = EXT_STATUS_INVALID_PARAM;
8314 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
8315 		cmd->ResponseLen = 0;
8316 		return;
8317 	}
8318 
8319 	/* Get manage info request. */
8320 	if (ddi_copyin((caddr_t)(uintptr_t)cmd->RequestAdr,
8321 	    (caddr_t)&info, sizeof (EXT_MENLO_MANAGE_INFO), mode) != 0) {
8322 		EL(ha, "failed, ddi_copyin\n");
8323 		cmd->Status = EXT_STATUS_COPY_ERR;
8324 		cmd->ResponseLen = 0;
8325 		return;
8326 	}
8327 
8328 	/* Allocate packet. */
8329 	pkt = kmem_zalloc(sizeof (ql_mbx_iocb_t), KM_SLEEP);
8330 	if (pkt == NULL) {
8331 		EL(ha, "failed, kmem_zalloc\n");
8332 		ql_restart_driver(ha);
8333 		cmd->Status = EXT_STATUS_NO_MEMORY;
8334 		cmd->ResponseLen = 0;
8335 		return;
8336 	}
8337 
8338 	pkt->mdata.entry_type = MENLO_DATA_TYPE;
8339 	pkt->mdata.entry_count = 1;
8340 	pkt->mdata.options_status = (uint16_t)LE_16(info.Operation);
8341 
8342 	/* Get DMA memory for the IOCB */
8343 	if (info.Operation == MENLO_OP_READ_MEM ||
8344 	    info.Operation == MENLO_OP_WRITE_MEM) {
8345 		pkt->mdata.total_byte_count = LE_32(info.TotalByteCount);
8346 		pkt->mdata.parameter_1 =
8347 		    LE_32(info.Parameters.ap.MenloMemory.StartingAddr);
8348 		dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t),
8349 		    KM_SLEEP);
8350 		if (dma_mem == NULL) {
8351 			EL(ha, "failed, kmem_zalloc\n");
8352 			kmem_free(pkt, sizeof (ql_mbx_iocb_t));
8353 			cmd->Status = EXT_STATUS_NO_MEMORY;
8354 			cmd->ResponseLen = 0;
8355 			return;
8356 		}
8357 		if (ql_get_dma_mem(ha, dma_mem, info.TotalByteCount,
8358 		    LITTLE_ENDIAN_DMA, QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
8359 			cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
8360 			    "alloc failed", QL_NAME, ha->instance);
8361 			kmem_free(dma_mem, sizeof (dma_mem_t));
8362 			kmem_free(pkt, sizeof (ql_mbx_iocb_t));
8363 			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
8364 			cmd->ResponseLen = 0;
8365 			return;
8366 		}
8367 		if (info.Operation == MENLO_OP_WRITE_MEM) {
8368 			/* Get data. */
8369 			if (ql_get_buffer_data(
8370 			    (caddr_t)(uintptr_t)info.pDataBytes,
8371 			    dma_mem->bp, info.TotalByteCount, mode) !=
8372 			    info.TotalByteCount) {
8373 				EL(ha, "failed, get_buffer_data\n");
8374 				ql_free_dma_resource(ha, dma_mem);
8375 				kmem_free(dma_mem, sizeof (dma_mem_t));
8376 				kmem_free(pkt, sizeof (ql_mbx_iocb_t));
8377 				cmd->Status = EXT_STATUS_COPY_ERR;
8378 				cmd->ResponseLen = 0;
8379 				return;
8380 			}
8381 			(void) ddi_dma_sync(dma_mem->dma_handle, 0,
8382 			    dma_mem->size, DDI_DMA_SYNC_FORDEV);
8383 		}
8384 		pkt->mdata.dseg_count = LE_16(1);
8385 		pkt->mdata.dseg_0_address[0] = (uint32_t)
8386 		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
8387 		pkt->mdata.dseg_0_address[1] = (uint32_t)
8388 		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
8389 		pkt->mdata.dseg_0_length = LE_32(info.TotalByteCount);
8390 	} else if (info.Operation & MENLO_OP_CHANGE_CONFIG) {
8391 		pkt->mdata.parameter_1 =
8392 		    LE_32(info.Parameters.ap.MenloConfig.ConfigParamID);
8393 		pkt->mdata.parameter_2 =
8394 		    LE_32(info.Parameters.ap.MenloConfig.ConfigParamData0);
8395 		pkt->mdata.parameter_3 =
8396 		    LE_32(info.Parameters.ap.MenloConfig.ConfigParamData1);
8397 	} else if (info.Operation & MENLO_OP_GET_INFO) {
8398 		pkt->mdata.parameter_1 =
8399 		    LE_32(info.Parameters.ap.MenloInfo.InfoDataType);
8400 		pkt->mdata.parameter_2 =
8401 		    LE_32(info.Parameters.ap.MenloInfo.InfoContext);
8402 	}
8403 
8404 	rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, sizeof (ql_mbx_iocb_t));
8405 	LITTLE_ENDIAN_16(&pkt->mdata.options_status);
8406 	LITTLE_ENDIAN_16(&pkt->mdata.failure_code);
8407 
8408 	if (rval != QL_SUCCESS || (pkt->mdata.entry_status & 0x3c) != 0 ||
8409 	    pkt->mdata.options_status != CS_COMPLETE) {
8410 		/* Command error */
8411 		EL(ha, "failed, status=%xh, es=%xh, cs=%xh, fc=%xh\n", rval,
8412 		    pkt->mdata.entry_status & 0x3c, pkt->mdata.options_status,
8413 		    pkt->mdata.failure_code);
8414 		cmd->Status = EXT_STATUS_ERR;
8415 		cmd->DetailStatus = rval != QL_SUCCESS ? rval :
8416 		    QL_FUNCTION_FAILED;
8417 		cmd->ResponseLen = 0;
8418 	} else if (info.Operation == MENLO_OP_READ_MEM) {
8419 		(void) ddi_dma_sync(dma_mem->dma_handle, 0, dma_mem->size,
8420 		    DDI_DMA_SYNC_FORKERNEL);
8421 		if (ql_send_buffer_data((caddr_t)(uintptr_t)info.pDataBytes,
8422 		    dma_mem->bp, info.TotalByteCount, mode) !=
8423 		    info.TotalByteCount) {
8424 			cmd->Status = EXT_STATUS_COPY_ERR;
8425 			cmd->ResponseLen = 0;
8426 		}
8427 	}
8428 
8429 	ql_free_dma_resource(ha, dma_mem);
8430 	kmem_free(dma_mem, sizeof (dma_mem_t));
8431 	kmem_free(pkt, sizeof (ql_mbx_iocb_t));
8432 
8433 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8434 }
8435 
8436 /*
8437  * ql_suspend_hba
8438  *	Suspends all adapter ports.
8439  *
8440  * Input:
8441  *	ha:		adapter state pointer.
8442  *	options:	BIT_0 --> leave driver stalled on exit if
8443  *				  failed.
8444  *
8445  * Returns:
8446  *	ql local function return status code.
8447  *
8448  * Context:
8449  *	Kernel context.
8450  */
8451 static int
8452 ql_suspend_hba(ql_adapter_state_t *ha, uint32_t opt)
8453 {
8454 	ql_adapter_state_t	*ha2;
8455 	ql_link_t		*link;
8456 	int			rval = QL_SUCCESS;
8457 
8458 	/* Quiesce I/O on all adapter ports */
8459 	for (link = ql_hba.first; link != NULL; link = link->next) {
8460 		ha2 = link->base_address;
8461 
8462 		if (ha2->fru_hba_index != ha->fru_hba_index) {
8463 			continue;
8464 		}
8465 
8466 		if ((rval = ql_stall_driver(ha2, opt)) != QL_SUCCESS) {
8467 			EL(ha, "ql_stall_driver status=%xh\n", rval);
8468 			break;
8469 		}
8470 	}
8471 
8472 	return (rval);
8473 }
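
/*
 * Typical pairing with ql_restart_hba(), a sketch based on callers in this
 * file such as ql_menlo_reset():
 *
 *	if (ql_suspend_hba(ha, 0) != QL_SUCCESS) {
 *		ql_restart_hba(ha);
 *		cmd->Status = EXT_STATUS_BUSY;
 *		return;
 *	}
 *	(disruptive operation)
 *	ql_restart_hba(ha);
 */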
8474 
8475 /*
8476  * ql_restart_hba
8477  *	Restarts adapter.
8478  *
8479  * Input:
8480  *	ha:	adapter state pointer.
8481  *
8482  * Context:
8483  *	Kernel context.
8484  */
8485 static void
8486 ql_restart_hba(ql_adapter_state_t *ha)
8487 {
8488 	ql_adapter_state_t	*ha2;
8489 	ql_link_t		*link;
8490 
8491 	/* Resume I/O on all adapter ports */
8492 	for (link = ql_hba.first; link != NULL; link = link->next) {
8493 		ha2 = link->base_address;
8494 
8495 		if (ha2->fru_hba_index != ha->fru_hba_index) {
8496 			continue;
8497 		}
8498 
8499 		ql_restart_driver(ha2);
8500 	}
8501 }
8502 
8503 /*
8504  * ql_get_vp_cnt_id
8505  *	Retrieves virtual port count and IDs.
8506  *
8507  * Input:
8508  *	ha:	adapter state pointer.
8509  *	cmd:	Local EXT_IOCTL cmd struct pointer.
8510  *	mode:	flags.
8511  *
8512  * Returns:
8513  *	None, request status indicated in cmd->Status.
8514  *
8515  * Context:
8516  *	Kernel context.
8517  *
8518  */
8519 static void
8520 ql_get_vp_cnt_id(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8521 {
8522 	ql_adapter_state_t	*vha;
8523 	PEXT_VPORT_ID_CNT	ptmp_vp;
8524 	int			id = 0;
8525 	int			rval;
8526 	char			name[MAXPATHLEN];
8527 
8528 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
8529 
8530 	/*
8531 	 * To be backward compatible with older API
8532 	 * check for the size of old EXT_VPORT_ID_CNT
8533 	 */
8534 	if (cmd->ResponseLen < sizeof (EXT_VPORT_ID_CNT) &&
8535 	    (cmd->ResponseLen != EXT_OLD_VPORT_ID_CNT_SIZE)) {
8536 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
8537 		cmd->DetailStatus = sizeof (EXT_VPORT_ID_CNT);
8538 		EL(ha, "failed, ResponseLen < EXT_VPORT_ID_CNT, Len=%xh\n",
8539 		    cmd->ResponseLen);
8540 		cmd->ResponseLen = 0;
8541 		return;
8542 	}
8543 
8544 	ptmp_vp = (EXT_VPORT_ID_CNT *)
8545 	    kmem_zalloc(sizeof (EXT_VPORT_ID_CNT), KM_SLEEP);
8546 	if (ptmp_vp == NULL) {
8547 		EL(ha, "failed, kmem_zalloc\n");
8548 		cmd->ResponseLen = 0;
8549 		return;
8550 	}
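	/*
	 * Walk the chain of virtual ports hanging off this physical
	 * adapter; the physical port itself is not included in the count.
	 */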
8551 	vha = ha->vp_next;
8552 	while (vha != NULL) {
8553 		ptmp_vp->VpCnt++;
8554 		ptmp_vp->VpId[id] = vha->vp_index;
8555 		(void) ddi_pathname(vha->dip, name);
8556 		(void) strcpy((char *)ptmp_vp->vp_path[id], name);
8557 		ptmp_vp->VpDrvInst[id] = (int32_t)vha->instance;
8558 		id++;
8559 		vha = vha->vp_next;
8560 	}
8561 	rval = ddi_copyout((void *)ptmp_vp,
8562 	    (void *)(uintptr_t)(cmd->ResponseAdr),
8563 	    cmd->ResponseLen, mode);
8564 	if (rval != 0) {
8565 		cmd->Status = EXT_STATUS_COPY_ERR;
8566 		cmd->ResponseLen = 0;
8567 		EL(ha, "failed, ddi_copyout\n");
8568 	} else {
8569 		cmd->ResponseLen = sizeof (EXT_VPORT_ID_CNT);
8570 		QL_PRINT_9(CE_CONT, "(%d): done, vport_cnt=%d\n",
8571 		    ha->instance, ptmp_vp->VpCnt);
8572 	}
8573 	kmem_free(ptmp_vp, sizeof (EXT_VPORT_ID_CNT));
8574 }
8575 
8576 /*
8577  * ql_vp_ioctl
8578  *	Performs all EXT_CC_VPORT_CMD functions.
8579  *
8580  * Input:
8581  *	ha:	adapter state pointer.
8582  *	cmd:	Local EXT_IOCTL cmd struct pointer.
8583  *	mode:	flags.
8584  *
8585  * Returns:
8586  *	None, request status indicated in cmd->Status.
8587  *
8588  * Context:
8589  *	Kernel context.
8590  */
8591 static void
8592 ql_vp_ioctl(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8593 {
8594 	QL_PRINT_9(CE_CONT, "(%d): started, cmd=%d\n", ha->instance,
8595 	    cmd->SubCode);
8596 
8597 	/* case off on command subcode */
8598 	switch (cmd->SubCode) {
8599 	case EXT_VF_SC_VPORT_GETINFO:
8600 		ql_qry_vport(ha, cmd, mode);
8601 		break;
8602 	default:
8603 		/* function not supported. */
8604 		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
8605 		EL(ha, "failed, Unsupported Subcode=%xh\n",
8606 		    cmd->SubCode);
8607 		break;
8608 	}
8609 
8610 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8611 }
8612 
8613 /*
8614  * ql_qry_vport
8615  *	Performs EXT_VF_SC_VPORT_GETINFO subfunction.
8616  *
8617  * Input:
8618  *	ha:	adapter state pointer.
8619  *	cmd:	EXT_IOCTL cmd struct pointer.
8620  *	mode:	flags.
8621  *
8622  * Returns:
8623  *	None, request status indicated in cmd->Status.
8624  *
8625  * Context:
8626  *	Kernel context.
8627  */
8628 static void
8629 ql_qry_vport(ql_adapter_state_t *vha, EXT_IOCTL *cmd, int mode)
8630 {
8631 	ql_adapter_state_t	*tmp_vha;
8632 	EXT_VPORT_INFO		tmp_vport = {0};
8633 	int			max_vport;
8634 
8635 	QL_PRINT_9(CE_CONT, "(%d): started\n", vha->instance);
8636 
8637 	if (cmd->ResponseLen < sizeof (EXT_VPORT_INFO)) {
8638 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
8639 		cmd->DetailStatus = sizeof (EXT_VPORT_INFO);
8640 		EL(vha, "failed, ResponseLen < EXT_VPORT_INFO, Len=%xh\n",
8641 		    cmd->ResponseLen);
8642 		cmd->ResponseLen = 0;
8643 		return;
8644 	}
8645 
8646 	/* Fill in the vport information. */
8647 	bcopy(vha->loginparams.node_ww_name.raw_wwn, tmp_vport.wwnn,
8648 	    EXT_DEF_WWN_NAME_SIZE);
8649 	bcopy(vha->loginparams.nport_ww_name.raw_wwn, tmp_vport.wwpn,
8650 	    EXT_DEF_WWN_NAME_SIZE);
8651 	tmp_vport.state = vha->state;
8652 	tmp_vport.id = vha->vp_index;
8653 
8654 	tmp_vha = vha->pha->vp_next;
8655 	while (tmp_vha != NULL) {
8656 		tmp_vport.used++;
8657 		tmp_vha = tmp_vha->vp_next;
8658 	}
8659 
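	/*
	 * Capacity below is chip dependent: 24xx parts are assumed to
	 * support MAX_24_VIRTUAL_PORTS NPIV ports and everything else
	 * handled here MAX_25_VIRTUAL_PORTS; "free" is whatever of that
	 * limit is not already in use.
	 */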
8660 	max_vport = (CFG_IST(vha, CFG_CTRL_2422) ? MAX_24_VIRTUAL_PORTS :
8661 	    MAX_25_VIRTUAL_PORTS);
8662 	if (max_vport > tmp_vport.used) {
8663 		tmp_vport.free = max_vport - tmp_vport.used;
8664 	}
8665 
8666 	if (ddi_copyout((void *)&tmp_vport,
8667 	    (void *)(uintptr_t)(cmd->ResponseAdr),
8668 	    sizeof (EXT_VPORT_INFO), mode) != 0) {
8669 		cmd->Status = EXT_STATUS_COPY_ERR;
8670 		cmd->ResponseLen = 0;
8671 		EL(vha, "failed, ddi_copyout\n");
8672 	} else {
8673 		cmd->ResponseLen = sizeof (EXT_VPORT_INFO);
8674 		QL_PRINT_9(CE_CONT, "(%d): done\n", vha->instance);
8675 	}
8676 }
8677 
8678 /*
8679  * ql_access_flash
8680  *	Performs all EXT_CC_ACCESS_FLASH_OS functions.
8681  *
8682  * Input:
8683  *	ha:	adapter state pointer.
8684  *	cmd:	Local EXT_IOCTL cmd struct pointer.
8685  *	mode:	flags.
8686  *
8687  * Returns:
8688  *	None, request status indicated in cmd->Status.
8689  *
8690  * Context:
8691  *	Kernel context.
8692  */
8693 static void
8694 ql_access_flash(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8695 {
8696 	int	rval;
8697 
8698 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
8699 
8700 	switch (cmd->SubCode) {
8701 	case EXT_SC_FLASH_READ:
8702 		if ((rval = ql_flash_fcode_dump(ha,
8703 		    (void *)(uintptr_t)(cmd->ResponseAdr),
8704 		    (size_t)(cmd->ResponseLen), cmd->Reserved1, mode)) != 0) {
8705 			cmd->Status = EXT_STATUS_COPY_ERR;
8706 			cmd->ResponseLen = 0;
8707 			EL(ha, "flash_fcode_dump status=%xh\n", rval);
8708 		}
8709 		break;
8710 	case EXT_SC_FLASH_WRITE:
8711 		if ((rval = ql_r_m_w_flash(ha,
8712 		    (void *)(uintptr_t)(cmd->RequestAdr),
8713 		    (size_t)(cmd->RequestLen), cmd->Reserved1, mode)) !=
8714 		    QL_SUCCESS) {
8715 			cmd->Status = EXT_STATUS_COPY_ERR;
8716 			cmd->ResponseLen = 0;
8717 			EL(ha, "r_m_w_flash status=%xh\n", rval);
8718 		} else {
8719 			/* Reset caches on all adapter instances. */
8720 			ql_update_flash_caches(ha);
8721 		}
8722 		break;
8723 	default:
8724 		EL(ha, "unknown subcode=%xh\n", cmd->SubCode);
8725 		cmd->Status = EXT_STATUS_ERR;
8726 		cmd->ResponseLen = 0;
8727 		break;
8728 	}
8729 
8730 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8731 }
8732 
8733 /*
8734  * ql_reset_cmd
8735  *	Performs all EXT_CC_RESET_FW_OS functions.
8736  *
8737  * Input:
8738  *	ha:	adapter state pointer.
8739  *	cmd:	Local EXT_IOCTL cmd struct pointer.
8740  *
8741  * Returns:
8742  *	None, request status indicated in cmd->Status.
8743  *
8744  * Context:
8745  *	Kernel context.
8746  */
8747 static void
8748 ql_reset_cmd(ql_adapter_state_t *ha, EXT_IOCTL *cmd)
8749 {
8750 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
8751 
8752 	switch (cmd->SubCode) {
8753 	case EXT_SC_RESET_FC_FW:
8754 		EL(ha, "isp_abort_needed\n");
8755 		ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, 0);
8756 		break;
8757 	case EXT_SC_RESET_MPI_FW:
8758 		if (!(CFG_IST(ha, CFG_CTRL_81XX))) {
8759 			EL(ha, "invalid request for HBA\n");
8760 			cmd->Status = EXT_STATUS_INVALID_REQUEST;
8761 			cmd->ResponseLen = 0;
8762 		} else {
8763 			/* Wait for I/O to stop and daemon to stall. */
8764 			if (ql_suspend_hba(ha, 0) != QL_SUCCESS) {
8765 				EL(ha, "ql_suspend_hba failed\n");
8766 				cmd->Status = EXT_STATUS_BUSY;
8767 				cmd->ResponseLen = 0;
8768 			} else if (ql_restart_mpi(ha) != QL_SUCCESS) {
8769 				cmd->Status = EXT_STATUS_ERR;
8770 				cmd->ResponseLen = 0;
8771 			} else {
8772 				uint8_t	timer;
8773 				/*
8774 				 * While the restart_mpi mailbox cmd may be
8775 				 * done the MPI is not. Wait at least 6 sec. or
8776 				 * exit if the loop comes up.
8777 				 */
8778 				for (timer = 6; timer; timer--) {
8779 					if (!(ha->task_daemon_flags &
8780 					    LOOP_DOWN)) {
8781 						break;
8782 					}
8783 					/* Delay for 1 second. */
8784 					ql_delay(ha, 1000000);
8785 				}
8786 			}
8787 			ql_restart_hba(ha);
8788 		}
8789 		break;
8790 	default:
8791 		EL(ha, "unknown subcode=%xh\n", cmd->SubCode);
8792 		cmd->Status = EXT_STATUS_ERR;
8793 		cmd->ResponseLen = 0;
8794 		break;
8795 	}
8796 
8797 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8798 }
8799 
8800 /*
8801  * ql_get_dcbx_parameters
8802  *	Get DCBX parameters.
8803  *
8804  * Input:
8805  *	ha:	adapter state pointer.
8806  *	cmd:	User space CT arguments pointer.
8807  *	mode:	flags.
8808  */
8809 static void
8810 ql_get_dcbx_parameters(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8811 {
8812 	uint8_t		*tmp_buf;
8813 	int		rval;
8814 
8815 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
8816 
8817 	if (!(CFG_IST(ha, CFG_CTRL_8081))) {
8818 		EL(ha, "invalid request for HBA\n");
8819 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
8820 		cmd->ResponseLen = 0;
8821 		return;
8822 	}
8823 
8824 	/* Allocate memory for command. */
8825 	tmp_buf = kmem_zalloc(EXT_DEF_DCBX_PARAM_BUF_SIZE, KM_SLEEP);
8826 	if (tmp_buf == NULL) {
8827 		EL(ha, "failed, kmem_zalloc\n");
8828 		cmd->Status = EXT_STATUS_NO_MEMORY;
8829 		cmd->ResponseLen = 0;
8830 		return;
8831 	}
8832 	/* Send command */
8833 	rval = ql_get_dcbx_params(ha, EXT_DEF_DCBX_PARAM_BUF_SIZE,
8834 	    (caddr_t)tmp_buf);
8835 	if (rval != QL_SUCCESS) {
8836 		/* error */
8837 		EL(ha, "failed, get_dcbx_params_mbx=%xh\n", rval);
8838 		kmem_free(tmp_buf, EXT_DEF_DCBX_PARAM_BUF_SIZE);
8839 		cmd->Status = EXT_STATUS_ERR;
8840 		cmd->ResponseLen = 0;
8841 		return;
8842 	}
8843 
8844 	/* Copy the response */
8845 	if (ql_send_buffer_data((caddr_t)tmp_buf,
8846 	    (caddr_t)(uintptr_t)cmd->ResponseAdr,
8847 	    EXT_DEF_DCBX_PARAM_BUF_SIZE, mode) != EXT_DEF_DCBX_PARAM_BUF_SIZE) {
8848 		EL(ha, "failed, ddi_copyout\n");
8849 		cmd->Status = EXT_STATUS_COPY_ERR;
8850 		cmd->ResponseLen = 0;
8851 	} else {
8852 		cmd->ResponseLen = EXT_DEF_DCBX_PARAM_BUF_SIZE;
8853 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8854 	}
8855 	kmem_free(tmp_buf, EXT_DEF_DCBX_PARAM_BUF_SIZE);
8856 
8857 }
8858 
8859 /*
8860  * ql_qry_cna_port
8861  *	Performs EXT_SC_QUERY_CNA_PORT subfunction.
8862  *
8863  * Input:
8864  *	ha:	adapter state pointer.
8865  *	cmd:	EXT_IOCTL cmd struct pointer.
8866  *	mode:	flags.
8867  *
8868  * Returns:
8869  *	None, request status indicated in cmd->Status.
8870  *
8871  * Context:
8872  *	Kernel context.
8873  */
8874 static void
8875 ql_qry_cna_port(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8876 {
8877 	EXT_CNA_PORT	cna_port = {0};
8878 
8879 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
8880 
8881 	if (!(CFG_IST(ha, CFG_CTRL_8081))) {
8882 		EL(ha, "invalid request for HBA\n");
8883 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
8884 		cmd->ResponseLen = 0;
8885 		return;
8886 	}
8887 
8888 	if (cmd->ResponseLen < sizeof (EXT_CNA_PORT)) {
8889 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
8890 		cmd->DetailStatus = sizeof (EXT_CNA_PORT);
8891 		EL(ha, "failed, ResponseLen < EXT_CNA_PORT, Len=%xh\n",
8892 		    cmd->ResponseLen);
8893 		cmd->ResponseLen = 0;
8894 		return;
8895 	}
8896 
8897 	cna_port.VLanId = ha->fcoe_vlan_id;
8898 	cna_port.FabricParam = ha->fabric_params;
8899 	bcopy(ha->fcoe_vnport_mac, cna_port.VNPortMACAddress,
8900 	    EXT_DEF_MAC_ADDRESS_SIZE);
8901 
8902 	if (ddi_copyout((void *)&cna_port,
8903 	    (void *)(uintptr_t)(cmd->ResponseAdr),
8904 	    sizeof (EXT_CNA_PORT), mode) != 0) {
8905 		cmd->Status = EXT_STATUS_COPY_ERR;
8906 		cmd->ResponseLen = 0;
8907 		EL(ha, "failed, ddi_copyout\n");
8908 	} else {
8909 		cmd->ResponseLen = sizeof (EXT_CNA_PORT);
8910 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8911 	}
8912 }
8913 
8914 /*
8915  * ql_qry_adapter_versions
8916  *	Performs EXT_SC_QUERY_ADAPTER_VERSIONS subfunction.
8917  *
8918  * Input:
8919  *	ha:	adapter state pointer.
8920  *	cmd:	EXT_IOCTL cmd struct pointer.
8921  *	mode:	flags.
8922  *
8923  * Returns:
8924  *	None, request status indicated in cmd->Status.
8925  *
8926  * Context:
8927  *	Kernel context.
8928  */
8929 static void
8930 ql_qry_adapter_versions(ql_adapter_state_t *ha, EXT_IOCTL *cmd,
8931     int mode)
8932 {
8933 	uint8_t				is_8142, mpi_cap;
8934 	uint32_t			ver_len, transfer_size;
8935 	PEXT_ADAPTERREGIONVERSION	padapter_ver = NULL;
8936 
8937 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
8938 
8939 	/* 8142s do not have an EDC PHY firmware. */
8940 	mpi_cap = (uint8_t)(ha->mpi_capability_list >> 8);
8941 
8942 	is_8142 = 0;
8943 	/* Sizeof (Length + Reserved) = 8 Bytes */
8944 	if (mpi_cap == 0x02 || mpi_cap == 0x04) {
8945 		ver_len = (sizeof (EXT_REGIONVERSION) * (NO_OF_VERSIONS - 1))
8946 		    + 8;
8947 		is_8142 = 1;
8948 	} else {
8949 		ver_len = (sizeof (EXT_REGIONVERSION) * NO_OF_VERSIONS) + 8;
8950 	}
8951 
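	/*
	 * Worked example of ver_len (illustrative; the real values come
	 * from the defining header): with NO_OF_VERSIONS == 2,
	 *	ver_len = 2 * sizeof (EXT_REGIONVERSION) + 8
	 * where the 8 bytes are the Length and Reserved fields of
	 * EXT_ADAPTERREGIONVERSION.  8142-class parts have no EDC PHY
	 * firmware and so drop one EXT_REGIONVERSION entry.
	 */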
8952 	/* Allocate local memory for EXT_ADAPTERREGIONVERSION */
8953 	padapter_ver = (EXT_ADAPTERREGIONVERSION *)kmem_zalloc(ver_len,
8954 	    KM_SLEEP);
8955 
8956 	if (padapter_ver == NULL) {
8957 		EL(ha, "failed, kmem_zalloc\n");
8958 		cmd->Status = EXT_STATUS_NO_MEMORY;
8959 		cmd->ResponseLen = 0;
8960 		return;
8961 	}
8962 
8963 	padapter_ver->Length = 1;
8964 	/* Copy MPI version */
8965 	padapter_ver->RegionVersion[0].Region =
8966 	    EXT_OPT_ROM_REGION_MPI_RISC_FW;
8967 	padapter_ver->RegionVersion[0].Version[0] =
8968 	    ha->mpi_fw_major_version;
8969 	padapter_ver->RegionVersion[0].Version[1] =
8970 	    ha->mpi_fw_minor_version;
8971 	padapter_ver->RegionVersion[0].Version[2] =
8972 	    ha->mpi_fw_subminor_version;
8973 	padapter_ver->RegionVersion[0].VersionLength = 3;
8974 	padapter_ver->RegionVersion[0].Location = RUNNING_VERSION;
8975 
8976 	if (!is_8142) {
8977 		padapter_ver->RegionVersion[1].Region =
8978 		    EXT_OPT_ROM_REGION_EDC_PHY_FW;
8979 		padapter_ver->RegionVersion[1].Version[0] =
8980 		    ha->phy_fw_major_version;
8981 		padapter_ver->RegionVersion[1].Version[1] =
8982 		    ha->phy_fw_minor_version;
8983 		padapter_ver->RegionVersion[1].Version[2] =
8984 		    ha->phy_fw_subminor_version;
8985 		padapter_ver->RegionVersion[1].VersionLength = 3;
8986 		padapter_ver->RegionVersion[1].Location = RUNNING_VERSION;
8987 		padapter_ver->Length = NO_OF_VERSIONS;
8988 	}
8989 
8990 	if (cmd->ResponseLen < ver_len) {
8991 		EL(ha, "failed, ResponseLen < ver_len, ",
8992 		    "RespLen=%xh ver_len=%xh\n", cmd->ResponseLen, ver_len);
8993 		/* Calculate the No. of valid versions being returned. */
8994 		padapter_ver->Length = (uint32_t)
8995 		    ((cmd->ResponseLen - 8) / sizeof (EXT_REGIONVERSION));
8996 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
8997 		cmd->DetailStatus = ver_len;
8998 		transfer_size = cmd->ResponseLen;
8999 	} else {
9000 		transfer_size = ver_len;
9001 	}
9002 
9003 	if (ddi_copyout((void *)padapter_ver,
9004 	    (void *)(uintptr_t)(cmd->ResponseAdr),
9005 	    transfer_size, mode) != 0) {
9006 		cmd->Status = EXT_STATUS_COPY_ERR;
9007 		cmd->ResponseLen = 0;
9008 		EL(ha, "failed, ddi_copyout\n");
9009 	} else {
9010 		cmd->ResponseLen = ver_len;
9011 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
9012 	}
9013 
9014 	kmem_free(padapter_ver, ver_len);
9015 }
9016 
9017 /*
9018  * ql_get_xgmac_statistics
9019  *	Get XgMac information
9020  *
9021  * Input:
9022  *	ha:	adapter state pointer.
9023  *	cmd:	EXT_IOCTL cmd struct pointer.
9024  *	mode:	flags.
9025  *
9026  * Returns:
9027  *	None, request status indicated in cmd->Status.
9028  *
9029  * Context:
9030  *	Kernel context.
9031  */
9032 static void
9033 ql_get_xgmac_statistics(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
9034 {
9035 	int			rval;
9036 	uint32_t		size;
9037 	int8_t			*tmp_buf;
9038 	EXT_MENLO_MANAGE_INFO	info;
9039 
9040 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
9041 
9042 	/*  Verify the size of request structure. */
9043 	if (cmd->RequestLen < sizeof (EXT_MENLO_MANAGE_INFO)) {
9044 		/* Return error */
9045 		EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen,
9046 		    sizeof (EXT_MENLO_MANAGE_INFO));
9047 		cmd->Status = EXT_STATUS_INVALID_PARAM;
9048 		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
9049 		cmd->ResponseLen = 0;
9050 		return;
9051 	}
9052 
9053 	/* Get manage info request. */
9054 	if (ddi_copyin((caddr_t)(uintptr_t)cmd->RequestAdr,
9055 	    (caddr_t)&info, sizeof (EXT_MENLO_MANAGE_INFO), mode) != 0) {
9056 		EL(ha, "failed, ddi_copyin\n");
9057 		cmd->Status = EXT_STATUS_COPY_ERR;
9058 		cmd->ResponseLen = 0;
9059 		return;
9060 	}
9061 
9062 	size = info.TotalByteCount;
9063 	if (!size) {
9064 		/* parameter error */
9065 		cmd->Status = EXT_STATUS_INVALID_PARAM;
9066 		cmd->DetailStatus = 0;
9067 		EL(ha, "failed, size=%xh\n", size);
9068 		cmd->ResponseLen = 0;
9069 		return;
9070 	}
9071 
9072 	/* Allocate memory for command. */
9073 	tmp_buf = kmem_zalloc(size, KM_SLEEP);
9074 	if (tmp_buf == NULL) {
9075 		EL(ha, "failed, kmem_zalloc\n");
9076 		cmd->Status = EXT_STATUS_NO_MEMORY;
9077 		cmd->ResponseLen = 0;
9078 		return;
9079 	}
9080 
9081 	if (!(info.Operation & MENLO_OP_GET_INFO)) {
9082 		EL(ha, "Invalid request for 81XX\n");
9083 		kmem_free(tmp_buf, size);
9084 		cmd->Status = EXT_STATUS_ERR;
9085 		cmd->ResponseLen = 0;
9086 		return;
9087 	}
9088 
9089 	rval = ql_get_xgmac_stats(ha, size, (caddr_t)tmp_buf);
9090 
9091 	if (rval != QL_SUCCESS) {
9092 		/* error */
9093 		EL(ha, "failed, get_xgmac_stats =%xh\n", rval);
9094 		kmem_free(tmp_buf, size);
9095 		cmd->Status = EXT_STATUS_ERR;
9096 		cmd->ResponseLen = 0;
9097 		return;
9098 	}
9099 
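	/*
	 * ql_send_buffer_data() copies the statistics out to the caller's
	 * pDataBytes buffer (via ddi_copyout(9F), per the error message
	 * below) and returns the number of bytes transferred.
	 */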
9100 	if (ql_send_buffer_data(tmp_buf, (caddr_t)(uintptr_t)info.pDataBytes,
9101 	    size, mode) != size) {
9102 		EL(ha, "failed, ddi_copyout\n");
9103 		cmd->Status = EXT_STATUS_COPY_ERR;
9104 		cmd->ResponseLen = 0;
9105 	} else {
9106 		cmd->ResponseLen = info.TotalByteCount;
9107 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
9108 	}
9109 	kmem_free(tmp_buf, size);
9111 }
9112 
9113 /*
9114  * ql_get_fcf_list
9115  *	Get FCF list.
9116  *
9117  * Input:
9118  *	ha:	adapter state pointer.
9119  *	cmd:	EXT_IOCTL cmd struct pointer.
9120  *	mode:	flags.
9121  *
 * Returns:
 *	None, request status indicated in cmd->Status.
 *
 * Context:
 *	Kernel context.
 */
9122 static void
9123 ql_get_fcf_list(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
9124 {
9125 	uint8_t			*tmp_buf;
9126 	int			rval;
9127 	EXT_FCF_LIST		fcf_list = {0};
9128 	ql_fcf_list_desc_t	mb_fcf_list = {0};
9129 
9130 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
9131 
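	/*
	 * FCF (FCoE Forwarder) lists only exist on FCoE-capable adapters,
	 * so the request is rejected on anything other than an 81xx.
	 */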
9132 	if (!(CFG_IST(ha, CFG_CTRL_81XX))) {
9133 		EL(ha, "invalid request for HBA\n");
9134 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
9135 		cmd->ResponseLen = 0;
9136 		return;
9137 	}
9138 	/* Get manage info request. */
9139 	if (ddi_copyin((caddr_t)(uintptr_t)cmd->RequestAdr,
9140 	    (caddr_t)&fcf_list, sizeof (EXT_FCF_LIST), mode) != 0) {
9141 		EL(ha, "failed, ddi_copyin\n");
9142 		cmd->Status = EXT_STATUS_COPY_ERR;
9143 		cmd->ResponseLen = 0;
9144 		return;
9145 	}
9146 
9147 	if (!(fcf_list.BufSize)) {
9148 		/* Return error */
9149 		EL(ha, "failed, fcf_list BufSize=%xh\n",
9150 		    fcf_list.BufSize);
9151 		cmd->Status = EXT_STATUS_INVALID_PARAM;
9152 		cmd->ResponseLen = 0;
9153 		return;
9154 	}
9155 	/* Allocate memory for command. */
9156 	tmp_buf = kmem_zalloc(fcf_list.BufSize, KM_SLEEP);
9157 	if (tmp_buf == NULL) {
9158 		EL(ha, "failed, kmem_zalloc\n");
9159 		cmd->Status = EXT_STATUS_NO_MEMORY;
9160 		cmd->ResponseLen = 0;
9161 		return;
9162 	}
9163 	/*
	 * Build the mailbox descriptor: a non-zero Options value requests
	 * only the single FCF entry at FcfIndex, zero requests the full list.
	 */
9164 	if (fcf_list.Options) {
9165 		mb_fcf_list.options = FCF_LIST_RETURN_ONE;
9166 	} else {
9167 		mb_fcf_list.options = FCF_LIST_RETURN_ALL;
9168 	}
9169 	mb_fcf_list.fcf_index = (uint16_t)fcf_list.FcfIndex;
9170 	mb_fcf_list.buffer_size = fcf_list.BufSize;
9171 
9172 	/* Send command */
9173 	rval = ql_get_fcf_list_mbx(ha, &mb_fcf_list, (caddr_t)tmp_buf);
9174 	if (rval != QL_SUCCESS) {
9175 		/* error */
9176 		EL(ha, "failed, get_fcf_list_mbx=%xh\n", rval);
9177 		kmem_free(tmp_buf, fcf_list.BufSize);
9178 		cmd->Status = EXT_STATUS_ERR;
9179 		cmd->ResponseLen = 0;
9180 		return;
9181 	}
9182 
9183 	/* Copy the response */
9184 	if (ql_send_buffer_data((caddr_t)tmp_buf,
9185 	    (caddr_t)(uintptr_t)cmd->ResponseAdr,
9186 	    fcf_list.BufSize, mode) != fcf_list.BufSize) {
9187 		EL(ha, "failed, ddi_copyout\n");
9188 		cmd->Status = EXT_STATUS_COPY_ERR;
9189 		cmd->ResponseLen = 0;
9190 	} else {
9191 		cmd->ResponseLen = mb_fcf_list.buffer_size;
9192 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
9193 	}
9194 
9195 	kmem_free(tmp_buf, fcf_list.BufSize);
9196 }
9197 
9198 /*
9199  * ql_get_resource_counts
9200  *	Get resource counts.
9201  *
9202  * Input:
9203  *	ha:	adapter state pointer.
9204  *	cmd:	EXT_IOCTL cmd struct pointer.
9205  *	mode:	flags.
9206  *
 * Returns:
 *	None, request status indicated in cmd->Status.
 *
 * Context:
 *	Kernel context.
 */
9207 static void
9208 ql_get_resource_counts(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
9209 {
9210 	int			rval;
9211 	ql_mbx_data_t		mr;
9212 	EXT_RESOURCE_CNTS	tmp_rc_cnt = {0};
9213 
9214 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
9215 
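	/*
	 * Resource counts are only reported for controllers covered by
	 * CFG_CTRL_242581 (24xx/25xx/81xx class parts, judging by the macro
	 * name); other HBAs get EXT_STATUS_INVALID_REQUEST.
	 */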
9216 	if (!(CFG_IST(ha, CFG_CTRL_242581))) {
9217 		EL(ha, "invalid request for HBA\n");
9218 		cmd->Status = EXT_STATUS_INVALID_REQUEST;
9219 		cmd->ResponseLen = 0;
9220 		return;
9221 	}
9222 
9223 	if (cmd->ResponseLen < sizeof (EXT_RESOURCE_CNTS)) {
9224 		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
9225 		cmd->DetailStatus = sizeof (EXT_RESOURCE_CNTS);
9226 		EL(ha, "failed, ResponseLen < EXT_RESOURCE_CNTS, "
9227 		    "Len=%xh\n", cmd->ResponseLen);
9228 		cmd->ResponseLen = 0;
9229 		return;
9230 	}
9231 
9232 	rval = ql_get_resource_cnts(ha, &mr);
9233 	if (rval != QL_SUCCESS) {
9234 		EL(ha, "resource cnt mbx failed\n");
9235 		cmd->Status = EXT_STATUS_ERR;
9236 		cmd->ResponseLen = 0;
9237 		return;
9238 	}
9239 
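	/*
	 * Map the mailbox registers returned by ql_get_resource_cnts() into
	 * the EXT_RESOURCE_CNTS response; the mb[] indices follow the
	 * firmware's Get Resource Counts mailbox output layout.
	 */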
9240 	tmp_rc_cnt.OrgTgtXchgCtrlCnt = (uint32_t)mr.mb[1];
9241 	tmp_rc_cnt.CurTgtXchgCtrlCnt = (uint32_t)mr.mb[2];
9242 	tmp_rc_cnt.CurXchgCtrlCnt = (uint32_t)mr.mb[3];
9243 	tmp_rc_cnt.OrgXchgCtrlCnt = (uint32_t)mr.mb[6];
9244 	tmp_rc_cnt.CurIocbBufCnt = (uint32_t)mr.mb[7];
9245 	tmp_rc_cnt.OrgIocbBufCnt = (uint32_t)mr.mb[10];
9246 	tmp_rc_cnt.NoOfSupVPs = (uint32_t)mr.mb[11];
9247 	tmp_rc_cnt.NoOfSupFCFs = (uint32_t)mr.mb[12];
9248 
9249 	rval = ddi_copyout((void *)&tmp_rc_cnt,
9250 	    (void *)(uintptr_t)(cmd->ResponseAdr),
9251 	    sizeof (EXT_RESOURCE_CNTS), mode);
9252 	if (rval != 0) {
9253 		cmd->Status = EXT_STATUS_COPY_ERR;
9254 		cmd->ResponseLen = 0;
9255 		EL(ha, "failed, ddi_copyout\n");
9256 	} else {
9257 		cmd->ResponseLen = sizeof (EXT_RESOURCE_CNTS);
9258 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
9259 	}
9260 }
9261