xref: /illumos-gate/usr/src/uts/common/io/fibre-channel/fca/qlc/ql_ioctl.c (revision 2dea4eed7ad1c66ae4770263aa2911815a8b86eb)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /* Copyright 2010 QLogic Corporation */
23 
24 /*
25  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
26  * Use is subject to license terms.
27  */
28 
29 #pragma ident	"Copyright 2010 QLogic Corporation; ql_ioctl.c"
30 
31 /*
32  * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
33  * Fibre Channel Adapter (FCA) driver IOCTL source file.
34  *
35  * ***********************************************************************
36  * *									**
37  * *				NOTICE					**
38  * *		COPYRIGHT (C) 1996-2010 QLOGIC CORPORATION		**
39  * *			ALL RIGHTS RESERVED				**
40  * *									**
41  * ***********************************************************************
42  *
43  */
44 
45 #include <ql_apps.h>
46 #include <ql_api.h>
47 #include <ql_debug.h>
48 #include <ql_init.h>
49 #include <ql_ioctl.h>
50 #include <ql_mbx.h>
51 #include <ql_xioctl.h>
52 
53 /*
54  * Local Function Prototypes.
55  */
56 static int ql_busy_notification(ql_adapter_state_t *);
57 static int ql_idle_notification(ql_adapter_state_t *);
58 static int ql_get_feature_bits(ql_adapter_state_t *ha, uint16_t *features);
59 static int ql_set_feature_bits(ql_adapter_state_t *ha, uint16_t features);
60 static int ql_set_nvram_adapter_defaults(ql_adapter_state_t *ha);
61 static void ql_load_nvram(ql_adapter_state_t *ha, uint8_t addr,
62     uint16_t value);
63 static int ql_24xx_load_nvram(ql_adapter_state_t *, uint32_t, uint32_t);
64 static int ql_adm_op(ql_adapter_state_t *, void *, int);
65 static int ql_adm_adapter_info(ql_adapter_state_t *, ql_adm_op_t *, int);
66 static int ql_adm_extended_logging(ql_adapter_state_t *, ql_adm_op_t *);
67 static int ql_adm_device_list(ql_adapter_state_t *, ql_adm_op_t *, int);
68 static int ql_adm_update_properties(ql_adapter_state_t *);
69 static int ql_adm_prop_update_int(ql_adapter_state_t *, ql_adm_op_t *, int);
70 static int ql_adm_loop_reset(ql_adapter_state_t *);
71 static int ql_adm_fw_dump(ql_adapter_state_t *, ql_adm_op_t *, void *, int);
72 static int ql_adm_nvram_dump(ql_adapter_state_t *, ql_adm_op_t *, int);
73 static int ql_adm_nvram_load(ql_adapter_state_t *, ql_adm_op_t *, int);
74 static int ql_adm_flash_load(ql_adapter_state_t *, ql_adm_op_t *, int);
75 static int ql_adm_vpd_dump(ql_adapter_state_t *, ql_adm_op_t *, int);
76 static int ql_adm_vpd_load(ql_adapter_state_t *, ql_adm_op_t *, int);
77 static int ql_adm_vpd_gettag(ql_adapter_state_t *, ql_adm_op_t *, int);
78 static int ql_adm_updfwmodule(ql_adapter_state_t *, ql_adm_op_t *, int);
79 static uint8_t *ql_vpd_findtag(ql_adapter_state_t *, uint8_t *, int8_t *);
80 
81 /* ************************************************************************ */
82 /*				cb_ops functions			    */
83 /* ************************************************************************ */
84 
85 /*
86  * ql_open
87  *	opens device
88  *
89  * Input:
90  *	dev_p = device pointer
91  *	flags = open flags
92  *	otyp = open type
93  *	cred_p = credentials pointer
94  *
95  * Returns:
96  *	0 = success
97  *
98  * Context:
99  *	Kernel context.
100  */
101 /* ARGSUSED */
102 int
103 ql_open(dev_t *dev_p, int flags, int otyp, cred_t *cred_p)
104 {
105 	ql_adapter_state_t	*ha;
106 	int			rval = 0;
107 
108 	ha = ddi_get_soft_state(ql_state, (int32_t)getminor(*dev_p));
109 	if (ha == NULL) {
110 		QL_PRINT_2(CE_CONT, "failed, no adapter\n");
111 		return (ENXIO);
112 	}
113 
114 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
115 
116 	/* Allow only character opens */
117 	if (otyp != OTYP_CHR) {
118 		QL_PRINT_2(CE_CONT, "(%d): failed, open type\n",
119 		    ha->instance);
120 		return (EINVAL);
121 	}
122 
123 	ADAPTER_STATE_LOCK(ha);
124 	if (flags & FEXCL && ha->flags & QL_OPENED) {
125 		ADAPTER_STATE_UNLOCK(ha);
126 		rval = EBUSY;
127 	} else {
128 		ha->flags |= QL_OPENED;
129 		ADAPTER_STATE_UNLOCK(ha);
130 	}
131 
132 	if (rval != 0) {
133 		EL(ha, "failed, rval = %xh\n", rval);
134 	} else {
135 		/*EMPTY*/
136 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
137 	}
138 	return (rval);
139 }
140 
141 /*
142  * ql_close
143  *	closes device
144  *
145  * Input:
146  *	dev = device number
147  *	flags = open flags
148  *	otyp = open type
149  *	cred_p = credentials pointer
150  *
151  * Returns:
152  *	0 = success
153  *
154  * Context:
155  *	Kernel context.
156  */
157 /* ARGSUSED */
158 int
159 ql_close(dev_t dev, int flags, int otyp, cred_t *cred_p)
160 {
161 	ql_adapter_state_t	*ha;
162 	int			rval = 0;
163 
164 	ha = ddi_get_soft_state(ql_state, (int32_t)getminor(dev));
165 	if (ha == NULL) {
166 		QL_PRINT_2(CE_CONT, "failed, no adapter\n");
167 		return (ENXIO);
168 	}
169 
170 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
171 
172 	if (otyp != OTYP_CHR) {
173 		QL_PRINT_2(CE_CONT, "(%d): failed, open type\n",
174 		    ha->instance);
175 		return (EINVAL);
176 	}
177 
178 	ADAPTER_STATE_LOCK(ha);
179 	ha->flags &= ~QL_OPENED;
180 	ADAPTER_STATE_UNLOCK(ha);
181 
182 	if (rval != 0) {
183 		EL(ha, "failed, rval = %xh\n", rval);
184 	} else {
185 		/*EMPTY*/
186 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
187 	}
188 	return (rval);
189 }
190 
191 /*
192  * ql_ioctl
193  *	control a character device
194  *
195  * Input:
196  *	dev = device number
197  *	cmd = function to perform
198  *	arg = data type varies with request
199  *	mode = flags
200  *	cred_p = credentials pointer
201  *	rval_p = pointer to result value
202  *
203  * Returns:
204  *	0 = success
205  *
206  * Context:
207  *	Kernel context.
208  */
209 /* ARGSUSED */
210 int
211 ql_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cred_p,
212     int *rval_p)
213 {
214 	ql_adapter_state_t	*ha;
215 	int			rval = 0;
216 
217 	if (ddi_in_panic()) {
218 		QL_PRINT_2(CE_CONT, "ql_ioctl: ddi_in_panic exit\n");
219 		return (ENOPROTOOPT);
220 	}
221 
222 	ha = ddi_get_soft_state(ql_state, (int32_t)getminor(dev));
223 	if (ha == NULL)	{
224 		QL_PRINT_2(CE_CONT, "failed, no adapter\n");
225 		return (ENXIO);
226 	}
227 
228 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
229 
230 	/*
231 	 * Quick clean exit for qla2x00 foapi calls which are
232 	 * not supported in qlc.
233 	 */
234 	if (cmd >= QL_FOAPI_START && cmd <= QL_FOAPI_END) {
235 		QL_PRINT_9(CE_CONT, "failed, fo api not supported\n");
236 		return (ENOTTY);
237 	}
238 
239 	/* PWR management busy. */
240 	rval = ql_busy_notification(ha);
241 	if (rval != FC_SUCCESS)	 {
242 		EL(ha, "failed, ql_busy_notification\n");
243 		return (ENXIO);
244 	}
245 
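245 	/*
	 * Hand the command to the extended ioctl interface first; the
	 * switch below handles the commands it does not claim.
	 */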
246 	rval = ql_xioctl(ha, cmd, arg, mode, cred_p, rval_p);
247 	if (rval == ENOPROTOOPT || rval == EINVAL) {
248 		switch (cmd) {
249 		case QL_GET_ADAPTER_FEATURE_BITS: {
250 			uint16_t bits;
251 
252 			rval = ql_get_feature_bits(ha, &bits);
253 
254 			if (!rval && ddi_copyout((void *)&bits, (void *)arg,
255 			    sizeof (bits), mode)) {
256 				rval = EFAULT;
257 			}
258 			break;
259 		}
260 
261 		case QL_SET_ADAPTER_FEATURE_BITS: {
262 			uint16_t bits;
263 
264 			if (ddi_copyin((void *)arg, (void *)&bits,
265 			    sizeof (bits), mode)) {
266 				rval = EFAULT;
267 				break;
268 			}
269 
270 			rval = ql_set_feature_bits(ha, bits);
271 			break;
272 		}
273 
274 		case QL_SET_ADAPTER_NVRAM_DEFAULTS:
275 			rval = ql_set_nvram_adapter_defaults(ha);
276 			break;
277 
278 		case QL_UTIL_LOAD:
279 			rval = ql_nv_util_load(ha, (void *)arg, mode);
280 			break;
281 
282 		case QL_UTIL_DUMP:
283 			rval = ql_nv_util_dump(ha, (void *)arg, mode);
284 			break;
285 
286 		case QL_ADM_OP:
287 			rval = ql_adm_op(ha, (void *)arg, mode);
288 			break;
289 
290 		default:
291 			EL(ha, "unknown command = %d\n", cmd);
292 			rval = ENOTTY;
293 			break;
294 		}
295 	}
296 
297 	/* PWR management idle. */
298 	(void) ql_idle_notification(ha);
299 
300 	if (rval != 0) {
301 		EL(ha, "failed, rval = %d\n", rval);
302 	} else {
303 		/*EMPTY*/
304 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
305 	}
306 	return (rval);
307 }
308 
309 /*
310  * ql_busy_notification
311  *	Adapter busy notification.
312  *
313  * Input:
314  *	ha = adapter state pointer.
315  *
316  * Returns:
317  *	FC_SUCCESS
318  *	FC_FAILURE
319  *
320  * Context:
321  *	Kernel context.
322  */
323 static int
324 ql_busy_notification(ql_adapter_state_t *ha)
325 {
326 	if (!ha->pm_capable) {
327 		return (FC_SUCCESS);
328 	}
329 
330 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
331 
332 	QL_PM_LOCK(ha);
333 	ha->busy++;
334 	QL_PM_UNLOCK(ha);
335 
336 	if (pm_busy_component(ha->dip, 0) != DDI_SUCCESS) {
337 		QL_PM_LOCK(ha);
338 		ha->busy--;
339 		QL_PM_UNLOCK(ha);
340 
341 		EL(ha, "pm_busy_component failed = %xh\n", FC_FAILURE);
342 		return (FC_FAILURE);
343 	}
344 
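	/* If the device is not at full power, raise it before proceeding. */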
345 	QL_PM_LOCK(ha);
346 	if (ha->power_level != PM_LEVEL_D0) {
347 		QL_PM_UNLOCK(ha);
348 		if (pm_raise_power(ha->dip, 0, 1) != DDI_SUCCESS) {
349 			QL_PM_LOCK(ha);
350 			ha->busy--;
351 			QL_PM_UNLOCK(ha);
352 			return (FC_FAILURE);
353 		}
354 	} else {
355 		QL_PM_UNLOCK(ha);
356 	}
357 
358 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
359 
360 	return (FC_SUCCESS);
361 }
362 
363 /*
364  * ql_idle_notification
365  *	Adapter idle notification.
366  *
367  * Input:
368  *	ha = adapter state pointer.
369  *
370  * Returns:
371  *	FC_SUCCESS
372  *	FC_FAILURE
373  *
374  * Context:
375  *	Kernel context.
376  */
377 static int
378 ql_idle_notification(ql_adapter_state_t *ha)
379 {
380 	if (!ha->pm_capable) {
381 		return (FC_SUCCESS);
382 	}
383 
384 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
385 
386 	if (pm_idle_component(ha->dip, 0) != DDI_SUCCESS) {
387 		EL(ha, "pm_idle_component failed = %xh\n", FC_FAILURE);
388 		return (FC_FAILURE);
389 	}
390 
391 	QL_PM_LOCK(ha);
392 	ha->busy--;
393 	QL_PM_UNLOCK(ha);
394 
395 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
396 
397 	return (FC_SUCCESS);
398 }
399 
400 /*
401  * Get adapter feature bits from NVRAM
402  */
403 static int
404 ql_get_feature_bits(ql_adapter_state_t *ha, uint16_t *features)
405 {
406 	int			count;
407 	volatile uint16_t	data;
408 	uint32_t		nv_cmd;
409 	uint32_t		start_addr;
410 	int			rval;
411 	uint32_t		offset = offsetof(nvram_t, adapter_features);
412 
413 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
414 
415 	if (CFG_IST(ha, CFG_CTRL_242581)) {
416 		EL(ha, "Not supported for 24xx\n");
417 		return (EINVAL);
418 	}
419 
420 	/*
421 	 * The offset can't be greater than what fits in 8 bits, and
422 	 * the following code breaks if the offset isn't on a
423 	 * 2-byte boundary.
424 	 */
425 	rval = ql_lock_nvram(ha, &start_addr, LNF_NVRAM_DATA);
426 	if (rval != QL_SUCCESS) {
427 		EL(ha, "failed, ql_lock_nvram=%xh\n", rval);
428 		return (EIO);
429 	}
430 
431 	/*
432 	 * The most significant 3 bits hold the read opcode, followed by
433 	 * the 8 bits of the offset at which the read operation is
434 	 * performed.
435 	 */
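	/* Convert the byte offset to a word address and add the NVRAM base. */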
436 	offset >>= 1;
437 	offset += start_addr;
438 	nv_cmd = (offset << 16) | NV_READ_OP;
439 	nv_cmd <<= 5;
440 
441 	/*
442 	 * Select the chip and feed the command and address
443 	 */
444 	for (count = 0; count < 11; count++) {
445 		if (nv_cmd & BIT_31) {
446 			ql_nv_write(ha, NV_DATA_OUT);
447 		} else {
448 			ql_nv_write(ha, 0);
449 		}
450 		nv_cmd <<= 1;
451 	}
452 
453 	*features = 0;
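	/* Clock in the 16 data bits, most significant bit first. */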
454 	for (count = 0; count < 16; count++) {
455 		WRT16_IO_REG(ha, nvram, NV_SELECT | NV_CLOCK);
456 		ql_nv_delay();
457 
458 		data = RD16_IO_REG(ha, nvram);
459 		*features <<= 1;
460 		if (data & NV_DATA_IN) {
461 			*features = (uint16_t)(*features | 0x1);
462 		}
463 
464 		WRT16_IO_REG(ha, nvram, NV_SELECT);
465 		ql_nv_delay();
466 	}
467 
468 	/*
469 	 * Deselect the chip
470 	 */
471 	WRT16_IO_REG(ha, nvram, NV_DESELECT);
472 
473 	ql_release_nvram(ha);
474 
475 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
476 
477 	return (0);
478 }
479 
480 /*
481  * Set adapter feature bits in NVRAM
482  */
483 static int
484 ql_set_feature_bits(ql_adapter_state_t *ha, uint16_t features)
485 {
486 	int		rval;
487 	uint32_t	count;
488 	nvram_t		*nv;
489 	uint16_t	*wptr;
490 	uint8_t		*bptr;
491 	uint8_t		csum;
492 	uint32_t	start_addr;
493 
494 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
495 
496 	if (CFG_IST(ha, CFG_CTRL_242581)) {
497 		EL(ha, "Not supported for 24xx\n");
498 		return (EINVAL);
499 	}
500 
501 	nv = kmem_zalloc(sizeof (*nv), KM_SLEEP);
502 	if (nv == NULL) {
503 		EL(ha, "failed, kmem_zalloc\n");
504 		return (ENOMEM);
505 	}
506 
507 	rval = ql_lock_nvram(ha, &start_addr, LNF_NVRAM_DATA);
508 	if (rval != QL_SUCCESS) {
509 		EL(ha, "failed, ql_lock_nvram=%xh\n", rval);
510 		kmem_free(nv, sizeof (*nv));
511 		return (EIO);
512 	}
513 	rval = 0;
514 
515 	/*
516 	 * Read off the whole NVRAM
517 	 */
518 	wptr = (uint16_t *)nv;
519 	csum = 0;
520 	for (count = 0; count < sizeof (nvram_t) / 2; count++) {
521 		*wptr = (uint16_t)ql_get_nvram_word(ha, count + start_addr);
522 		csum = (uint8_t)(csum + (uint8_t)*wptr);
523 		csum = (uint8_t)(csum + (uint8_t)(*wptr >> 8));
524 		wptr++;
525 	}
526 
527 	/*
528 	 * If the checksum is BAD then fail it right here.
529 	 */
530 	if (csum) {
531 		kmem_free(nv, sizeof (*nv));
532 		ql_release_nvram(ha);
533 		return (EBADF);
534 	}
535 
536 	nv->adapter_features[0] = (uint8_t)((features & 0xFF00) >> 8);
537 	nv->adapter_features[1] = (uint8_t)(features & 0xFF);
538 
539 	/*
540 	 * Recompute the checksum now
541 	 */
542 	bptr = (uint8_t *)nv;
543 	for (count = 0; count < sizeof (nvram_t) - 1; count++) {
544 		csum = (uint8_t)(csum + *bptr++);
545 	}
546 	csum = (uint8_t)(~csum + 1);
547 	nv->checksum = csum;
548 
549 	/*
550 	 * Now load the NVRAM
551 	 */
552 	wptr = (uint16_t *)nv;
553 	for (count = 0; count < sizeof (nvram_t) / 2; count++) {
554 		ql_load_nvram(ha, (uint8_t)(count + start_addr), *wptr++);
555 	}
556 
557 	/*
558 	 * Read NVRAM and verify the contents
559 	 */
560 	wptr = (uint16_t *)nv;
561 	csum = 0;
562 	for (count = 0; count < sizeof (nvram_t) / 2; count++) {
563 		if (ql_get_nvram_word(ha, count + start_addr) != *wptr) {
564 			rval = EIO;
565 			break;
566 		}
567 		csum = (uint8_t)(csum + (uint8_t)*wptr);
568 		csum = (uint8_t)(csum + (uint8_t)(*wptr >> 8));
569 		wptr++;
570 	}
571 
572 	if (csum) {
573 		rval = EINVAL;
574 	}
575 
576 	kmem_free(nv, sizeof (*nv));
577 	ql_release_nvram(ha);
578 
579 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
580 
581 	return (rval);
582 }
583 
584 /*
585  * Fix this function to update just feature bits and checksum in NVRAM
586  */
587 static int
588 ql_set_nvram_adapter_defaults(ql_adapter_state_t *ha)
589 {
590 	int		rval;
591 	uint32_t	count;
592 	uint32_t	start_addr;
593 
594 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
595 
596 	rval = ql_lock_nvram(ha, &start_addr, LNF_NVRAM_DATA);
597 	if (rval != QL_SUCCESS) {
598 		EL(ha, "failed, ql_lock_nvram=%xh\n", rval);
599 		return (EIO);
600 	}
601 	rval = 0;
602 
603 	if (CFG_IST(ha, CFG_CTRL_242581)) {
604 		nvram_24xx_t	*nv;
605 		uint32_t	*longptr;
606 		uint32_t	csum = 0;
607 
608 		nv = kmem_zalloc(sizeof (*nv), KM_SLEEP);
609 		if (nv == NULL) {
610 			EL(ha, "failed, kmem_zalloc\n");
611 			return (ENOMEM);
612 		}
613 
614 		nv->nvram_version[0] = LSB(ICB_24XX_VERSION);
615 		nv->nvram_version[1] = MSB(ICB_24XX_VERSION);
616 
617 		nv->version[0] = 1;
618 		nv->max_frame_length[1] = 8;
619 		nv->execution_throttle[0] = 16;
620 		nv->login_retry_count[0] = 8;
621 
622 		nv->firmware_options_1[0] = BIT_2 | BIT_1;
623 		nv->firmware_options_1[1] = BIT_5;
624 		nv->firmware_options_2[0] = BIT_5;
625 		nv->firmware_options_2[1] = BIT_4;
626 		nv->firmware_options_3[1] = BIT_6;
627 
628 		/*
629 		 * Set default host adapter parameters
630 		 */
631 		nv->host_p[0] = BIT_4 | BIT_1;
632 		nv->host_p[1] = BIT_3 | BIT_2;
633 		nv->reset_delay = 5;
634 		nv->max_luns_per_target[0] = 128;
635 		nv->port_down_retry_count[0] = 30;
636 		nv->link_down_timeout[0] = 30;
637 
638 		/*
639 		 * compute the checksum now
640 		 */
641 		longptr = (uint32_t *)nv;
642 		csum = 0;
643 		for (count = 0; count < (sizeof (nvram_24xx_t)/4)-1; count++) {
644 			csum += *longptr;
645 			longptr++;
646 		}
647 		csum = (uint32_t)(~csum + 1);
648 		LITTLE_ENDIAN_32(&csum);
649 		*longptr = csum;
650 
651 		/*
652 		 * Now load the NVRAM
653 		 */
654 		longptr = (uint32_t *)nv;
655 		for (count = 0; count < sizeof (nvram_24xx_t) / 4; count++) {
656 			(void) ql_24xx_load_nvram(ha,
657 			    (uint32_t)(count + start_addr), *longptr++);
658 		}
659 
660 		/*
661 		 * Read NVRAM and verify the contents
662 		 */
663 		csum = 0;
664 		longptr = (uint32_t *)nv;
665 		for (count = 0; count < sizeof (nvram_24xx_t) / 4; count++) {
666 			rval = ql_24xx_read_flash(ha, count + start_addr,
667 			    longptr);
668 			if (rval != QL_SUCCESS) {
669 				EL(ha, "24xx_read_flash failed=%xh\n", rval);
670 				break;
671 			}
672 			csum += *longptr;
673 		}
674 
675 		if (csum) {
676 			rval = EINVAL;
677 		}
678 		kmem_free(nv, sizeof (nvram_24xx_t));
679 	} else {
680 		nvram_t		*nv;
681 		uint16_t	*wptr;
682 		uint8_t		*bptr;
683 		uint8_t		csum;
684 
685 		nv = kmem_zalloc(sizeof (*nv), KM_SLEEP);
686 		if (nv == NULL) {
687 			EL(ha, "failed, kmem_zalloc\n");
688 			return (ENOMEM);
689 		}
690 		/*
691 		 * Set default initialization control block.
692 		 */
693 		nv->parameter_block_version = ICB_VERSION;
694 		nv->firmware_options[0] = BIT_4 | BIT_3 | BIT_2 | BIT_1;
695 		nv->firmware_options[1] = BIT_7 | BIT_5 | BIT_2;
696 
697 		nv->max_frame_length[1] = 4;
698 		nv->max_iocb_allocation[1] = 1;
699 		nv->execution_throttle[0] = 16;
700 		nv->login_retry_count = 8;
701 		nv->port_name[0] = 33;
702 		nv->port_name[3] = 224;
703 		nv->port_name[4] = 139;
704 		nv->login_timeout = 4;
705 
706 		/*
707 		 * Set default host adapter parameters
708 		 */
709 		nv->host_p[0] = BIT_1;
710 		nv->host_p[1] = BIT_2;
711 		nv->reset_delay = 5;
712 		nv->port_down_retry_count = 8;
713 		nv->maximum_luns_per_target[0] = 8;
714 
715 		/*
716 		 * compute the checksum now
717 		 */
718 		bptr = (uint8_t *)nv;
719 		csum = 0;
720 		for (count = 0; count < sizeof (nvram_t) - 1; count++) {
721 			csum = (uint8_t)(csum + *bptr++);
722 		}
723 		csum = (uint8_t)(~csum + 1);
724 		nv->checksum = csum;
725 
726 		/*
727 		 * Now load the NVRAM
728 		 */
729 		wptr = (uint16_t *)nv;
730 		for (count = 0; count < sizeof (nvram_t) / 2; count++) {
731 			ql_load_nvram(ha, (uint8_t)(count + start_addr),
732 			    *wptr++);
733 		}
734 
735 		/*
736 		 * Read NVRAM and verify the contents
737 		 */
738 		wptr = (uint16_t *)nv;
739 		csum = 0;
740 		for (count = 0; count < sizeof (nvram_t) / 2; count++) {
741 			if (ql_get_nvram_word(ha, count + start_addr) !=
742 			    *wptr) {
743 				rval = EIO;
744 				break;
745 			}
746 			csum = (uint8_t)(csum + (uint8_t)*wptr);
747 			csum = (uint8_t)(csum + (uint8_t)(*wptr >> 8));
748 			wptr++;
749 		}
750 		if (csum) {
751 			rval = EINVAL;
752 		}
753 		kmem_free(nv, sizeof (*nv));
754 	}
755 	ql_release_nvram(ha);
756 
757 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
758 
759 	return (rval);
760 }
761 
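/*
 * ql_load_nvram
 *	Erases and writes one 16-bit word to the legacy (pre-ISP24xx)
 *	serial NVRAM.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	addr:	NVRAM word address.
 *	value:	data word to write.
 *
 * Context:
 *	Kernel context.
 */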
762 static void
763 ql_load_nvram(ql_adapter_state_t *ha, uint8_t addr, uint16_t value)
764 {
765 	int			count;
766 	volatile uint16_t	word;
767 	volatile uint32_t	nv_cmd;
768 
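	/*
	 * Enable writes; clock out the write-enable command sequence:
	 * start bit, opcode 00, address bits all ones.
	 */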
769 	ql_nv_write(ha, NV_DATA_OUT);
770 	ql_nv_write(ha, 0);
771 	ql_nv_write(ha, 0);
772 
773 	for (word = 0; word < 8; word++) {
774 		ql_nv_write(ha, NV_DATA_OUT);
775 	}
776 
777 	/*
778 	 * Deselect the chip
779 	 */
780 	WRT16_IO_REG(ha, nvram, NV_DESELECT);
781 	ql_nv_delay();
782 
783 	/*
784 	 * Erase Location
785 	 */
786 	nv_cmd = (addr << 16) | NV_ERASE_OP;
787 	nv_cmd <<= 5;
788 	for (count = 0; count < 11; count++) {
789 		if (nv_cmd & BIT_31) {
790 			ql_nv_write(ha, NV_DATA_OUT);
791 		} else {
792 			ql_nv_write(ha, 0);
793 		}
794 		nv_cmd <<= 1;
795 	}
796 
797 	/*
798 	 * Wait for Erase to Finish
799 	 */
800 	WRT16_IO_REG(ha, nvram, NV_DESELECT);
801 	ql_nv_delay();
802 	WRT16_IO_REG(ha, nvram, NV_SELECT);
803 	word = 0;
804 	while ((word & NV_DATA_IN) == 0) {
805 		ql_nv_delay();
806 		word = RD16_IO_REG(ha, nvram);
807 	}
808 	WRT16_IO_REG(ha, nvram, NV_DESELECT);
809 	ql_nv_delay();
810 
811 	/*
812 	 * Write data now
813 	 */
814 	nv_cmd = (addr << 16) | NV_WRITE_OP;
815 	nv_cmd |= value;
816 	nv_cmd <<= 5;
817 	for (count = 0; count < 27; count++) {
818 		if (nv_cmd & BIT_31) {
819 			ql_nv_write(ha, NV_DATA_OUT);
820 		} else {
821 			ql_nv_write(ha, 0);
822 		}
823 		nv_cmd <<= 1;
824 	}
825 
826 	/*
827 	 * Wait for NVRAM to become ready
828 	 */
829 	WRT16_IO_REG(ha, nvram, NV_DESELECT);
830 	ql_nv_delay();
831 	WRT16_IO_REG(ha, nvram, NV_SELECT);
832 	word = 0;
833 	while ((word & NV_DATA_IN) == 0) {
834 		ql_nv_delay();
835 		word = RD16_IO_REG(ha, nvram);
836 	}
837 	WRT16_IO_REG(ha, nvram, NV_DESELECT);
838 	ql_nv_delay();
839 
840 	/*
841 	 * Disable writes
842 	 */
843 	ql_nv_write(ha, NV_DATA_OUT);
844 	for (count = 0; count < 10; count++) {
845 		ql_nv_write(ha, 0);
846 	}
847 
848 	/*
849 	 * Deselect the chip now
850 	 */
851 	WRT16_IO_REG(ha, nvram, NV_DESELECT);
852 }
853 
854 /*
855  * ql_24xx_load_nvram
856  *	Enables NVRAM write access and writes a 32-bit word to ISP24xx NVRAM.
857  *
858  * Input:
859  *	ha:	adapter state pointer.
860  *	addr:	NVRAM address.
861  *	value:	data.
862  *
863  * Returns:
864  *	ql local function return status code.
865  *
866  * Context:
867  *	Kernel context.
868  */
869 static int
870 ql_24xx_load_nvram(ql_adapter_state_t *ha, uint32_t addr, uint32_t value)
871 {
872 	int	rval;
873 
874 	/* Enable flash write. */
875 	if (!(CFG_IST(ha, CFG_CTRL_81XX))) {
876 		WRT32_IO_REG(ha, ctrl_status,
877 		    RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
878 		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
879 	}
880 
881 	/* Disable NVRAM write-protection. */
882 	if (CFG_IST(ha, CFG_CTRL_2422)) {
883 		(void) ql_24xx_write_flash(ha, NVRAM_CONF_ADDR | 0x101, 0);
884 	} else {
885 		if ((rval = ql_24xx_unprotect_flash(ha)) != QL_SUCCESS) {
886 			EL(ha, "unprotect_flash failed, rval=%xh\n", rval);
887 			return (rval);
888 		}
889 	}
890 
891 	/* Write to flash. */
892 	rval = ql_24xx_write_flash(ha, addr, value);
893 
894 	/* Enable NVRAM write-protection. */
895 	if (CFG_IST(ha, CFG_CTRL_2422)) {
896 		/* TODO: Check if 0x8c is correct -- sb: 0x9c ? */
897 		(void) ql_24xx_write_flash(ha, NVRAM_CONF_ADDR | 0x101, 0x8c);
898 	} else {
899 		ql_24xx_protect_flash(ha);
900 	}
901 
902 	/* Disable flash write. */
903 	if (!(CFG_IST(ha, CFG_CTRL_81XX))) {
904 		WRT32_IO_REG(ha, ctrl_status,
905 		    RD32_IO_REG(ha, ctrl_status) & ~ISP_FLASH_ENABLE);
906 		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
907 	}
908 
909 	return (rval);
910 }
911 
912 /*
913  * ql_nv_util_load
914  *	Loads NVRAM from application.
915  *
916  * Input:
917  *	ha = adapter state pointer.
918  *	bp = user buffer address.
919  *
920  * Returns:
921  *
922  * Context:
923  *	Kernel context.
924  */
925 int
926 ql_nv_util_load(ql_adapter_state_t *ha, void *bp, int mode)
927 {
928 	uint8_t		cnt;
929 	void		*nv;
930 	uint16_t	*wptr;
931 	uint16_t	data;
932 	uint32_t	start_addr, *lptr, data32;
933 	nvram_t		*nptr;
934 	int		rval;
935 
936 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
937 
938 	if ((nv = kmem_zalloc(ha->nvram_cache->size, KM_SLEEP)) == NULL) {
939 		EL(ha, "failed, kmem_zalloc\n");
940 		return (ENOMEM);
941 	}
942 
943 	if (ddi_copyin(bp, nv, ha->nvram_cache->size, mode) != 0) {
944 		EL(ha, "Buffer copy failed\n");
945 		kmem_free(nv, ha->nvram_cache->size);
946 		return (EFAULT);
947 	}
948 
949 	/* See if the buffer passed to us looks sane */
950 	nptr = (nvram_t *)nv;
951 	if (nptr->id[0] != 'I' || nptr->id[1] != 'S' || nptr->id[2] != 'P' ||
952 	    nptr->id[3] != ' ') {
953 		EL(ha, "failed, buffer sanity check\n");
954 		kmem_free(nv, ha->nvram_cache->size);
955 		return (EINVAL);
956 	}
957 
958 	/* Quiesce I/O */
959 	if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
960 		EL(ha, "ql_stall_driver failed\n");
961 		kmem_free(nv, ha->nvram_cache->size);
962 		return (EBUSY);
963 	}
964 
965 	rval = ql_lock_nvram(ha, &start_addr, LNF_NVRAM_DATA);
966 	if (rval != QL_SUCCESS) {
967 		EL(ha, "failed, ql_lock_nvram=%xh\n", rval);
968 		kmem_free(nv, ha->nvram_cache->size);
969 		ql_restart_driver(ha);
970 		return (EIO);
971 	}
972 
973 	/* Load NVRAM. */
974 	if (CFG_IST(ha, CFG_CTRL_2581)) {
975 		GLOBAL_HW_UNLOCK();
976 		start_addr &= ~ha->flash_data_addr;
977 		start_addr <<= 2;
978 		if ((rval = ql_r_m_w_flash(ha, bp, ha->nvram_cache->size,
979 		    start_addr, mode)) != QL_SUCCESS) {
980 			EL(ha, "nvram load failed, rval = %0xh\n", rval);
981 		}
982 		GLOBAL_HW_LOCK();
983 	} else if (CFG_IST(ha, CFG_CTRL_2422)) {
984 		lptr = (uint32_t *)nv;
985 		for (cnt = 0; cnt < ha->nvram_cache->size / 4; cnt++) {
986 			data32 = *lptr++;
987 			LITTLE_ENDIAN_32(&data32);
988 			rval = ql_24xx_load_nvram(ha, cnt + start_addr,
989 			    data32);
990 			if (rval != QL_SUCCESS) {
991 				EL(ha, "failed, 24xx_load_nvram=%xh\n", rval);
992 				break;
993 			}
994 		}
995 	} else {
996 		wptr = (uint16_t *)nv;
997 		for (cnt = 0; cnt < ha->nvram_cache->size / 2; cnt++) {
998 			data = *wptr++;
999 			LITTLE_ENDIAN_16(&data);
1000 			ql_load_nvram(ha, (uint8_t)(cnt + start_addr), data);
1001 		}
1002 	}
1003 	/* switch to the new one */
1004 	NVRAM_CACHE_LOCK(ha);
1005 
1006 	kmem_free(ha->nvram_cache->cache, ha->nvram_cache->size);
1007 	ha->nvram_cache->cache = (void *)nptr;
1008 
1009 	NVRAM_CACHE_UNLOCK(ha);
1010 
1011 	ql_release_nvram(ha);
1012 	ql_restart_driver(ha);
1013 
1014 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1015 
1016 	if (rval == QL_SUCCESS) {
1017 		return (0);
1018 	}
1019 
1020 	return (EFAULT);
1021 }
1022 
1023 /*
1024  * ql_nv_util_dump
1025  *	Dumps NVRAM to application.
1026  *
1027  * Input:
1028  *	ha = adapter state pointer.
1029  *	bp = user buffer address.
1030  *
1031  * Returns:
1032  *
1033  * Context:
1034  *	Kernel context.
1035  */
1036 int
1037 ql_nv_util_dump(ql_adapter_state_t *ha, void *bp, int mode)
1038 {
1039 	uint32_t	start_addr;
1040 	int		rval2, rval = 0;
1041 
1042 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1043 
1044 	if (ha->nvram_cache == NULL ||
1045 	    ha->nvram_cache->size == NULL ||
1046 	    ha->nvram_cache->cache == NULL) {
1047 		EL(ha, "failed, kmem_zalloc\n");
1048 		return (ENOMEM);
1049 	} else if (ha->nvram_cache->valid != 1) {
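		/* Cache not populated yet; read NVRAM from the adapter into it. */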
1050 
1051 		/* Quiesce I/O */
1052 		if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
1053 			EL(ha, "ql_stall_driver failed\n");
1054 			return (EBUSY);
1055 		}
1056 
1057 		rval2 = ql_lock_nvram(ha, &start_addr, LNF_NVRAM_DATA);
1058 		if (rval2 != QL_SUCCESS) {
1059 			EL(ha, "failed, ql_lock_nvram=%xh\n", rval2);
1060 			ql_restart_driver(ha);
1061 			return (EIO);
1062 		}
1063 		NVRAM_CACHE_LOCK(ha);
1064 
1065 		rval2 = ql_get_nvram(ha, ha->nvram_cache->cache,
1066 		    start_addr, ha->nvram_cache->size);
1067 		if (rval2 != QL_SUCCESS) {
1068 			rval = rval2;
1069 		} else {
1070 			ha->nvram_cache->valid = 1;
1071 			EL(ha, "nvram cache now valid.");
1072 		}
1073 
1074 		NVRAM_CACHE_UNLOCK(ha);
1075 
1076 		ql_release_nvram(ha);
1077 		ql_restart_driver(ha);
1078 
1079 		if (rval != 0) {
1080 			EL(ha, "failed to dump nvram, rval=%x\n", rval);
1081 			return (rval);
1082 		}
1083 	}
1084 
1085 	if (ddi_copyout(ha->nvram_cache->cache, bp,
1086 	    ha->nvram_cache->size, mode) != 0) {
1087 		EL(ha, "Buffer copy failed\n");
1088 		return (EFAULT);
1089 	}
1090 
1091 	EL(ha, "nvram cache accessed.");
1092 
1093 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1094 
1095 	return (0);
1096 }
1097 
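/*
 * ql_get_nvram
 *	Reads NVRAM contents into a caller supplied buffer.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	dest_addr:	destination buffer address.
 *	src_addr:	NVRAM source address.
 *	size:		byte count to read.
 *
 * Returns:
 *	QL_SUCCESS, or EAGAIN if a flash read fails.
 *
 * Context:
 *	Kernel context.
 */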
1098 int
1099 ql_get_nvram(ql_adapter_state_t *ha, void *dest_addr, uint32_t src_addr,
1100     uint32_t size)
1101 {
1102 	int rval = QL_SUCCESS;
1103 	int cnt;
1104 	/* Dump NVRAM. */
1105 	if (CFG_IST(ha, CFG_CTRL_242581)) {
1106 		uint32_t	*lptr = (uint32_t *)dest_addr;
1107 
1108 		for (cnt = 0; cnt < size / 4; cnt++) {
1109 			rval = ql_24xx_read_flash(ha, src_addr++, lptr);
1110 			if (rval != QL_SUCCESS) {
1111 				EL(ha, "read_flash failed=%xh\n", rval);
1112 				rval = EAGAIN;
1113 				break;
1114 			}
1115 			LITTLE_ENDIAN_32(lptr);
1116 			lptr++;
1117 		}
1118 	} else {
1119 		uint16_t	data;
1120 		uint16_t	*wptr = (uint16_t *)dest_addr;
1121 
1122 		for (cnt = 0; cnt < size / 2; cnt++) {
1123 			data = (uint16_t)ql_get_nvram_word(ha, cnt +
1124 			    src_addr);
1125 			LITTLE_ENDIAN_16(&data);
1126 			*wptr++ = data;
1127 		}
1128 	}
1129 	return (rval);
1130 }
1131 
1132 /*
1133  * ql_vpd_load
1134  *	Loads VPD from application.
1135  *
1136  * Input:
1137  *	ha = adapter state pointer.
1138  *	bp = user buffer address.
1139  *
1140  * Returns:
1141  *
1142  * Context:
1143  *	Kernel context.
1144  */
1145 int
1146 ql_vpd_load(ql_adapter_state_t *ha, void *bp, int mode)
1147 {
1148 	uint8_t		cnt;
1149 	uint8_t		*vpd, *vpdptr, *vbuf;
1150 	uint32_t	start_addr, vpd_size, *lptr, data32;
1151 	int		rval;
1152 
1153 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1154 
1155 	if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
1156 		EL(ha, "unsupported adapter feature\n");
1157 		return (ENOTSUP);
1158 	}
1159 
1160 	vpd_size = QL_24XX_VPD_SIZE;
1161 
1162 	if ((vpd = kmem_zalloc(vpd_size, KM_SLEEP)) == NULL) {
1163 		EL(ha, "failed, kmem_zalloc\n");
1164 		return (ENOMEM);
1165 	}
1166 
1167 	if (ddi_copyin(bp, vpd, vpd_size, mode) != 0) {
1168 		EL(ha, "Buffer copy failed\n");
1169 		kmem_free(vpd, vpd_size);
1170 		return (EFAULT);
1171 	}
1172 
1173 	/* Sanity check the user supplied data via checksum */
1174 	if ((vpdptr = ql_vpd_findtag(ha, vpd, "RV")) == NULL) {
1175 		EL(ha, "vpd RV tag missing\n");
1176 		kmem_free(vpd, vpd_size);
1177 		return (EINVAL);
1178 	}
1179 
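	/*
	 * Advance to the RV checksum byte; the sum of all VPD bytes up to
	 * and including it must be zero.
	 */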
1180 	vpdptr += 3;
1181 	cnt = 0;
1182 	vbuf = vpd;
1183 	while (vbuf <= vpdptr) {
1184 		cnt += *vbuf++;
1185 	}
1186 	if (cnt != 0) {
1187 		EL(ha, "mismatched checksum, cal=%xh, passed=%xh\n",
1188 		    (uint8_t)cnt, (uintptr_t)vpdptr);
1189 		kmem_free(vpd, vpd_size);
1190 		return (EINVAL);
1191 	}
1192 
1193 	/* Quiesce I/O */
1194 	if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
1195 		EL(ha, "ql_stall_driver failed\n");
1196 		kmem_free(vpd, vpd_size);
1197 		return (EBUSY);
1198 	}
1199 
1200 	rval = ql_lock_nvram(ha, &start_addr, LNF_VPD_DATA);
1201 	if (rval != QL_SUCCESS) {
1202 		EL(ha, "failed, ql_lock_nvram=%xh\n", rval);
1203 		kmem_free(vpd, vpd_size);
1204 		ql_restart_driver(ha);
1205 		return (EIO);
1206 	}
1207 
1208 	/* Load VPD. */
1209 	if (CFG_IST(ha, CFG_CTRL_2581)) {
1210 		GLOBAL_HW_UNLOCK();
1211 		start_addr &= ~ha->flash_data_addr;
1212 		start_addr <<= 2;
1213 		if ((rval = ql_r_m_w_flash(ha, bp, vpd_size, start_addr,
1214 		    mode)) != QL_SUCCESS) {
1215 			EL(ha, "vpd load error: %xh\n", rval);
1216 		}
1217 		GLOBAL_HW_LOCK();
1218 	} else {
1219 		lptr = (uint32_t *)vpd;
1220 		for (cnt = 0; cnt < vpd_size / 4; cnt++) {
1221 			data32 = *lptr++;
1222 			LITTLE_ENDIAN_32(&data32);
1223 			rval = ql_24xx_load_nvram(ha, cnt + start_addr,
1224 			    data32);
1225 			if (rval != QL_SUCCESS) {
1226 				EL(ha, "failed, 24xx_load_nvram=%xh\n", rval);
1227 				break;
1228 			}
1229 		}
1230 	}
1231 
1232 	kmem_free(vpd, vpd_size);
1233 
1234 	/* Update the vcache */
1235 	CACHE_LOCK(ha);
1236 
1237 	if (rval != QL_SUCCESS) {
1238 		EL(ha, "failed, load\n");
1239 	} else if ((ha->vcache == NULL) && ((ha->vcache =
1240 	    kmem_zalloc(vpd_size, KM_SLEEP)) == NULL)) {
1241 		EL(ha, "failed, kmem_zalloc2\n");
1242 	} else if (ddi_copyin(bp, ha->vcache, vpd_size, mode) != 0) {
1243 		EL(ha, "Buffer copy2 failed\n");
1244 		kmem_free(ha->vcache, vpd_size);
1245 		ha->vcache = NULL;
1246 	}
1247 
1248 	CACHE_UNLOCK(ha);
1249 
1250 	ql_release_nvram(ha);
1251 	ql_restart_driver(ha);
1252 
1253 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1254 
1255 	if (rval == QL_SUCCESS) {
1256 		return (0);
1257 	}
1258 
1259 	return (EFAULT);
1260 }
1261 
1262 /*
1263  * ql_vpd_dump
1264  *	Dumps VPD to application buffer.
1265  *
1266  * Input:
1267  *	ha = adapter state pointer.
1268  *	bp = user buffer address.
1269  *
1270  * Returns:
1271  *
1272  * Context:
1273  *	Kernel context.
1274  */
1275 int
1276 ql_vpd_dump(ql_adapter_state_t *ha, void *bp, int mode)
1277 {
1278 	uint8_t		cnt;
1279 	void		*vpd;
1280 	uint32_t	start_addr, vpd_size, *lptr;
1281 	int		rval = 0;
1282 
1283 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1284 
1285 	if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
1286 		EL(ha, "unsupported adapter feature\n");
1287 		return (EACCES);
1288 	}
1289 
1290 	vpd_size = QL_24XX_VPD_SIZE;
1291 
1292 	CACHE_LOCK(ha);
1293 
1294 	if (ha->vcache != NULL) {
1295 		/* copy back the vpd cache data */
1296 		if (ddi_copyout(ha->vcache, bp, vpd_size, mode) != 0) {
1297 			EL(ha, "Buffer copy failed\n");
1298 			rval = EFAULT;
1299 		}
1300 		CACHE_UNLOCK(ha);
1301 		return (rval);
1302 	}
1303 
1304 	if ((vpd = kmem_zalloc(vpd_size, KM_SLEEP)) == NULL) {
1305 		CACHE_UNLOCK(ha);
1306 		EL(ha, "failed, kmem_zalloc\n");
1307 		return (ENOMEM);
1308 	}
1309 
1310 	/* Quiesce I/O */
1311 	if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
1312 		CACHE_UNLOCK(ha);
1313 		EL(ha, "ql_stall_driver failed\n");
1314 		kmem_free(vpd, vpd_size);
1315 		return (EBUSY);
1316 	}
1317 
1318 	rval = ql_lock_nvram(ha, &start_addr, LNF_VPD_DATA);
1319 	if (rval != QL_SUCCESS) {
1320 		CACHE_UNLOCK(ha);
1321 		EL(ha, "failed, ql_lock_nvram=%xh\n", rval);
1322 		kmem_free(vpd, vpd_size);
1323 		ql_restart_driver(ha);
1324 		return (EIO);
1325 	}
1326 
1327 	/* Dump VPD. */
1328 	lptr = (uint32_t *)vpd;
1329 
1330 	for (cnt = 0; cnt < vpd_size / 4; cnt++) {
1331 		rval = ql_24xx_read_flash(ha, start_addr++, lptr);
1332 		if (rval != QL_SUCCESS) {
1333 			EL(ha, "read_flash failed=%xh\n", rval);
1334 			rval = EAGAIN;
1335 			break;
1336 		}
1337 		LITTLE_ENDIAN_32(lptr);
1338 		lptr++;
1339 	}
1340 
1341 	ql_release_nvram(ha);
1342 	ql_restart_driver(ha);
1343 
1344 	if (ddi_copyout(vpd, bp, vpd_size, mode) != 0) {
1345 		CACHE_UNLOCK(ha);
1346 		EL(ha, "Buffer copy failed\n");
1347 		kmem_free(vpd, vpd_size);
1348 		return (EFAULT);
1349 	}
1350 
1351 	ha->vcache = vpd;
1352 
1353 	CACHE_UNLOCK(ha);
1354 
1355 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1356 
1357 	if (rval != QL_SUCCESS) {
1358 		return (EFAULT);
1359 	} else {
1360 		return (0);
1361 	}
1362 }
1363 
1364 /*
1365  * ql_vpd_findtag
1366  *	Search the passed vpd buffer for the requested VPD tag type.
1367  *
1368  * Input:
1369  *	ha	= adapter state pointer.
1370  *	vpdbuf	= Pointer to start of the buffer to search
1371  *	opcode	= VPD opcode to find (must be NULL terminated).
1372  *
1373  * Returns:
1374  *	Pointer to the opcode in the buffer if opcode found.
1375  *	NULL if opcode is not found.
1376  *
1377  * Context:
1378  *	Kernel context.
1379  */
1380 static uint8_t *
1381 ql_vpd_findtag(ql_adapter_state_t *ha, uint8_t *vpdbuf, int8_t *opcode)
1382 {
1383 	uint8_t		*vpd = vpdbuf;
1384 	uint8_t		*end = vpdbuf + QL_24XX_VPD_SIZE;
1385 	uint32_t	found = 0;
1386 
1387 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1388 
1389 	if (vpdbuf == NULL || opcode == NULL) {
1390 		EL(ha, "null parameter passed!\n");
1391 		return (NULL);
1392 	}
1393 
1394 	while (vpd < end) {
1395 
1396 		/* check for end of vpd */
1397 		if (vpd[0] == VPD_TAG_END) {
1398 			if (opcode[0] == VPD_TAG_END) {
1399 				found = 1;
1400 			} else {
1401 				found = 0;
1402 			}
1403 			break;
1404 		}
1405 
1406 		/* check opcode */
1407 		if (bcmp(opcode, vpd, strlen(opcode)) == 0) {
1408 			/* found opcode requested */
1409 			found = 1;
1410 			break;
1411 		}
1412 
1413 		/*
1414 		 * Didn't find the opcode, so calculate start of
1415 		 * next tag. Depending on the current tag type,
1416 		 * the length field can be 1 or 2 bytes
1417 		 */
1418 		if (!(strncmp((char *)vpd, (char *)VPD_TAG_PRODID, 1))) {
1419 			vpd += (vpd[2] << 8) + vpd[1] + 3;
1420 		} else if (*vpd == VPD_TAG_LRT || *vpd == VPD_TAG_LRTC) {
1421 			vpd += 3;
1422 		} else {
1423 			vpd += vpd[2] + 3;
1424 		}
1425 	}
1426 
1427 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1428 
1429 	return (found == 1 ? vpd : NULL);
1430 }
1431 
1432 /*
1433  * ql_vpd_lookup
1434  *	Return the VPD data for the requested VPD tag
1435  *
1436  * Input:
1437  *	ha	= adapter state pointer.
1438  *	opcode	= VPD opcode to find (must be NULL terminated).
1439  *	bp	= Pointer to returned data buffer.
1440  *	bplen	= Length of returned data buffer.
1441  *
1442  * Returns:
1443  *	Length of data copied into returned data buffer.
1444  *		>0 = VPD data field (NULL terminated)
1445  *		 0 = no data.
1446  *		-1 = Could not find opcode in vpd buffer / error.
1447  *
1448  * Context:
1449  *	Kernel context.
1450  *
1451  * NB: The opcode buffer and the bp buffer *could* be the same buffer!
1452  *
1453  */
1454 int32_t
1455 ql_vpd_lookup(ql_adapter_state_t *ha, uint8_t *opcode, uint8_t *bp,
1456     int32_t bplen)
1457 {
1458 	uint8_t		*vpd;
1459 	uint8_t		*vpdbuf;
1460 	int32_t		len = -1;
1461 
1462 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1463 
1464 	if (opcode == NULL || bp == NULL || bplen < 1) {
1465 		EL(ha, "invalid parameter passed: opcode=%ph, "
1466 		    "bp=%ph, bplen=%xh\n", opcode, bp, bplen);
1467 		return (len);
1468 	}
1469 
1470 	if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
1471 		return (len);
1472 	}
1473 
1474 	if ((vpdbuf = (uint8_t *)kmem_zalloc(QL_24XX_VPD_SIZE,
1475 	    KM_SLEEP)) == NULL) {
1476 		EL(ha, "unable to allocate vpd memory\n");
1477 		return (len);
1478 	}
1479 
1480 	if ((ql_vpd_dump(ha, vpdbuf, (int)FKIOCTL)) != 0) {
1481 		kmem_free(vpdbuf, QL_24XX_VPD_SIZE);
1482 		EL(ha, "unable to retrieve VPD data\n");
1483 		return (len);
1484 	}
1485 
1486 	if ((vpd = ql_vpd_findtag(ha, vpdbuf, (int8_t *)opcode)) != NULL) {
1487 		/*
1488 		 * Found the tag
1489 		 */
1490 		if (*opcode == VPD_TAG_END || *opcode == VPD_TAG_LRT ||
1491 		    *opcode == VPD_TAG_LRTC) {
1492 			/*
1493 			 * we found it, but the tag doesn't have a data
1494 			 * field.
1495 			 */
1496 			len = 0;
1497 		} else if (!(strncmp((char *)vpd, (char *)
1498 		    VPD_TAG_PRODID, 1))) {
1499 			len = vpd[2] << 8;
1500 			len += vpd[1];
1501 		} else {
1502 			len = vpd[2];
1503 		}
1504 
1505 		/*
1506 		 * make sure that the vpd len doesn't exceed the
1507 		 * vpd end
1508 		 */
1509 		if (vpd+len > vpdbuf + QL_24XX_VPD_SIZE) {
1510 			EL(ha, "vpd tag len (%xh) exceeds vpd buffer "
1511 			    "length\n", len);
1512 			len = -1;
1513 		}
1514 	}
1515 
1516 	if (len >= 0) {
1517 		/*
1518 		 * make sure we don't exceed callers buffer space len
1519 		 */
1520 		if (len > bplen) {
1521 			len = bplen-1;
1522 		}
1523 
1524 		/* copy the data back */
1525 		(void) strncpy((int8_t *)bp, (int8_t *)(vpd+3), (int64_t)len);
1526 		bp[len] = NULL;
1527 	} else {
1528 		/* error -- couldn't find tag */
1529 		bp[0] = NULL;
1530 		if (opcode[1] != NULL) {
1531 			EL(ha, "unable to find tag '%s'\n", opcode);
1532 		} else {
1533 			EL(ha, "unable to find tag '%xh'\n", opcode[0]);
1534 		}
1535 	}
1536 
1537 	kmem_free(vpdbuf, QL_24XX_VPD_SIZE);
1538 
1539 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1540 
1541 	return (len);
1542 }
1543 
1544 /*
1545  * ql_r_m_w_flash
1546  *	Read modify write from user space to flash.
1547  *
1548  * Input:
1549  *	ha:	adapter state pointer.
1550  *	dp:	source byte pointer.
1551  *	bc:	byte count.
1552  *	faddr:	flash byte address.
1553  *	mode:	flags.
1554  *
1555  * Returns:
1556  *	ql local function return status code.
1557  *
1558  * Context:
1559  *	Kernel context.
1560  */
1561 int
1562 ql_r_m_w_flash(ql_adapter_state_t *ha, caddr_t dp, uint32_t bc, uint32_t faddr,
1563     int mode)
1564 {
1565 	uint8_t		*bp;
1566 	uint32_t	xfer, bsize, saddr, ofst;
1567 	int		rval = 0;
1568 
1569 	QL_PRINT_9(CE_CONT, "(%d): started, dp=%ph, faddr=%xh, bc=%xh\n",
1570 	    ha->instance, (void *)dp, faddr, bc);
1571 
1572 	bsize = ha->xioctl->fdesc.block_size;
1573 	saddr = faddr & ~(bsize - 1);
1574 	ofst = faddr & (bsize - 1);
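	/* saddr = sector-aligned start address, ofst = offset within that sector. */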
1575 
1576 	if ((bp = kmem_zalloc(bsize, KM_SLEEP)) == NULL) {
1577 		EL(ha, "kmem_zalloc=null\n");
1578 		return (QL_MEMORY_ALLOC_FAILED);
1579 	}
1580 
1581 	while (bc) {
1582 		xfer = bc > bsize ? bsize : bc;
1583 		if (ofst + xfer > bsize) {
1584 			xfer = bsize - ofst;
1585 		}
1586 		QL_PRINT_9(CE_CONT, "(%d): dp=%ph, saddr=%xh, bc=%xh, "
1587 		    "ofst=%xh, xfer=%xh\n", ha->instance, (void *)dp, saddr,
1588 		    bc, ofst, xfer);
1589 
1590 		if (ofst || xfer < bsize) {
1591 			/* Dump Flash sector. */
1592 			if ((rval = ql_dump_fcode(ha, bp, bsize, saddr)) !=
1593 			    QL_SUCCESS) {
1594 				EL(ha, "dump_flash status=%x\n", rval);
1595 				break;
1596 			}
1597 		}
1598 
1599 		/* Set new data. */
1600 		if ((rval = ddi_copyin(dp, (caddr_t)(bp + ofst), xfer,
1601 		    mode)) != 0) {
1602 			EL(ha, "ddi_copyin status=%xh, dp=%ph, ofst=%xh, "
1603 			    "xfer=%xh\n", rval, (void *)dp, ofst, xfer);
1604 			rval = QL_FUNCTION_FAILED;
1605 			break;
1606 		}
1607 
1608 		/* Write to flash. */
1609 		if ((rval = ql_load_fcode(ha, bp, bsize, saddr)) !=
1610 		    QL_SUCCESS) {
1611 			EL(ha, "load_flash status=%x\n", rval);
1612 			break;
1613 		}
1614 		bc -= xfer;
1615 		dp += xfer;
1616 		saddr += bsize;
1617 		ofst = 0;
1618 	}
1619 
1620 	kmem_free(bp, bsize);
1621 
1622 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1623 
1624 	return (rval);
1625 }
1626 
1627 /*
1628  * ql_adm_op
1629  *	Performs qladm utility operations
1630  *
1631  * Input:
1632  *	ha:	adapter state pointer.
1633  *	arg:	driver_op_t structure pointer.
1634  *	mode:	flags.
1635  *
1636  * Returns:
1637  *
1638  * Context:
1639  *	Kernel context.
1640  */
1641 static int
1642 ql_adm_op(ql_adapter_state_t *ha, void *arg, int mode)
1643 {
1644 	ql_adm_op_t		dop;
1645 	int			rval = 0;
1646 
1647 	if (ddi_copyin(arg, &dop, sizeof (ql_adm_op_t), mode) != 0) {
1648 		EL(ha, "failed, driver_op_t ddi_copyin\n");
1649 		return (EFAULT);
1650 	}
1651 
1652 	QL_PRINT_9(CE_CONT, "(%d): started, cmd=%xh, buffer=%llx,"
1653 	    " length=%xh, option=%xh\n", ha->instance, dop.cmd, dop.buffer,
1654 	    dop.length, dop.option);
1655 
1656 	switch (dop.cmd) {
1657 	case QL_ADAPTER_INFO:
1658 		rval = ql_adm_adapter_info(ha, &dop, mode);
1659 		break;
1660 
1661 	case QL_EXTENDED_LOGGING:
1662 		rval = ql_adm_extended_logging(ha, &dop);
1663 		break;
1664 
1665 	case QL_LOOP_RESET:
1666 		rval = ql_adm_loop_reset(ha);
1667 		break;
1668 
1669 	case QL_DEVICE_LIST:
1670 		rval = ql_adm_device_list(ha, &dop, mode);
1671 		break;
1672 
1673 	case QL_PROP_UPDATE_INT:
1674 		rval = ql_adm_prop_update_int(ha, &dop, mode);
1675 		break;
1676 
1677 	case QL_UPDATE_PROPERTIES:
1678 		rval = ql_adm_update_properties(ha);
1679 		break;
1680 
1681 	case QL_FW_DUMP:
1682 		rval = ql_adm_fw_dump(ha, &dop, arg, mode);
1683 		break;
1684 
1685 	case QL_NVRAM_LOAD:
1686 		rval = ql_adm_nvram_load(ha, &dop, mode);
1687 		break;
1688 
1689 	case QL_NVRAM_DUMP:
1690 		rval = ql_adm_nvram_dump(ha, &dop, mode);
1691 		break;
1692 
1693 	case QL_FLASH_LOAD:
1694 		rval = ql_adm_flash_load(ha, &dop, mode);
1695 		break;
1696 
1697 	case QL_VPD_LOAD:
1698 		rval = ql_adm_vpd_load(ha, &dop, mode);
1699 		break;
1700 
1701 	case QL_VPD_DUMP:
1702 		rval = ql_adm_vpd_dump(ha, &dop, mode);
1703 		break;
1704 
1705 	case QL_VPD_GETTAG:
1706 		rval = ql_adm_vpd_gettag(ha, &dop, mode);
1707 		break;
1708 
1709 	case QL_UPD_FWMODULE:
1710 		rval = ql_adm_updfwmodule(ha, &dop, mode);
1711 		break;
1712 
1713 	default:
1714 		EL(ha, "unsupported driver op cmd: %x\n", dop.cmd);
1715 		return (EINVAL);
1716 	}
1717 
1718 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1719 
1720 	return (rval);
1721 }
1722 
1723 /*
1724  * ql_adm_adapter_info
1725  *	Performs qladm QL_ADAPTER_INFO command
1726  *
1727  * Input:
1728  *	ha:	adapter state pointer.
1729  *	dop:	ql_adm_op_t structure pointer.
1730  *	mode:	flags.
1731  *
1732  * Returns:
1733  *
1734  * Context:
1735  *	Kernel context.
1736  */
1737 static int
1738 ql_adm_adapter_info(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
1739 {
1740 	ql_adapter_info_t	hba;
1741 	uint8_t			*dp;
1742 	uint32_t		length;
1743 	int			rval, i;
1744 
1745 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1746 
1747 	hba.device_id = ha->device_id;
1748 
1749 	dp = CFG_IST(ha, CFG_CTRL_242581) ?
1750 	    &ha->init_ctrl_blk.cb24.port_name[0] :
1751 	    &ha->init_ctrl_blk.cb.port_name[0];
1752 	bcopy(dp, hba.wwpn, 8);
1753 
1754 	hba.d_id = ha->d_id.b24;
1755 
1756 	if (ha->xioctl->fdesc.flash_size == 0 &&
1757 	    !(CFG_IST(ha, CFG_CTRL_2200) && !ha->subven_id)) {
1758 		if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
1759 			EL(ha, "ql_stall_driver failed\n");
1760 			return (EBUSY);
1761 		}
1762 
1763 		if ((rval = ql_setup_fcache(ha)) != QL_SUCCESS) {
1764 			EL(ha, "ql_setup_flash failed=%xh\n", rval);
1765 			if (rval == QL_FUNCTION_TIMEOUT) {
1766 				return (EBUSY);
1767 			}
1768 			return (EIO);
1769 		}
1770 
1771 		/* Resume I/O */
1772 		if (CFG_IST(ha, CFG_CTRL_242581)) {
1773 			ql_restart_driver(ha);
1774 		} else {
1775 			EL(ha, "isp_abort_needed for restart\n");
1776 			ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED,
1777 			    DRIVER_STALL);
1778 		}
1779 	}
1780 	hba.flash_size = ha->xioctl->fdesc.flash_size;
1781 
1782 	(void) strcpy(hba.driver_ver, QL_VERSION);
1783 
1784 	(void) sprintf(hba.fw_ver, "%d.%d.%d", ha->fw_major_version,
1785 	    ha->fw_minor_version, ha->fw_subminor_version);
1786 
1787 	bzero(hba.fcode_ver, sizeof (hba.fcode_ver));
1788 
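	/* The fcode version is read from the "version" device property. */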
1789 	/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
1790 	rval = ddi_getlongprop(DDI_DEV_T_ANY, ha->dip,
1791 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version", (caddr_t)&dp, &i);
1792 	length = i;
1793 	if (rval != DDI_PROP_SUCCESS) {
1794 		EL(ha, "failed, ddi_getlongprop=%xh\n", rval);
1795 	} else {
1796 		if (length > (uint32_t)sizeof (hba.fcode_ver)) {
1797 			length = sizeof (hba.fcode_ver) - 1;
1798 		}
1799 		bcopy((void *)dp, (void *)hba.fcode_ver, length);
1800 		kmem_free(dp, length);
1801 	}
1802 
1803 	if (ddi_copyout((void *)&hba, (void *)(uintptr_t)dop->buffer,
1804 	    dop->length, mode) != 0) {
1805 		EL(ha, "failed, ddi_copyout\n");
1806 		return (EFAULT);
1807 	}
1808 
1809 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1810 
1811 	return (0);
1812 }
1813 
1814 /*
1815  * ql_adm_extended_logging
1816  *	Performs qladm QL_EXTENDED_LOGGING command
1817  *
1818  * Input:
1819  *	ha:	adapter state pointer.
1820  *	dop:	ql_adm_op_t structure pointer.
1821  *
1822  * Returns:
1823  *
1824  * Context:
1825  *	Kernel context.
1826  */
1827 static int
1828 ql_adm_extended_logging(ql_adapter_state_t *ha, ql_adm_op_t *dop)
1829 {
1830 	char	prop_name[MAX_PROP_LENGTH];
1831 	int	rval;
1832 
1833 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1834 
1835 	(void) sprintf(prop_name, "hba%d-extended-logging", ha->instance);
1836 
1837 	/*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
1838 	rval = ddi_prop_update_int(DDI_DEV_T_NONE, ha->dip, prop_name,
1839 	    (int)dop->option);
1840 	if (rval != DDI_PROP_SUCCESS) {
1841 		EL(ha, "failed, prop_update = %xh\n", rval);
1842 		return (EINVAL);
1843 	} else {
1844 		dop->option ?
1845 		    (ha->cfg_flags |= CFG_ENABLE_EXTENDED_LOGGING) :
1846 		    (ha->cfg_flags &= ~CFG_ENABLE_EXTENDED_LOGGING);
1847 	}
1848 
1849 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1850 
1851 	return (0);
1852 }
1853 
1854 /*
1855  * ql_adm_loop_reset
1856  *	Performs qladm QL_LOOP_RESET command
1857  *
1858  * Input:
1859  *	ha:	adapter state pointer.
1860  *
1861  * Returns:
1862  *
1863  * Context:
1864  *	Kernel context.
1865  */
1866 static int
1867 ql_adm_loop_reset(ql_adapter_state_t *ha)
1868 {
1869 	int	rval;
1870 
1871 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1872 
1873 	if (ha->task_daemon_flags & LOOP_DOWN) {
1874 		(void) ql_full_login_lip(ha);
1875 	} else if ((rval = ql_full_login_lip(ha)) != QL_SUCCESS) {
1876 		EL(ha, "failed, ql_full_login_lip=%xh\n", rval);
1877 		return (EIO);
1878 	}
1879 
1880 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1881 
1882 	return (0);
1883 }
1884 
1885 /*
1886  * ql_adm_device_list
1887  *	Performs qladm QL_DEVICE_LIST command
1888  *
1889  * Input:
1890  *	ha:	adapter state pointer.
1891  *	dop:	ql_adm_op_t structure pointer.
1892  *	mode:	flags.
1893  *
1894  * Returns:
1895  *
1896  * Context:
1897  *	Kernel context.
1898  */
1899 static int
1900 ql_adm_device_list(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
1901 {
1902 	ql_device_info_t	dev;
1903 	ql_link_t		*link;
1904 	ql_tgt_t		*tq;
1905 	uint32_t		index, cnt;
1906 
1907 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1908 
1909 	cnt = 0;
1910 	dev.address = 0xffffffff;
1911 
1912 	/* Scan port list for requested target and fill in the values */
1913 	for (link = NULL, index = 0;
1914 	    index < DEVICE_HEAD_LIST_SIZE && link == NULL; index++) {
1915 		for (link = ha->dev[index].first; link != NULL;
1916 		    link = link->next) {
1917 			tq = link->base_address;
1918 
1919 			if (!VALID_TARGET_ID(ha, tq->loop_id)) {
1920 				continue;
1921 			}
1922 			if (cnt != dop->option) {
1923 				cnt++;
1924 				continue;
1925 			}
1926 			/* fill in the values */
1927 			bcopy(tq->port_name, dev.wwpn, 8);
1928 			dev.address = tq->d_id.b24;
1929 			dev.loop_id = tq->loop_id;
1930 			if (tq->flags & TQF_TAPE_DEVICE) {
1931 				dev.type = FCT_TAPE;
1932 			} else if (tq->flags & TQF_INITIATOR_DEVICE) {
1933 				dev.type = FCT_INITIATOR;
1934 			} else {
1935 				dev.type = FCT_TARGET;
1936 			}
1937 			break;
1938 		}
1939 	}
1940 
1941 	if (ddi_copyout((void *)&dev, (void *)(uintptr_t)dop->buffer,
1942 	    dop->length, mode) != 0) {
1943 		EL(ha, "failed, ddi_copyout\n");
1944 		return (EFAULT);
1945 	}
1946 
1947 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1948 
1949 	return (0);
1950 }
1951 
1952 /*
1953  * ql_adm_update_properties
1954  *	Performs qladm QL_UPDATE_PROPERTIES command
1955  *
1956  * Input:
1957  *	ha:	adapter state pointer.
1958  *
1959  * Returns:
1960  *
1961  * Context:
1962  *	Kernel context.
1963  */
1964 static int
1965 ql_adm_update_properties(ql_adapter_state_t *ha)
1966 {
1967 	ql_comb_init_cb_t	init_ctrl_blk;
1968 	ql_comb_ip_init_cb_t	ip_init_ctrl_blk;
1969 
1970 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1971 
1972 	/* Stall driver instance. */
1973 	(void) ql_stall_driver(ha, 0);
1974 
1975 	/* Save init control blocks. */
1976 	bcopy(&ha->init_ctrl_blk, &init_ctrl_blk, sizeof (ql_comb_init_cb_t));
1977 	bcopy(&ha->ip_init_ctrl_blk, &ip_init_ctrl_blk,
1978 	    sizeof (ql_comb_ip_init_cb_t));
1979 
1980 	/* Update PCI configuration. */
1981 	(void) ql_pci_sbus_config(ha);
1982 
1983 	/* Get configuration properties. */
1984 	(void) ql_nvram_config(ha);
1985 
1986 	/* Check for init firmware required. */
1987 	if (bcmp(&ha->init_ctrl_blk, &init_ctrl_blk,
1988 	    sizeof (ql_comb_init_cb_t)) != 0 ||
1989 	    bcmp(&ha->ip_init_ctrl_blk, &ip_init_ctrl_blk,
1990 	    sizeof (ql_comb_ip_init_cb_t)) != 0) {
1991 
1992 		EL(ha, "isp_abort_needed\n");
1993 		ha->loop_down_timer = LOOP_DOWN_TIMER_START;
1994 		TASK_DAEMON_LOCK(ha);
1995 		ha->task_daemon_flags |= LOOP_DOWN | ISP_ABORT_NEEDED;
1996 		TASK_DAEMON_UNLOCK(ha);
1997 	}
1998 
1999 	/* Update AEN queue. */
2000 	if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
2001 		ql_enqueue_aen(ha, MBA_PORT_UPDATE, NULL);
2002 	}
2003 
2004 	/* Restart driver instance. */
2005 	ql_restart_driver(ha);
2006 
2007 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2008 
2009 	return (0);
2010 }
2011 
2012 /*
2013  * ql_adm_prop_update_int
2014  *	Performs qladm QL_PROP_UPDATE_INT command
2015  *
2016  * Input:
2017  *	ha:	adapter state pointer.
2018  *	dop:	ql_adm_op_t structure pointer.
2019  *	mode:	flags.
2020  *
2021  * Returns:
2022  *
2023  * Context:
2024  *	Kernel context.
2025  */
2026 static int
2027 ql_adm_prop_update_int(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
2028 {
2029 	char	*prop_name;
2030 	int	rval;
2031 
2032 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2033 
2034 	prop_name = kmem_zalloc(dop->length, KM_SLEEP);
2035 	if (prop_name == NULL) {
2036 		EL(ha, "failed, kmem_zalloc\n");
2037 		return (ENOMEM);
2038 	}
2039 
2040 	if (ddi_copyin((void *)(uintptr_t)dop->buffer, prop_name, dop->length,
2041 	    mode) != 0) {
2042 		EL(ha, "failed, prop_name ddi_copyin\n");
2043 		kmem_free(prop_name, dop->length);
2044 		return (EFAULT);
2045 	}
2046 
2047 	/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
2048 	if ((rval = ddi_prop_update_int(DDI_DEV_T_NONE, ha->dip, prop_name,
2049 	    (int)dop->option)) != DDI_PROP_SUCCESS) {
2050 		EL(ha, "failed, prop_update=%xh\n", rval);
2051 		kmem_free(prop_name, dop->length);
2052 		return (EINVAL);
2053 	}
2054 
2055 	kmem_free(prop_name, dop->length);
2056 
2057 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2058 
2059 	return (0);
2060 }
2061 
2062 /*
2063  * ql_adm_fw_dump
2064  *	Performs qladm QL_FW_DUMP command
2065  *
2066  * Input:
2067  *	ha:	adapter state pointer.
2068  *	dop:	ql_adm_op_t structure pointer.
2069  *	udop:	user space ql_adm_op_t structure pointer.
2070  *	mode:	flags.
2071  *
2072  * Returns:
2073  *
2074  * Context:
2075  *	Kernel context.
2076  */
2077 static int
2078 ql_adm_fw_dump(ql_adapter_state_t *ha, ql_adm_op_t *dop, void *udop, int mode)
2079 {
2080 	caddr_t	dmp;
2081 
2082 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2083 
2084 	if (dop->length < ha->risc_dump_size) {
2085 		EL(ha, "failed, incorrect length=%xh, size=%xh\n",
2086 		    dop->length, ha->risc_dump_size);
2087 		return (EINVAL);
2088 	}
2089 
2090 	if (ha->ql_dump_state & QL_DUMP_VALID) {
2091 		dmp = kmem_zalloc(ha->risc_dump_size, KM_SLEEP);
2092 		if (dmp == NULL) {
2093 			EL(ha, "failed, kmem_zalloc\n");
2094 			return (ENOMEM);
2095 		}
2096 
2097 		dop->length = (uint32_t)ql_ascii_fw_dump(ha, dmp);
2098 		if (ddi_copyout((void *)dmp, (void *)(uintptr_t)dop->buffer,
2099 		    dop->length, mode) != 0) {
2100 			EL(ha, "failed, ddi_copyout\n");
2101 			kmem_free(dmp, ha->risc_dump_size);
2102 			return (EFAULT);
2103 		}
2104 
2105 		kmem_free(dmp, ha->risc_dump_size);
2106 		ha->ql_dump_state |= QL_DUMP_UPLOADED;
2107 
2108 	} else {
2109 		EL(ha, "failed, no dump file\n");
2110 		dop->length = 0;
2111 	}
2112 
2113 	if (ddi_copyout(dop, udop, sizeof (ql_adm_op_t), mode) != 0) {
2114 		EL(ha, "failed, driver_op_t ddi_copyout\n");
2115 		return (EFAULT);
2116 	}
2117 
2118 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2119 
2120 	return (0);
2121 }
2122 
2123 /*
2124  * ql_adm_nvram_dump
2125  *	Performs qladm QL_NVRAM_DUMP command
2126  *
2127  * Input:
2128  *	ha:	adapter state pointer.
2129  *	dop:	ql_adm_op_t structure pointer.
2130  *	mode:	flags.
2131  *
2132  * Returns:
2133  *
2134  * Context:
2135  *	Kernel context.
2136  */
2137 static int
2138 ql_adm_nvram_dump(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
2139 {
2140 	int		rval;
2141 
2142 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2143 
2144 	if (dop->length < ha->nvram_cache->size) {
2145 		EL(ha, "failed, length=%xh, size=%xh\n", dop->length,
2146 		    ha->nvram_cache->size);
2147 		return (EINVAL);
2148 	}
2149 
2150 	if ((rval = ql_nv_util_dump(ha, (void *)(uintptr_t)dop->buffer,
2151 	    mode)) != 0) {
2152 		EL(ha, "failed, ql_nv_util_dump\n");
2153 	} else {
2154 		/*EMPTY*/
2155 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2156 	}
2157 
2158 	return (rval);
2159 }
2160 
2161 /*
2162  * ql_adm_nvram_load
2163  *	Performs qladm QL_NVRAM_LOAD command
2164  *
2165  * Input:
2166  *	ha:	adapter state pointer.
2167  *	dop:	ql_adm_op_t structure pointer.
2168  *	mode:	flags.
2169  *
2170  * Returns:
2171  *	0 = success, else errno value.
2172  * Context:
2173  *	Kernel context.
2174  */
2175 static int
2176 ql_adm_nvram_load(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
2177 {
2178 	int		rval;
2179 
2180 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2181 
2182 	if (dop->length < ha->nvram_cache->size) {
2183 		EL(ha, "failed, length=%xh, size=%xh\n", dop->length,
2184 		    ha->nvram_cache->size);
2185 		return (EINVAL);
2186 	}
2187 
2188 	if ((rval = ql_nv_util_load(ha, (void *)(uintptr_t)dop->buffer,
2189 	    mode)) != 0) {
2190 		EL(ha, "failed, ql_nv_util_load\n");
2191 	} else {
2192 		/*EMPTY*/
2193 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2194 	}
2195 
2196 	return (rval);
2197 }
2198 
2199 /*
2200  * ql_adm_flash_load
2201  *	Performs qladm QL_FLASH_LOAD command
2202  *
2203  * Input:
2204  *	ha:	adapter state pointer.
2205  *	dop:	ql_adm_op_t structure pointer.
2206  *	mode:	flags.
2207  *
2208  * Returns:
2209  *	0 = success, else errno value.
2210  * Context:
2211  *	Kernel context.
2212  */
2213 static int
2214 ql_adm_flash_load(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
2215 {
2216 	uint8_t	*dp;
2217 	int	rval;
2218 
2219 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2220 
2221 	if ((dp = kmem_zalloc(dop->length, KM_SLEEP)) == NULL) {
2222 		EL(ha, "failed, kmem_zalloc\n");
2223 		return (ENOMEM);
2224 	}
2225 
2226 	if (ddi_copyin((void *)(uintptr_t)dop->buffer, dp, dop->length,
2227 	    mode) != 0) {
2228 		EL(ha, "ddi_copyin failed\n");
2229 		kmem_free(dp, dop->length);
2230 		return (EFAULT);
2231 	}
2232 
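	/*
	 * Quiesce the driver so no I/O is in flight while the flash is
	 * being reprogrammed; ql_restart_driver() below resumes normal
	 * operation once the write completes.
	 */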
2233 	if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
2234 		EL(ha, "ql_stall_driver failed\n");
2235 		kmem_free(dp, dop->length);
2236 		return (EBUSY);
2237 	}
2238 
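	/* 24xx-family parts (24xx/25xx/81xx) use the 24xx flash routine. */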
2239 	rval = (CFG_IST(ha, CFG_CTRL_242581) ?
2240 	    ql_24xx_load_flash(ha, dp, dop->length, dop->option) :
2241 	    ql_load_flash(ha, dp, dop->length));
2242 
2243 	ql_restart_driver(ha);
2244 
2245 	kmem_free(dp, dop->length);
2246 
2247 	if (rval != QL_SUCCESS) {
2248 		EL(ha, "failed\n");
2249 		return (EIO);
2250 	}
2251 
2252 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2253 
2254 	return (0);
2255 }
2256 
2257 /*
2258  * ql_adm_vpd_dump
2259  *	Performs qladm QL_VPD_DUMP command
2260  *
2261  * Input:
2262  *	ha:	adapter state pointer.
2263  *	dop:	ql_adm_op_t structure pointer.
2264  *	mode:	flags.
2265  *
2266  * Returns:
2267  *	0 = success, else errno value.
2268  * Context:
2269  *	Kernel context.
2270  */
2271 static int
2272 ql_adm_vpd_dump(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
2273 {
2274 	int		rval;
2275 
2276 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2277 
2278 	if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
2279 		EL(ha, "hba does not support VPD\n");
2280 		return (EINVAL);
2281 	}
2282 
2283 	if (dop->length < QL_24XX_VPD_SIZE) {
2284 		EL(ha, "failed, length=%xh, size=%xh\n", dop->length,
2285 		    QL_24XX_VPD_SIZE);
2286 		return (EINVAL);
2287 	}
2288 
2289 	if ((rval = ql_vpd_dump(ha, (void *)(uintptr_t)dop->buffer, mode))
2290 	    != 0) {
2291 		EL(ha, "failed, ql_vpd_dump\n");
2292 	} else {
2293 		/*EMPTY*/
2294 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2295 	}
2296 
2297 	return (rval);
2298 }
2299 
2300 /*
2301  * ql_adm_vpd_load
2302  *	Performs qladm QL_VPD_LOAD command
2303  *
2304  * Input:
2305  *	ha:	adapter state pointer.
2306  *	dop:	ql_adm_op_t structure pointer.
2307  *	mode:	flags.
2308  *
2309  * Returns:
2310  *	0 = success, else errno value.
2311  * Context:
2312  *	Kernel context.
2313  */
2314 static int
2315 ql_adm_vpd_load(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
2316 {
2317 	int		rval;
2318 
2319 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2320 
2321 	if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
2322 		EL(ha, "hba does not support VPD\n");
2323 		return (EINVAL);
2324 	}
2325 
2326 	if (dop->length < QL_24XX_VPD_SIZE) {
2327 		EL(ha, "failed, length=%xh, size=%xh\n", dop->length,
2328 		    QL_24XX_VPD_SIZE);
2329 		return (EINVAL);
2330 	}
2331 
2332 	if ((rval = ql_vpd_load(ha, (void *)(uintptr_t)dop->buffer, mode))
2333 	    != 0) {
2334 		EL(ha, "failed, ql_vpd_load\n");
2335 	} else {
2336 		/*EMPTY*/
2337 		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2338 	}
2339 
2340 	return (rval);
2341 }
2342 
2343 /*
2344  * ql_adm_vpd_gettag
2345  *	Performs qladm QL_VPD_GETTAG command
2346  *
2347  * Input:
2348  *	ha:	adapter state pointer.
2349  *	dop:	ql_adm_op_t structure pointer.
2350  *	mode:	flags.
2351  *
2352  * Returns:
2353  *	0 = success, else error value.
2354  * Context:
2355  *	Kernel context.
2356  */
2357 static int
2358 ql_adm_vpd_gettag(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
2359 {
2360 	int		rval = 0;
2361 	uint8_t		*lbuf;
2362 
2363 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2364 
2365 	if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
2366 		EL(ha, "hba does not support VPD\n");
2367 		return (EINVAL);
2368 	}
2369 
2370 	if ((lbuf = (uint8_t *)kmem_zalloc(dop->length, KM_SLEEP)) == NULL) {
2371 		EL(ha, "mem alloc failure of %xh bytes\n", dop->length);
2372 		rval = ENOMEM;
2373 	} else {
2374 		if (ddi_copyin((void *)(uintptr_t)dop->buffer, lbuf,
2375 		    dop->length, mode) != 0) {
2376 			EL(ha, "ddi_copyin failed\n");
2377 			kmem_free(lbuf, dop->length);
2378 			return (EFAULT);
2379 		}
2380 
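		/*
		 * lbuf is used in both directions: it holds the
		 * user-supplied tag request on input and receives the
		 * NUL-terminated lookup result copied back out below.
		 */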
2381 		if ((rval = ql_vpd_lookup(ha, lbuf, lbuf, (int32_t)
2382 		    dop->length)) < 0) {
2383 			EL(ha, "failed vpd_lookup\n");
2384 		} else {
2385 			if (ddi_copyout(lbuf, (void *)(uintptr_t)dop->buffer,
2386 			    strlen((int8_t *)lbuf)+1, mode) != 0) {
2387 				EL(ha, "failed, ddi_copyout\n");
2388 				rval = EFAULT;
2389 			} else {
2390 				rval = 0;
2391 			}
2392 		}
2393 		kmem_free(lbuf, dop->length);
2394 	}
2395 
2396 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2397 
2398 	return (rval);
2399 }
2400 
2401 /*
2402  * ql_adm_updfwmodule
2403  *	Performs qladm QL_UPD_FWMODULE command
2404  *
2405  * Input:
2406  *	ha:	adapter state pointer.
2407  *	dop:	ql_adm_op_t structure pointer.
2408  *	mode:	flags.
2409  *
2410  * Returns:
2411  *	0 = success, else error value.
2412  * Context:
2413  *	Kernel context.
2414  */
2415 /* ARGSUSED */
2416 static int
2417 ql_adm_updfwmodule(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
2418 {
2419 	int			rval = DDI_SUCCESS;
2420 	ql_link_t		*link;
2421 	ql_adapter_state_t	*ha2 = NULL;
2422 	uint16_t		fw_class = (uint16_t)dop->option;
2423 
2424 	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2425 
2426 	/* Close all adapter references to the f/w module of this class. */
2427 	for (link = ql_hba.first; link != NULL; link = link->next) {
2428 		ha2 = link->base_address;
2429 		if (fw_class == ha2->fw_class) {
2430 			if ((rval = ddi_modclose(ha2->fw_module)) !=
2431 			    DDI_SUCCESS) {
2432 				EL(ha2, "modclose rval=%xh\n", rval);
2433 				break;
2434 			}
2435 			ha2->fw_module = NULL;
2436 		}
2437 	}
2438 
2439 	/* Reload the f/w modules now that the old references are closed. */
2440 	for (link = ql_hba.first; link != NULL; link = link->next) {
2441 		ha2 = link->base_address;
2442 
2443 		if ((fw_class == ha2->fw_class) && (ha2->fw_module == NULL)) {
2444 			if ((rval = (int32_t)ql_fwmodule_resolve(ha2)) !=
2445 			    QL_SUCCESS) {
2446 				EL(ha2, "unable to load f/w module: '%x' "
2447 				    "(rval=%xh)\n", ha2->fw_class, rval);
2448 				rval = EFAULT;
2449 			} else {
2450 				EL(ha2, "f/w module updated: '%x'\n",
2451 				    ha2->fw_class);
2452 			}
2453 
2454 			EL(ha2, "isp abort needed (%d)\n", ha2->instance);
2455 
2456 			ql_awaken_task_daemon(ha2, NULL, ISP_ABORT_NEEDED, 0);
2457 
2458 			rval = 0;
2459 		}
2460 	}
2461 
2462 	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2463 
2464 	return (rval);
2465 }
2466