xref: /titanic_50/usr/src/uts/common/io/comstar/port/qlt/qlt.c (revision 1a5e258f5471356ca102c7176637cdce45bac147)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 QLogic Corporation.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
29  */
30 
31 #include <sys/conf.h>
32 #include <sys/ddi.h>
33 #include <sys/stat.h>
34 #include <sys/pci.h>
35 #include <sys/sunddi.h>
36 #include <sys/modctl.h>
37 #include <sys/file.h>
38 #include <sys/cred.h>
39 #include <sys/byteorder.h>
40 #include <sys/atomic.h>
41 #include <sys/scsi/scsi.h>
42 
43 #include <sys/stmf_defines.h>
44 #include <sys/fct_defines.h>
45 #include <sys/stmf.h>
46 #include <sys/stmf_ioctl.h>
47 #include <sys/portif.h>
48 #include <sys/fct.h>
49 
50 #include "qlt.h"
51 #include "qlt_dma.h"
52 #include "qlt_ioctl.h"
53 #include "qlt_open.h"
54 
55 static int qlt_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
56 static int qlt_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
57 static void qlt_enable_intr(qlt_state_t *);
58 static void qlt_disable_intr(qlt_state_t *);
59 static fct_status_t qlt_reset_chip(qlt_state_t *qlt);
60 static fct_status_t qlt_download_fw(qlt_state_t *qlt);
61 static fct_status_t qlt_load_risc_ram(qlt_state_t *qlt, uint32_t *host_addr,
62     uint32_t word_count, uint32_t risc_addr);
63 static fct_status_t qlt_raw_mailbox_command(qlt_state_t *qlt);
64 static mbox_cmd_t *qlt_alloc_mailbox_command(qlt_state_t *qlt,
65 					uint32_t dma_size);
66 void qlt_free_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp);
67 static fct_status_t qlt_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp);
68 static uint_t qlt_isr(caddr_t arg, caddr_t arg2);
69 static fct_status_t qlt_firmware_dump(fct_local_port_t *port,
70     stmf_state_change_info_t *ssci);
71 static void qlt_handle_inot(qlt_state_t *qlt, uint8_t *inot);
72 static void qlt_handle_purex(qlt_state_t *qlt, uint8_t *resp);
73 static void qlt_handle_atio(qlt_state_t *qlt, uint8_t *atio);
74 static void qlt_handle_ctio_completion(qlt_state_t *qlt, uint8_t *rsp);
75 static void qlt_handle_sol_abort_completion(qlt_state_t *qlt, uint8_t *rsp);
76 static void qlt_handle_dereg_completion(qlt_state_t *qlt, uint8_t *rsp);
77 static void qlt_handle_unsol_els_completion(qlt_state_t *qlt, uint8_t *rsp);
78 static void qlt_handle_unsol_els_abort_completion(qlt_state_t *qlt,
79     uint8_t *rsp);
80 static void qlt_handle_sol_els_completion(qlt_state_t *qlt, uint8_t *rsp);
81 static void qlt_handle_rcvd_abts(qlt_state_t *qlt, uint8_t *resp);
82 static void qlt_handle_abts_completion(qlt_state_t *qlt, uint8_t *resp);
83 static fct_status_t qlt_read_nvram(qlt_state_t *qlt);
84 static void qlt_verify_fw(qlt_state_t *qlt);
85 static void qlt_handle_verify_fw_completion(qlt_state_t *qlt, uint8_t *rsp);
86 fct_status_t qlt_port_start(caddr_t arg);
87 fct_status_t qlt_port_stop(caddr_t arg);
88 fct_status_t qlt_port_online(qlt_state_t *qlt);
89 fct_status_t qlt_port_offline(qlt_state_t *qlt);
90 static fct_status_t qlt_get_link_info(fct_local_port_t *port,
91     fct_link_info_t *li);
92 static void qlt_ctl(struct fct_local_port *port, int cmd, void *arg);
93 static fct_status_t qlt_force_lip(qlt_state_t *);
94 static fct_status_t qlt_do_flogi(struct fct_local_port *port,
95 						fct_flogi_xchg_t *fx);
96 void qlt_handle_atio_queue_update(qlt_state_t *qlt);
97 void qlt_handle_resp_queue_update(qlt_state_t *qlt);
98 fct_status_t qlt_register_remote_port(fct_local_port_t *port,
99     fct_remote_port_t *rp, fct_cmd_t *login);
100 fct_status_t qlt_deregister_remote_port(fct_local_port_t *port,
101     fct_remote_port_t *rp);
102 fct_status_t qlt_send_cmd_response(fct_cmd_t *cmd, uint32_t ioflags);
103 fct_status_t qlt_send_els_response(qlt_state_t *qlt, fct_cmd_t *cmd);
104 fct_status_t qlt_send_abts_response(qlt_state_t *qlt,
105     fct_cmd_t *cmd, int terminate);
106 static void qlt_handle_inot(qlt_state_t *qlt, uint8_t *inot);
107 int qlt_set_uniq_flag(uint16_t *ptr, uint16_t setf, uint16_t abortf);
108 fct_status_t qlt_abort_cmd(struct fct_local_port *port,
109     fct_cmd_t *cmd, uint32_t flags);
110 fct_status_t qlt_abort_sol_cmd(qlt_state_t *qlt, fct_cmd_t *cmd);
111 fct_status_t qlt_abort_purex(qlt_state_t *qlt, fct_cmd_t *cmd);
112 fct_status_t qlt_abort_unsol_scsi_cmd(qlt_state_t *qlt, fct_cmd_t *cmd);
113 fct_status_t qlt_send_cmd(fct_cmd_t *cmd);
114 fct_status_t qlt_send_els(qlt_state_t *qlt, fct_cmd_t *cmd);
115 fct_status_t qlt_send_status(qlt_state_t *qlt, fct_cmd_t *cmd);
116 fct_status_t qlt_xfer_scsi_data(fct_cmd_t *cmd,
117     stmf_data_buf_t *dbuf, uint32_t ioflags);
118 fct_status_t qlt_send_ct(qlt_state_t *qlt, fct_cmd_t *cmd);
119 static void qlt_handle_ct_completion(qlt_state_t *qlt, uint8_t *rsp);
120 static void qlt_release_intr(qlt_state_t *qlt);
121 static int qlt_setup_interrupts(qlt_state_t *qlt);
122 static void qlt_destroy_mutex(qlt_state_t *qlt);
123 
124 static fct_status_t qlt_read_risc_ram(qlt_state_t *qlt, uint32_t addr,
125     uint32_t words);
126 static int qlt_dump_queue(qlt_state_t *qlt, caddr_t qadr, int entries,
127     caddr_t buf, uint_t size_left);
128 static int qlt_dump_risc_ram(qlt_state_t *qlt, uint32_t addr, uint32_t words,
129     caddr_t buf, uint_t size_left);
130 static int qlt_fwdump_dump_regs(qlt_state_t *qlt, caddr_t buf, int startaddr,
131     int count, uint_t size_left);
132 static int qlt_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
133     cred_t *credp, int *rval);
134 static int qlt_open(dev_t *devp, int flag, int otype, cred_t *credp);
135 static int qlt_close(dev_t dev, int flag, int otype, cred_t *credp);
136 
137 static int qlt_setup_msi(qlt_state_t *qlt);
138 static int qlt_setup_msix(qlt_state_t *qlt);
139 
140 static int qlt_el_trace_desc_ctor(qlt_state_t *qlt);
141 static int qlt_el_trace_desc_dtor(qlt_state_t *qlt);
142 static int qlt_validate_trace_desc(qlt_state_t *qlt);
143 static char *qlt_find_trace_start(qlt_state_t *qlt);
144 
145 static int qlt_read_int_prop(qlt_state_t *qlt, char *prop, int defval);
146 static int qlt_read_string_prop(qlt_state_t *qlt, char *prop, char **prop_val);
147 static int qlt_read_string_instance_prop(qlt_state_t *qlt, char *prop,
148     char **prop_val);
149 static int qlt_read_int_instance_prop(qlt_state_t *, char *, int);
150 static int qlt_convert_string_to_ull(char *prop, int radix,
151     u_longlong_t *result);
152 static boolean_t qlt_wwn_overload_prop(qlt_state_t *qlt);
153 static int qlt_quiesce(dev_info_t *dip);
154 static fct_status_t qlt_raw_wrt_risc_ram_word(qlt_state_t *qlt, uint32_t,
155     uint32_t);
156 static fct_status_t qlt_raw_rd_risc_ram_word(qlt_state_t *qlt, uint32_t,
157     uint32_t *);
158 static void qlt_mps_reset(qlt_state_t *qlt);
159 static void qlt_properties(qlt_state_t *qlt);
160 
161 
162 #define	SETELSBIT(bmp, els)	(bmp)[((els) >> 3) & 0x1F] = \
163 	(uint8_t)((bmp)[((els) >> 3) & 0x1F] | ((uint8_t)1) << ((els) & 7))
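/*
 * The ELS bitmap handed to the firmware is treated as a 32-byte array:
 * opcode 'els' maps to byte ((els) >> 3) & 0x1F and bit (els) & 7 within
 * that byte. As a worked example, assuming ELS_OP_PLOGI carries the
 * standard FC-LS value 0x03, SETELSBIT(bmp, ELS_OP_PLOGI) sets bit 3 of
 * bmp[0].
 */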
164 
165 int qlt_enable_msix = 0;
166 int qlt_enable_msi = 1;
167 
168 
169 string_table_t prop_status_tbl[] = DDI_PROP_STATUS();
170 
171 /* Array to quickly calculate next free buf index to use */
172 #if 0
173 static int qlt_nfb[] = { 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 0xff };
174 #endif
175 
176 static struct cb_ops qlt_cb_ops = {
177 	qlt_open,
178 	qlt_close,
179 	nodev,
180 	nodev,
181 	nodev,
182 	nodev,
183 	nodev,
184 	qlt_ioctl,
185 	nodev,
186 	nodev,
187 	nodev,
188 	nochpoll,
189 	ddi_prop_op,
190 	0,
191 	D_MP | D_NEW
192 };
193 
194 static struct dev_ops qlt_ops = {
195 	DEVO_REV,
196 	0,
197 	nodev,
198 	nulldev,
199 	nulldev,
200 	qlt_attach,
201 	qlt_detach,
202 	nodev,
203 	&qlt_cb_ops,
204 	NULL,
205 	ddi_power,
206 	qlt_quiesce
207 };
208 
209 #ifndef	PORT_SPEED_10G
210 #define	PORT_SPEED_10G		16
211 #endif
212 
213 static struct modldrv modldrv = {
214 	&mod_driverops,
215 	QLT_NAME" "QLT_VERSION,
216 	&qlt_ops,
217 };
218 
219 static struct modlinkage modlinkage = {
220 	MODREV_1, &modldrv, NULL
221 };
222 
223 void *qlt_state = NULL;
224 kmutex_t qlt_global_lock;
225 static uint32_t qlt_loaded_counter = 0;
226 
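/*
 * Indexed by the 4-bit PCI-X mode field that qlt_attach() extracts from
 * REG_CTRL_STATUS (bits 8-11) on 2422 chips; the "--Invalid--" entry is a
 * placeholder for the reserved encoding.
 */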
227 static char *pci_speeds[] = { " 33", "-X Mode 1 66", "-X Mode 1 100",
228 			"-X Mode 1 133", "--Invalid--",
229 			"-X Mode 2 66", "-X Mode 2 100",
230 			"-X Mode 2 133", " 66" };
231 
232 /* Always use 64 bit DMA. */
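/*
 * These attributes describe the contiguous queue area allocated in
 * qlt_attach(): a full 64-bit address range, 64-byte alignment for the
 * IOCB queues, and a scatter/gather list length of 1 so the whole region
 * binds to a single DMA cookie (attach bails out if
 * ddi_dma_addr_bind_handle() returns more than one cookie).
 */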
233 static ddi_dma_attr_t qlt_queue_dma_attr = {
234 	DMA_ATTR_V0,		/* dma_attr_version */
235 	0,			/* low DMA address range */
236 	0xffffffffffffffff,	/* high DMA address range */
237 	0xffffffff,		/* DMA counter register */
238 	64,			/* DMA address alignment */
239 	0xff,			/* DMA burstsizes */
240 	1,			/* min effective DMA size */
241 	0xffffffff,		/* max DMA xfer size */
242 	0xffffffff,		/* segment boundary */
243 	1,			/* s/g list length */
244 	1,			/* granularity of device */
245 	0			/* DMA transfer flags */
246 };
247 
248 /* qlogic logging */
249 int enable_extended_logging = 0;
250 
251 static char qlt_provider_name[] = "qlt";
252 static struct stmf_port_provider *qlt_pp;
253 
254 int
255 _init(void)
256 {
257 	int ret;
258 
259 	ret = ddi_soft_state_init(&qlt_state, sizeof (qlt_state_t), 0);
260 	if (ret == 0) {
261 		mutex_init(&qlt_global_lock, 0, MUTEX_DRIVER, 0);
262 		qlt_pp = (stmf_port_provider_t *)stmf_alloc(
263 		    STMF_STRUCT_PORT_PROVIDER, 0, 0);
264 		qlt_pp->pp_portif_rev = PORTIF_REV_1;
265 		qlt_pp->pp_name = qlt_provider_name;
266 		if (stmf_register_port_provider(qlt_pp) != STMF_SUCCESS) {
267 			stmf_free(qlt_pp);
268 			mutex_destroy(&qlt_global_lock);
269 			ddi_soft_state_fini(&qlt_state);
270 			return (EIO);
271 		}
272 		ret = mod_install(&modlinkage);
273 		if (ret != 0) {
274 			(void) stmf_deregister_port_provider(qlt_pp);
275 			stmf_free(qlt_pp);
276 			mutex_destroy(&qlt_global_lock);
277 			ddi_soft_state_fini(&qlt_state);
278 		}
279 	}
280 	return (ret);
281 }
282 
283 int
284 _fini(void)
285 {
286 	int ret;
287 
288 	if (qlt_loaded_counter)
289 		return (EBUSY);
290 	ret = mod_remove(&modlinkage);
291 	if (ret == 0) {
292 		(void) stmf_deregister_port_provider(qlt_pp);
293 		stmf_free(qlt_pp);
294 		mutex_destroy(&qlt_global_lock);
295 		ddi_soft_state_fini(&qlt_state);
296 	}
297 	return (ret);
298 }
299 
300 int
301 _info(struct modinfo *modinfop)
302 {
303 	return (mod_info(&modlinkage, modinfop));
304 }
305 
306 
307 static int
308 qlt_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
309 {
310 	int		instance;
311 	qlt_state_t	*qlt;
312 	ddi_device_acc_attr_t	dev_acc_attr;
313 	uint16_t	did;
314 	uint16_t	val;
315 	uint16_t	mr;
316 	size_t		discard;
317 	uint_t		ncookies;
318 	int		max_read_size;
319 	int		max_payload_size;
320 	fct_status_t	ret;
321 
322 	/* No support for suspend/resume yet */
323 	if (cmd != DDI_ATTACH)
324 		return (DDI_FAILURE);
325 	instance = ddi_get_instance(dip);
326 
327 	if (ddi_soft_state_zalloc(qlt_state, instance) != DDI_SUCCESS) {
328 		return (DDI_FAILURE);
329 	}
330 
331 	if ((qlt = (qlt_state_t *)ddi_get_soft_state(qlt_state, instance)) ==
332 	    NULL) {
333 		goto attach_fail_1;
334 	}
335 
336 	qlt->instance = instance;
337 
338 	qlt->nvram = (qlt_nvram_t *)kmem_zalloc(sizeof (qlt_nvram_t), KM_SLEEP);
339 	qlt->dip = dip;
340 
341 	if (qlt_el_trace_desc_ctor(qlt) != DDI_SUCCESS) {
342 		cmn_err(CE_WARN, "qlt(%d): can't setup el tracing", instance);
343 		goto attach_fail_1;
344 	}
345 
346 	EL(qlt, "instance=%d, ptr=%p\n", instance, (void *)qlt);
347 
348 	if (pci_config_setup(dip, &qlt->pcicfg_acc_handle) != DDI_SUCCESS) {
349 		goto attach_fail_2;
350 	}
351 	did = PCICFG_RD16(qlt, PCI_CONF_DEVID);
352 	if ((did != 0x2422) && (did != 0x2432) &&
353 	    (did != 0x8432) && (did != 0x2532) &&
354 	    (did != 0x8001)) {
355 		cmn_err(CE_WARN, "qlt(%d): unknown devid(%x), failing attach",
356 		    instance, did);
357 		goto attach_fail_4;
358 	}
359 
360 	if ((did & 0xFF00) == 0x8000)
361 		qlt->qlt_81xx_chip = 1;
362 	else if ((did & 0xFF00) == 0x2500)
363 		qlt->qlt_25xx_chip = 1;
364 
365 	dev_acc_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
366 	dev_acc_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
367 	dev_acc_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
368 	if (ddi_regs_map_setup(dip, 2, &qlt->regs, 0, 0x100,
369 	    &dev_acc_attr, &qlt->regs_acc_handle) != DDI_SUCCESS) {
370 		goto attach_fail_4;
371 	}
372 	if (did == 0x2422) {
373 		uint32_t pci_bits = REG_RD32(qlt, REG_CTRL_STATUS);
374 		uint32_t slot = pci_bits & PCI_64_BIT_SLOT;
375 		pci_bits >>= 8;
376 		pci_bits &= 0xf;
377 		if ((pci_bits == 3) || (pci_bits == 7)) {
378 			cmn_err(CE_NOTE,
379 			    "!qlt(%d): HBA running at PCI%sMHz (%d)",
380 			    instance, pci_speeds[pci_bits], pci_bits);
381 		} else {
382 			cmn_err(CE_WARN,
383 			    "qlt(%d): HBA running at PCI%sMHz %s(%d)",
384 			    instance, (pci_bits <= 8) ? pci_speeds[pci_bits] :
385 			    "(Invalid)", ((pci_bits == 0) ||
386 			    (pci_bits == 8)) ? (slot ? "64 bit slot " :
387 			    "32 bit slot ") : "", pci_bits);
388 		}
389 	}
390 	if ((ret = qlt_read_nvram(qlt)) != QLT_SUCCESS) {
391 		cmn_err(CE_WARN, "qlt(%d): read nvram failure %llx", instance,
392 		    (unsigned long long)ret);
393 		goto attach_fail_5;
394 	}
395 
396 	qlt_properties(qlt);
397 
398 	if (ddi_dma_alloc_handle(dip, &qlt_queue_dma_attr, DDI_DMA_SLEEP,
399 	    0, &qlt->queue_mem_dma_handle) != DDI_SUCCESS) {
400 		goto attach_fail_5;
401 	}
402 	if (ddi_dma_mem_alloc(qlt->queue_mem_dma_handle, TOTAL_DMA_MEM_SIZE,
403 	    &dev_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
404 	    &qlt->queue_mem_ptr, &discard, &qlt->queue_mem_acc_handle) !=
405 	    DDI_SUCCESS) {
406 		goto attach_fail_6;
407 	}
408 	if (ddi_dma_addr_bind_handle(qlt->queue_mem_dma_handle, NULL,
409 	    qlt->queue_mem_ptr, TOTAL_DMA_MEM_SIZE,
410 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
411 	    &qlt->queue_mem_cookie, &ncookies) != DDI_SUCCESS) {
412 		goto attach_fail_7;
413 	}
414 	if (ncookies != 1)
415 		goto attach_fail_8;
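	/*
	 * Carve the single bound DMA area into the four hardware queues;
	 * the *_OFFSET constants partition TOTAL_DMA_MEM_SIZE among the
	 * request, response, priority and ATIO queues.
	 */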
416 	qlt->req_ptr = qlt->queue_mem_ptr + REQUEST_QUEUE_OFFSET;
417 	qlt->resp_ptr = qlt->queue_mem_ptr + RESPONSE_QUEUE_OFFSET;
418 	qlt->preq_ptr = qlt->queue_mem_ptr + PRIORITY_QUEUE_OFFSET;
419 	qlt->atio_ptr = qlt->queue_mem_ptr + ATIO_QUEUE_OFFSET;
420 
421 	/* mutexes are initialized in this function */
422 	if (qlt_setup_interrupts(qlt) != DDI_SUCCESS)
423 		goto attach_fail_8;
424 
425 	(void) snprintf(qlt->qlt_minor_name, sizeof (qlt->qlt_minor_name),
426 	    "qlt%d", instance);
427 	(void) snprintf(qlt->qlt_port_alias, sizeof (qlt->qlt_port_alias),
428 	    "%s,0", qlt->qlt_minor_name);
429 
430 	if (ddi_create_minor_node(dip, qlt->qlt_minor_name, S_IFCHR,
431 	    instance, DDI_NT_STMF_PP, 0) != DDI_SUCCESS) {
432 		goto attach_fail_9;
433 	}
434 
435 	cv_init(&qlt->rp_dereg_cv, NULL, CV_DRIVER, NULL);
436 	cv_init(&qlt->mbox_cv, NULL, CV_DRIVER, NULL);
437 	mutex_init(&qlt->qlt_ioctl_lock, NULL, MUTEX_DRIVER, NULL);
438 
439 	/* Setup PCI cfg space registers */
440 	max_read_size = qlt_read_int_prop(qlt, "pci-max-read-request", 11);
441 	if (max_read_size == 11)
442 		goto over_max_read_xfer_setting;
443 	if (did == 0x2422) {
444 		if (max_read_size == 512)
445 			val = 0;
446 		else if (max_read_size == 1024)
447 			val = 1;
448 		else if (max_read_size == 2048)
449 			val = 2;
450 		else if (max_read_size == 4096)
451 			val = 3;
452 		else {
453 			cmn_err(CE_WARN, "qlt(%d) malformed "
454 			    "pci-max-read-request in qlt.conf. Valid values "
455 			    "for this HBA are 512/1024/2048/4096", instance);
456 			goto over_max_read_xfer_setting;
457 		}
458 		mr = (uint16_t)PCICFG_RD16(qlt, 0x4E);
459 		mr = (uint16_t)(mr & 0xfff3);
460 		mr = (uint16_t)(mr | (val << 2));
461 		PCICFG_WR16(qlt, 0x4E, mr);
462 	} else if ((did == 0x2432) || (did == 0x8432) ||
463 	    (did == 0x2532) || (did == 0x8001)) {
464 		if (max_read_size == 128)
465 			val = 0;
466 		else if (max_read_size == 256)
467 			val = 1;
468 		else if (max_read_size == 512)
469 			val = 2;
470 		else if (max_read_size == 1024)
471 			val = 3;
472 		else if (max_read_size == 2048)
473 			val = 4;
474 		else if (max_read_size == 4096)
475 			val = 5;
476 		else {
477 			cmn_err(CE_WARN, "qlt(%d) malformed "
478 			    "pci-max-read-request in qlt.conf. Valid values "
479 			    "for this HBA are 128/256/512/1024/2048/4096",
480 			    instance);
481 			goto over_max_read_xfer_setting;
482 		}
483 		mr = (uint16_t)PCICFG_RD16(qlt, 0x54);
484 		mr = (uint16_t)(mr & 0x8fff);
485 		mr = (uint16_t)(mr | (val << 12));
486 		PCICFG_WR16(qlt, 0x54, mr);
487 	} else {
488 		cmn_err(CE_WARN, "qlt(%d): don't know how to set "
489 		    "pci-max-read-request for this device (%x)",
490 		    instance, did);
491 	}
492 over_max_read_xfer_setting:;
493 
494 	max_payload_size = qlt_read_int_prop(qlt, "pcie-max-payload-size", 11);
495 	if (max_payload_size == 11)
496 		goto over_max_payload_setting;
497 	if ((did == 0x2432) || (did == 0x8432) ||
498 	    (did == 0x2532) || (did == 0x8001)) {
499 		if (max_payload_size == 128)
500 			val = 0;
501 		else if (max_payload_size == 256)
502 			val = 1;
503 		else if (max_payload_size == 512)
504 			val = 2;
505 		else if (max_payload_size == 1024)
506 			val = 3;
507 		else {
508 			cmn_err(CE_WARN, "qlt(%d) malformed "
509 			    "pcie-max-payload-size in qlt.conf. Valid values "
510 			    "for this HBA are 128/256/512/1024",
511 			    instance);
512 			goto over_max_payload_setting;
513 		}
514 		mr = (uint16_t)PCICFG_RD16(qlt, 0x54);
515 		mr = (uint16_t)(mr & 0xff1f);
516 		mr = (uint16_t)(mr | (val << 5));
517 		PCICFG_WR16(qlt, 0x54, mr);
518 	} else {
519 		cmn_err(CE_WARN, "qlt(%d): don't know how to set "
520 		    "pcie-max-payload-size for this device (%x)",
521 		    instance, did);
522 	}
523 
524 over_max_payload_setting:;
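	/*
	 * Both settings above are optional. A sketch of the corresponding
	 * qlt.conf entries (standard driver.conf syntax; values must be
	 * among those accepted above):
	 *
	 *	pci-max-read-request=2048;
	 *	pcie-max-payload-size=1024;
	 */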
525 
526 	qlt_enable_intr(qlt);
527 
528 	if (qlt_port_start((caddr_t)qlt) != QLT_SUCCESS)
529 		goto attach_fail_10;
530 
531 	ddi_report_dev(dip);
532 	return (DDI_SUCCESS);
533 
534 attach_fail_10:;
535 	mutex_destroy(&qlt->qlt_ioctl_lock);
536 	cv_destroy(&qlt->mbox_cv);
537 	cv_destroy(&qlt->rp_dereg_cv);
538 	ddi_remove_minor_node(dip, qlt->qlt_minor_name);
539 attach_fail_9:;
540 	qlt_destroy_mutex(qlt);
541 	qlt_release_intr(qlt);
542 attach_fail_8:;
543 	(void) ddi_dma_unbind_handle(qlt->queue_mem_dma_handle);
544 attach_fail_7:;
545 	ddi_dma_mem_free(&qlt->queue_mem_acc_handle);
546 attach_fail_6:;
547 	ddi_dma_free_handle(&qlt->queue_mem_dma_handle);
548 attach_fail_5:;
549 	ddi_regs_map_free(&qlt->regs_acc_handle);
550 attach_fail_4:;
551 	pci_config_teardown(&qlt->pcicfg_acc_handle);
552 	kmem_free(qlt->nvram, sizeof (qlt_nvram_t));
553 	(void) qlt_el_trace_desc_dtor(qlt);
554 attach_fail_2:;
555 attach_fail_1:;
556 	ddi_soft_state_free(qlt_state, instance);
557 	return (DDI_FAILURE);
558 }
559 
560 #define	FCT_I_EVENT_BRING_PORT_OFFLINE	0x83
561 
562 /* ARGSUSED */
563 static int
564 qlt_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
565 {
566 	qlt_state_t *qlt;
567 
568 	int instance;
569 
570 	instance = ddi_get_instance(dip);
571 	if ((qlt = (qlt_state_t *)ddi_get_soft_state(qlt_state, instance)) ==
572 	    NULL) {
573 		return (DDI_FAILURE);
574 	}
575 
576 	if (qlt->fw_code01) {
577 		return (DDI_FAILURE);
578 	}
579 
580 	if ((qlt->qlt_state != FCT_STATE_OFFLINE) ||
581 	    qlt->qlt_state_not_acked) {
582 		return (DDI_FAILURE);
583 	}
584 	if (qlt_port_stop((caddr_t)qlt) != FCT_SUCCESS) {
585 		return (DDI_FAILURE);
586 	}
587 
588 	qlt_disable_intr(qlt);
589 
590 	ddi_remove_minor_node(dip, qlt->qlt_minor_name);
591 	qlt_destroy_mutex(qlt);
592 	qlt_release_intr(qlt);
593 	(void) ddi_dma_unbind_handle(qlt->queue_mem_dma_handle);
594 	ddi_dma_mem_free(&qlt->queue_mem_acc_handle);
595 	ddi_dma_free_handle(&qlt->queue_mem_dma_handle);
596 	ddi_regs_map_free(&qlt->regs_acc_handle);
597 	pci_config_teardown(&qlt->pcicfg_acc_handle);
598 	kmem_free(qlt->nvram, sizeof (qlt_nvram_t));
599 	cv_destroy(&qlt->mbox_cv);
600 	cv_destroy(&qlt->rp_dereg_cv);
601 	(void) qlt_el_trace_desc_dtor(qlt);
602 	ddi_soft_state_free(qlt_state, instance);
603 
604 	return (DDI_SUCCESS);
605 }
606 
607 /*
608  * qlt_quiesce	quiesce a device attached to the system.
609  */
610 static int
611 qlt_quiesce(dev_info_t *dip)
612 {
613 	qlt_state_t	*qlt;
614 	uint32_t	timer;
615 	uint32_t	stat;
616 
617 	qlt = ddi_get_soft_state(qlt_state, ddi_get_instance(dip));
618 	if (qlt == NULL) {
619 		/* Oh well.... */
620 		return (DDI_SUCCESS);
621 	}
622 
623 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_HOST_TO_RISC_INTR));
624 	REG_WR16(qlt, REG_MBOX0, MBC_STOP_FIRMWARE);
625 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(SET_HOST_TO_RISC_INTR));
626 	for (timer = 0; timer < 30000; timer++) {
627 		stat = REG_RD32(qlt, REG_RISC_STATUS);
628 		if (stat & RISC_HOST_INTR_REQUEST) {
629 			if ((stat & FW_INTR_STATUS_MASK) < 0x12) {
630 				REG_WR32(qlt, REG_HCCR,
631 				    HCCR_CMD(CLEAR_RISC_PAUSE));
632 				break;
633 			}
634 			REG_WR32(qlt, REG_HCCR,
635 			    HCCR_CMD(CLEAR_HOST_TO_RISC_INTR));
636 		}
637 		drv_usecwait(100);
638 	}
639 	/* Reset the chip. */
640 	REG_WR32(qlt, REG_CTRL_STATUS, CHIP_SOFT_RESET | DMA_SHUTDOWN_CTRL |
641 	    PCI_X_XFER_CTRL);
642 	drv_usecwait(100);
643 
644 	qlt_disable_intr(qlt);
645 
646 	return (DDI_SUCCESS);
647 }
648 
649 static void
650 qlt_enable_intr(qlt_state_t *qlt)
651 {
652 	if (qlt->intr_cap & DDI_INTR_FLAG_BLOCK) {
653 		(void) ddi_intr_block_enable(qlt->htable, qlt->intr_cnt);
654 	} else {
655 		int i;
656 		for (i = 0; i < qlt->intr_cnt; i++)
657 			(void) ddi_intr_enable(qlt->htable[i]);
658 	}
659 	qlt->qlt_intr_enabled = 1;
660 }
661 
662 static void
663 qlt_disable_intr(qlt_state_t *qlt)
664 {
665 	if (qlt->intr_cap & DDI_INTR_FLAG_BLOCK) {
666 		(void) ddi_intr_block_disable(qlt->htable, qlt->intr_cnt);
667 	} else {
668 		int i;
669 		for (i = 0; i < qlt->intr_cnt; i++)
670 			(void) ddi_intr_disable(qlt->htable[i]);
671 	}
672 	qlt->qlt_intr_enabled = 0;
673 }
674 
675 static void
676 qlt_release_intr(qlt_state_t *qlt)
677 {
678 	if (qlt->htable) {
679 		int i;
680 		for (i = 0; i < qlt->intr_cnt; i++) {
681 			(void) ddi_intr_remove_handler(qlt->htable[i]);
682 			(void) ddi_intr_free(qlt->htable[i]);
683 		}
684 		kmem_free(qlt->htable, (uint_t)qlt->intr_size);
685 	}
686 	qlt->htable = NULL;
687 	qlt->intr_pri = 0;
688 	qlt->intr_cnt = 0;
689 	qlt->intr_size = 0;
690 	qlt->intr_cap = 0;
691 }
692 
693 
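/*
 * The per-instance locks are created at the interrupt priority returned by
 * ddi_intr_get_pri() so that they may legally be acquired from qlt_isr();
 * this is why qlt_init_mutex() is only called once the interrupt priority
 * is known.
 */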
694 static void
695 qlt_init_mutex(qlt_state_t *qlt)
696 {
697 	mutex_init(&qlt->req_lock, 0, MUTEX_DRIVER,
698 	    INT2PTR(qlt->intr_pri, void *));
699 	mutex_init(&qlt->preq_lock, 0, MUTEX_DRIVER,
700 	    INT2PTR(qlt->intr_pri, void *));
701 	mutex_init(&qlt->mbox_lock, NULL, MUTEX_DRIVER,
702 	    INT2PTR(qlt->intr_pri, void *));
703 	mutex_init(&qlt->intr_lock, NULL, MUTEX_DRIVER,
704 	    INT2PTR(qlt->intr_pri, void *));
705 }
706 
707 static void
708 qlt_destroy_mutex(qlt_state_t *qlt)
709 {
710 	mutex_destroy(&qlt->req_lock);
711 	mutex_destroy(&qlt->preq_lock);
712 	mutex_destroy(&qlt->mbox_lock);
713 	mutex_destroy(&qlt->intr_lock);
714 }
715 
716 
717 static int
718 qlt_setup_msix(qlt_state_t *qlt)
719 {
720 	int count, avail, actual;
721 	int ret;
722 	int itype = DDI_INTR_TYPE_MSIX;
723 	int i;
724 
725 	ret = ddi_intr_get_nintrs(qlt->dip, itype, &count);
726 	if (ret != DDI_SUCCESS || count == 0) {
727 		EL(qlt, "ddi_intr_get_nintrs status=%xh, count=%d\n", ret,
728 		    count);
729 		return (DDI_FAILURE);
730 	}
731 	ret = ddi_intr_get_navail(qlt->dip, itype, &avail);
732 	if (ret != DDI_SUCCESS || avail == 0) {
733 		EL(qlt, "ddi_intr_get_navail status=%xh, avail=%d\n", ret,
734 		    avail);
735 		return (DDI_FAILURE);
736 	}
737 	if (avail < count) {
738 		stmf_trace(qlt->qlt_port_alias,
739 		    "qlt_setup_msix: nintrs=%d,avail=%d", count, avail);
740 	}
741 
742 	qlt->intr_size = (int)(count * (int)sizeof (ddi_intr_handle_t));
743 	qlt->htable = kmem_zalloc((uint_t)qlt->intr_size, KM_SLEEP);
744 	ret = ddi_intr_alloc(qlt->dip, qlt->htable, itype,
745 	    DDI_INTR_ALLOC_NORMAL, count, &actual, 0);
746 	/* we need at least 2 interrupt vectors */
747 	if (ret != DDI_SUCCESS || actual < 2) {
748 		EL(qlt, "ddi_intr_alloc status=%xh, actual=%d\n", ret,
749 		    actual);
750 		ret = DDI_FAILURE;
751 		goto release_intr;
752 	}
753 	if (actual < count) {
754 		EL(qlt, "requested: %d, received: %d\n", count, actual);
755 	}
756 
757 	qlt->intr_cnt = actual;
758 	ret =  ddi_intr_get_pri(qlt->htable[0], &qlt->intr_pri);
759 	if (ret != DDI_SUCCESS) {
760 		EL(qlt, "ddi_intr_get_pri status=%xh\n", ret);
761 		ret = DDI_FAILURE;
762 		goto release_intr;
763 	}
764 	qlt_init_mutex(qlt);
765 	for (i = 0; i < actual; i++) {
766 		ret = ddi_intr_add_handler(qlt->htable[i], qlt_isr,
767 		    qlt, INT2PTR((uint_t)i, void *));
768 		if (ret != DDI_SUCCESS) {
769 			EL(qlt, "ddi_intr_add_handler status=%xh\n", ret);
770 			goto release_mutex;
771 		}
772 	}
773 
774 	(void) ddi_intr_get_cap(qlt->htable[0], &qlt->intr_cap);
775 	qlt->intr_flags |= QLT_INTR_MSIX;
776 	return (DDI_SUCCESS);
777 
778 release_mutex:
779 	qlt_destroy_mutex(qlt);
780 release_intr:
781 	for (i = 0; i < actual; i++)
782 		(void) ddi_intr_free(qlt->htable[i]);
783 #if 0
784 free_mem:
785 #endif
786 	kmem_free(qlt->htable, (uint_t)qlt->intr_size);
787 	qlt->htable = NULL;
788 	qlt_release_intr(qlt);
789 	return (ret);
790 }
791 
792 
793 static int
794 qlt_setup_msi(qlt_state_t *qlt)
795 {
796 	int count, avail, actual;
797 	int itype = DDI_INTR_TYPE_MSI;
798 	int ret;
799 	int i;
800 
801 	/* get the # of interrupts */
802 	ret = ddi_intr_get_nintrs(qlt->dip, itype, &count);
803 	if (ret != DDI_SUCCESS || count == 0) {
804 		EL(qlt, "ddi_intr_get_nintrs status=%xh, count=%d\n", ret,
805 		    count);
806 		return (DDI_FAILURE);
807 	}
808 	ret = ddi_intr_get_navail(qlt->dip, itype, &avail);
809 	if (ret != DDI_SUCCESS || avail == 0) {
810 		EL(qlt, "ddi_intr_get_navail status=%xh, avail=%d\n", ret,
811 		    avail);
812 		return (DDI_FAILURE);
813 	}
814 	if (avail < count) {
815 		EL(qlt, "nintrs=%d, avail=%d\n", count, avail);
816 	}
817 	/* MSI requires only 1 interrupt. */
818 	count = 1;
819 
820 	/* allocate interrupt */
821 	qlt->intr_size = (int)(count * (int)sizeof (ddi_intr_handle_t));
822 	qlt->htable = kmem_zalloc((uint_t)qlt->intr_size, KM_SLEEP);
823 	ret = ddi_intr_alloc(qlt->dip, qlt->htable, itype,
824 	    0, count, &actual, DDI_INTR_ALLOC_NORMAL);
825 	if (ret != DDI_SUCCESS || actual == 0) {
826 		EL(qlt, "ddi_intr_alloc status=%xh, actual=%d\n", ret,
827 		    actual);
828 		ret = DDI_FAILURE;
829 		goto free_mem;
830 	}
831 	if (actual < count) {
832 		EL(qlt, "requested: %d, received: %d\n", count, actual);
833 	}
834 	qlt->intr_cnt = actual;
835 
836 	/*
837 	 * Get priority for first msi, assume remaining are all the same.
838 	 */
839 	ret =  ddi_intr_get_pri(qlt->htable[0], &qlt->intr_pri);
840 	if (ret != DDI_SUCCESS) {
841 		EL(qlt, "ddi_intr_get_pri status=%xh\n", ret);
842 		ret = DDI_FAILURE;
843 		goto release_intr;
844 	}
845 	qlt_init_mutex(qlt);
846 
847 	/* add handler */
848 	for (i = 0; i < actual; i++) {
849 		ret = ddi_intr_add_handler(qlt->htable[i], qlt_isr,
850 		    qlt, INT2PTR((uint_t)i, void *));
851 		if (ret != DDI_SUCCESS) {
852 			EL(qlt, "ddi_intr_add_handler status=%xh\n", ret);
853 			goto release_mutex;
854 		}
855 	}
856 
857 	(void) ddi_intr_get_cap(qlt->htable[0], &qlt->intr_cap);
858 	qlt->intr_flags |= QLT_INTR_MSI;
859 	return (DDI_SUCCESS);
860 
861 release_mutex:
862 	qlt_destroy_mutex(qlt);
863 release_intr:
864 	for (i = 0; i < actual; i++)
865 		(void) ddi_intr_free(qlt->htable[i]);
866 free_mem:
867 	kmem_free(qlt->htable, (uint_t)qlt->intr_size);
868 	qlt->htable = NULL;
869 	qlt_release_intr(qlt);
870 	return (ret);
871 }
872 
873 static int
874 qlt_setup_fixed(qlt_state_t *qlt)
875 {
876 	int count;
877 	int actual;
878 	int ret;
879 	int itype = DDI_INTR_TYPE_FIXED;
880 
881 	ret = ddi_intr_get_nintrs(qlt->dip, itype, &count);
882 	/* Fixed interrupts can only have one interrupt. */
883 	if (ret != DDI_SUCCESS || count != 1) {
884 		EL(qlt, "ddi_intr_get_nintrs status=%xh, count=%d\n", ret,
885 		    count);
886 		return (DDI_FAILURE);
887 	}
888 
889 	qlt->intr_size = sizeof (ddi_intr_handle_t);
890 	qlt->htable = kmem_zalloc((uint_t)qlt->intr_size, KM_SLEEP);
891 	ret = ddi_intr_alloc(qlt->dip, qlt->htable, itype,
892 	    DDI_INTR_ALLOC_NORMAL, count, &actual, 0);
893 	if (ret != DDI_SUCCESS || actual != 1) {
894 		EL(qlt, "ddi_intr_alloc status=%xh, actual=%d\n", ret,
895 		    actual);
896 		ret = DDI_FAILURE;
897 		goto free_mem;
898 	}
899 
900 	qlt->intr_cnt = actual;
901 	ret =  ddi_intr_get_pri(qlt->htable[0], &qlt->intr_pri);
902 	if (ret != DDI_SUCCESS) {
903 		EL(qlt, "ddi_intr_get_pri status=%xh\n", ret);
904 		ret = DDI_FAILURE;
905 		goto release_intr;
906 	}
907 	qlt_init_mutex(qlt);
908 	ret = ddi_intr_add_handler(qlt->htable[0], qlt_isr, qlt, 0);
909 	if (ret != DDI_SUCCESS) {
910 		EL(qlt, "ddi_intr_add_handler status=%xh\n", ret);
911 		goto release_mutex;
912 	}
913 
914 	qlt->intr_flags |= QLT_INTR_FIXED;
915 	return (DDI_SUCCESS);
916 
917 release_mutex:
918 	qlt_destroy_mutex(qlt);
919 release_intr:
920 	(void) ddi_intr_free(qlt->htable[0]);
921 free_mem:
922 	kmem_free(qlt->htable, (uint_t)qlt->intr_size);
923 	qlt->htable = NULL;
924 	qlt_release_intr(qlt);
925 	return (ret);
926 }
927 
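/*
 * Interrupt setup falls back in the order MSI-X -> MSI -> fixed (INTx).
 * MSI-X is off by default (qlt_enable_msix == 0) and MSI is on; as a
 * sketch, both tunables can be overridden from /etc/system, e.g.
 *	set qlt:qlt_enable_msix = 1
 */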
928 static int
929 qlt_setup_interrupts(qlt_state_t *qlt)
930 {
931 	int itypes = 0;
932 
933 /*
934  * x86 has a bug in the ddi_intr_block_enable/disable area (6562198).
935  */
936 #ifndef __sparc
937 	if (qlt_enable_msi != 0) {
938 #endif
939 	if (ddi_intr_get_supported_types(qlt->dip, &itypes) != DDI_SUCCESS) {
940 		itypes = DDI_INTR_TYPE_FIXED;
941 	}
942 
943 	if (qlt_enable_msix && (itypes & DDI_INTR_TYPE_MSIX)) {
944 		if (qlt_setup_msix(qlt) == DDI_SUCCESS)
945 			return (DDI_SUCCESS);
946 	}
947 
948 	if (itypes & DDI_INTR_TYPE_MSI) {
949 		if (qlt_setup_msi(qlt) == DDI_SUCCESS)
950 			return (DDI_SUCCESS);
951 	}
952 #ifndef __sparc
953 	}
954 #endif
955 	return (qlt_setup_fixed(qlt));
956 }
957 
958 /*
959  * Fill in the HBA attributes
960  */
961 void
962 qlt_populate_hba_fru_details(struct fct_local_port *port,
963     struct fct_port_attrs *port_attrs)
964 {
965 	caddr_t	bufp;
966 	int len;
967 	qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
968 
969 	(void) snprintf(port_attrs->manufacturer, FCHBA_MANUFACTURER_LEN,
970 	    "QLogic Corp.");
971 	(void) snprintf(port_attrs->driver_name, FCHBA_DRIVER_NAME_LEN,
972 	    "%s", QLT_NAME);
973 	(void) snprintf(port_attrs->driver_version, FCHBA_DRIVER_VERSION_LEN,
974 	    "%s", QLT_VERSION);
975 	port_attrs->serial_number[0] = '\0';
976 	port_attrs->hardware_version[0] = '\0';
977 
978 	(void) snprintf(port_attrs->firmware_version,
979 	    FCHBA_FIRMWARE_VERSION_LEN, "%d.%d.%d", qlt->fw_major,
980 	    qlt->fw_minor, qlt->fw_subminor);
981 
982 	/* Get FCode version */
983 	if (ddi_getlongprop(DDI_DEV_T_ANY, qlt->dip, PROP_LEN_AND_VAL_ALLOC |
984 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version", (caddr_t)&bufp,
985 	    (int *)&len) == DDI_PROP_SUCCESS) {
986 		(void) snprintf(port_attrs->option_rom_version,
987 		    FCHBA_OPTION_ROM_VERSION_LEN, "%s", bufp);
988 		kmem_free(bufp, (uint_t)len);
989 		bufp = NULL;
990 	} else {
991 #ifdef __sparc
992 		(void) snprintf(port_attrs->option_rom_version,
993 		    FCHBA_OPTION_ROM_VERSION_LEN, "No Fcode found");
994 #else
995 		(void) snprintf(port_attrs->option_rom_version,
996 		    FCHBA_OPTION_ROM_VERSION_LEN, "N/A");
997 #endif
998 	}
999 	port_attrs->vendor_specific_id = qlt->nvram->subsystem_vendor_id[0] |
1000 	    qlt->nvram->subsystem_vendor_id[1] << 8;
1001 
1002 	port_attrs->max_frame_size = qlt->nvram->max_frame_length[1] << 8 |
1003 	    qlt->nvram->max_frame_length[0];
1004 
1005 	port_attrs->supported_cos = 0x10000000;
1006 	port_attrs->supported_speed = PORT_SPEED_1G |
1007 	    PORT_SPEED_2G | PORT_SPEED_4G;
1008 	if (qlt->qlt_25xx_chip)
1009 		port_attrs->supported_speed = PORT_SPEED_2G | PORT_SPEED_4G |
1010 		    PORT_SPEED_8G;
1011 	if (qlt->qlt_81xx_chip)
1012 		port_attrs->supported_speed = PORT_SPEED_10G;
1013 
1014 	/* limit string length to nvr model_name length */
1015 	len = (qlt->qlt_81xx_chip) ? 16 : 8;
1016 	(void) snprintf(port_attrs->model,
1017 	    (uint_t)(len < FCHBA_MODEL_LEN ? len : FCHBA_MODEL_LEN),
1018 	    "%s", qlt->nvram->model_name);
1019 
1020 	(void) snprintf(port_attrs->model_description,
1021 	    (uint_t)(len < FCHBA_MODEL_DESCRIPTION_LEN ? len :
1022 	    FCHBA_MODEL_DESCRIPTION_LEN),
1023 	    "%s", qlt->nvram->model_name);
1024 }
1025 
1026 /* ARGSUSED */
1027 fct_status_t
1028 qlt_info(uint32_t cmd, fct_local_port_t *port,
1029     void *arg, uint8_t *buf, uint32_t *bufsizep)
1030 {
1031 	qlt_state_t	*qlt = (qlt_state_t *)port->port_fca_private;
1032 	mbox_cmd_t	*mcp;
1033 	fct_status_t	ret = FCT_SUCCESS;
1034 	uint8_t		*p;
1035 	fct_port_link_status_t	*link_status;
1036 
1037 	switch (cmd) {
1038 	case FC_TGT_PORT_RLS:
1039 		if ((*bufsizep) < sizeof (fct_port_link_status_t)) {
1040 			EL(qlt, "FC_TGT_PORT_RLS bufsizep=%xh < "
1041 			    "fct_port_link_status_t=%xh\n", *bufsizep,
1042 			    sizeof (fct_port_link_status_t));
1043 			ret = FCT_FAILURE;
1044 			break;
1045 		}
1046 		/* send mailbox command to get link status */
1047 		mcp = qlt_alloc_mailbox_command(qlt, 156);
1048 		if (mcp == NULL) {
1049 			EL(qlt, "qlt_alloc_mailbox_command mcp=null\n");
1050 			ret = FCT_ALLOC_FAILURE;
1051 			break;
1052 		}
1053 
1054 		/* GET LINK STATUS count */
1055 		mcp->to_fw[0] = MBC_GET_STATUS_COUNTS;
1056 		mcp->to_fw[8] = 156/4;
1057 		mcp->to_fw_mask |= BIT_1 | BIT_8;
1058 		mcp->from_fw_mask |= BIT_1 | BIT_2;
1059 
1060 		ret = qlt_mailbox_command(qlt, mcp);
1061 		if (ret != QLT_SUCCESS) {
1062 			EL(qlt, "qlt_mailbox_command=6dh status=%llxh\n", ret);
1063 			qlt_free_mailbox_command(qlt, mcp);
1064 			break;
1065 		}
1066 		qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORCPU);
1067 
1068 		p = mcp->dbuf->db_sglist[0].seg_addr;
1069 		link_status = (fct_port_link_status_t *)buf;
1070 		link_status->LinkFailureCount = LE_32(*((uint32_t *)p));
1071 		link_status->LossOfSyncCount = LE_32(*((uint32_t *)(p + 4)));
1072 		link_status->LossOfSignalsCount = LE_32(*((uint32_t *)(p + 8)));
1073 		link_status->PrimitiveSeqProtocolErrorCount =
1074 		    LE_32(*((uint32_t *)(p + 12)));
1075 		link_status->InvalidTransmissionWordCount =
1076 		    LE_32(*((uint32_t *)(p + 16)));
1077 		link_status->InvalidCRCCount =
1078 		    LE_32(*((uint32_t *)(p + 20)));
1079 
1080 		qlt_free_mailbox_command(qlt, mcp);
1081 		break;
1082 	default:
1083 		EL(qlt, "Unknown cmd=%xh\n", cmd);
1084 		ret = FCT_FAILURE;
1085 		break;
1086 	}
1087 	return (ret);
1088 }
1089 
1090 fct_status_t
1091 qlt_port_start(caddr_t arg)
1092 {
1093 	qlt_state_t *qlt = (qlt_state_t *)arg;
1094 	fct_local_port_t *port;
1095 	fct_dbuf_store_t *fds;
1096 	fct_status_t ret;
1097 
1098 	if (qlt_dmem_init(qlt) != QLT_SUCCESS) {
1099 		return (FCT_FAILURE);
1100 	}
1101 	/* Initialize the ddi_dma_handle free pool */
1102 	qlt_dma_handle_pool_init(qlt);
1103 
1104 	port = (fct_local_port_t *)fct_alloc(FCT_STRUCT_LOCAL_PORT, 0, 0);
1105 	if (port == NULL) {
1106 		goto qlt_pstart_fail_1;
1107 	}
1108 	fds = (fct_dbuf_store_t *)fct_alloc(FCT_STRUCT_DBUF_STORE, 0, 0);
1109 	if (fds == NULL) {
1110 		goto qlt_pstart_fail_2;
1111 	}
1112 	qlt->qlt_port = port;
1113 	fds->fds_alloc_data_buf = qlt_dmem_alloc;
1114 	fds->fds_free_data_buf = qlt_dmem_free;
1115 	fds->fds_setup_dbuf = qlt_dma_setup_dbuf;
1116 	fds->fds_teardown_dbuf = qlt_dma_teardown_dbuf;
1117 	fds->fds_max_sgl_xfer_len = QLT_DMA_SG_LIST_LENGTH * MMU_PAGESIZE;
1118 	fds->fds_copy_threshold = MMU_PAGESIZE;
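	/*
	 * fds_copy_threshold is set to one page, presumably so that
	 * transfers smaller than a page are copied through pre-allocated
	 * DMA buffers rather than mapped with a scatter/gather list.
	 */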
1119 	fds->fds_fca_private = (void *)qlt;
1120 	/*
1121 	 * Since we keep everything in the state struct and don't allocate any
1122 	 * port private area, just use that pointer to point to the
1123 	 * state struct.
1124 	 */
1125 	port->port_fca_private = qlt;
1126 	port->port_fca_abort_timeout = 5 * 1000;	/* 5 seconds */
1127 	bcopy(qlt->nvram->node_name, port->port_nwwn, 8);
1128 	bcopy(qlt->nvram->port_name, port->port_pwwn, 8);
1129 	fct_wwn_to_str(port->port_nwwn_str, port->port_nwwn);
1130 	fct_wwn_to_str(port->port_pwwn_str, port->port_pwwn);
1131 	port->port_default_alias = qlt->qlt_port_alias;
1132 	port->port_pp = qlt_pp;
1133 	port->port_fds = fds;
1134 	port->port_max_logins = QLT_MAX_LOGINS;
1135 	port->port_max_xchges = QLT_MAX_XCHGES;
1136 	port->port_fca_fcp_cmd_size = sizeof (qlt_cmd_t);
1137 	port->port_fca_rp_private_size = sizeof (qlt_remote_port_t);
1138 	port->port_fca_sol_els_private_size = sizeof (qlt_cmd_t);
1139 	port->port_fca_sol_ct_private_size = sizeof (qlt_cmd_t);
1140 	port->port_get_link_info = qlt_get_link_info;
1141 	port->port_register_remote_port = qlt_register_remote_port;
1142 	port->port_deregister_remote_port = qlt_deregister_remote_port;
1143 	port->port_send_cmd = qlt_send_cmd;
1144 	port->port_xfer_scsi_data = qlt_xfer_scsi_data;
1145 	port->port_send_cmd_response = qlt_send_cmd_response;
1146 	port->port_abort_cmd = qlt_abort_cmd;
1147 	port->port_ctl = qlt_ctl;
1148 	port->port_flogi_xchg = qlt_do_flogi;
1149 	port->port_populate_hba_details = qlt_populate_hba_fru_details;
1150 	port->port_info = qlt_info;
1151 	port->port_fca_version = FCT_FCA_MODREV_1;
1152 
1153 	if ((ret = fct_register_local_port(port)) != FCT_SUCCESS) {
1154 		EL(qlt, "fct_register_local_port status=%llxh\n", ret);
1155 		goto qlt_pstart_fail_2_5;
1156 	}
1157 
1158 	return (QLT_SUCCESS);
1159 #if 0
1160 qlt_pstart_fail_3:
1161 	(void) fct_deregister_local_port(port);
1162 #endif
1163 qlt_pstart_fail_2_5:
1164 	fct_free(fds);
1165 qlt_pstart_fail_2:
1166 	fct_free(port);
1167 	qlt->qlt_port = NULL;
1168 qlt_pstart_fail_1:
1169 	qlt_dma_handle_pool_fini(qlt);
1170 	qlt_dmem_fini(qlt);
1171 	return (QLT_FAILURE);
1172 }
1173 
1174 fct_status_t
1175 qlt_port_stop(caddr_t arg)
1176 {
1177 	qlt_state_t *qlt = (qlt_state_t *)arg;
1178 	fct_status_t ret;
1179 
1180 	if ((ret = fct_deregister_local_port(qlt->qlt_port)) != FCT_SUCCESS) {
1181 		EL(qlt, "fct_deregister_local_port status=%llxh\n", ret);
1182 		return (QLT_FAILURE);
1183 	}
1184 	fct_free(qlt->qlt_port->port_fds);
1185 	fct_free(qlt->qlt_port);
1186 	qlt->qlt_port = NULL;
1187 	qlt_dma_handle_pool_fini(qlt);
1188 	qlt_dmem_fini(qlt);
1189 	return (QLT_SUCCESS);
1190 }
1191 
1192 /*
1193  * Called by the framework to init the HBA.
1194  * Can be called in the middle of I/O. (Why ??)
1195  * Should ensure a sane state both before and after the initialization.
1196  */
1197 fct_status_t
1198 qlt_port_online(qlt_state_t *qlt)
1199 {
1200 	uint64_t	da;
1201 	int		instance, i;
1202 	fct_status_t	ret;
1203 	uint16_t	rcount;
1204 	caddr_t		icb;
1205 	mbox_cmd_t	*mcp;
1206 	uint8_t		*elsbmp;
1207 
1208 	instance = ddi_get_instance(qlt->dip);
1209 
1210 	/* XXX Make sure a sane state */
1211 
1212 	if ((ret = qlt_download_fw(qlt)) != QLT_SUCCESS) {
1213 		cmn_err(CE_NOTE, "reset chip failed %llx", (long long)ret);
1214 		return (ret);
1215 	}
1216 
1217 	bzero(qlt->queue_mem_ptr, TOTAL_DMA_MEM_SIZE);
1218 
1219 	/* Get resource count */
1220 	REG_WR16(qlt, REG_MBOX(0), MBC_GET_RESOURCE_COUNTS);
1221 	ret = qlt_raw_mailbox_command(qlt);
1222 	rcount = REG_RD16(qlt, REG_MBOX(3));
1223 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
1224 	if (ret != QLT_SUCCESS) {
1225 		EL(qlt, "qlt_raw_mailbox_command=42h status=%llxh\n", ret);
1226 		return (ret);
1227 	}
1228 
1229 	/* Enable PUREX */
1230 	REG_WR16(qlt, REG_MBOX(0), MBC_SET_ADDITIONAL_FIRMWARE_OPT);
1231 	REG_WR16(qlt, REG_MBOX(1), OPT_PUREX_ENABLE);
1232 	REG_WR16(qlt, REG_MBOX(2), 0x0);
1233 	REG_WR16(qlt, REG_MBOX(3), 0x0);
1234 	ret = qlt_raw_mailbox_command(qlt);
1235 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
1236 	if (ret != QLT_SUCCESS) {
1237 		EL(qlt, "qlt_raw_mailbox_command=38h status=%llxh\n", ret);
1238 		cmn_err(CE_NOTE, "Enable PUREX failed");
1239 		return (ret);
1240 	}
1241 
1242 	/* Pass ELS bitmap to fw */
1243 	REG_WR16(qlt, REG_MBOX(0), MBC_SET_PARAMETERS);
1244 	REG_WR16(qlt, REG_MBOX(1), PARAM_TYPE(PUREX_ELS_CMDS));
1245 	elsbmp = (uint8_t *)qlt->queue_mem_ptr + MBOX_DMA_MEM_OFFSET;
1246 	bzero(elsbmp, 32);
1247 	da = qlt->queue_mem_cookie.dmac_laddress;
1248 	da += MBOX_DMA_MEM_OFFSET;
1249 	REG_WR16(qlt, REG_MBOX(3), LSW(LSD(da)));
1250 	REG_WR16(qlt, REG_MBOX(2), MSW(LSD(da)));
1251 	REG_WR16(qlt, REG_MBOX(7), LSW(MSD(da)));
1252 	REG_WR16(qlt, REG_MBOX(6), MSW(MSD(da)));
1253 	SETELSBIT(elsbmp, ELS_OP_PLOGI);
1254 	SETELSBIT(elsbmp, ELS_OP_LOGO);
1255 	SETELSBIT(elsbmp, ELS_OP_ABTX);
1256 	SETELSBIT(elsbmp, ELS_OP_ECHO);
1257 	SETELSBIT(elsbmp, ELS_OP_PRLI);
1258 	SETELSBIT(elsbmp, ELS_OP_PRLO);
1259 	SETELSBIT(elsbmp, ELS_OP_SCN);
1260 	SETELSBIT(elsbmp, ELS_OP_TPRLO);
1261 	SETELSBIT(elsbmp, ELS_OP_PDISC);
1262 	SETELSBIT(elsbmp, ELS_OP_ADISC);
1263 	SETELSBIT(elsbmp, ELS_OP_RSCN);
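	/*
	 * ELS opcodes set in this bitmap are delivered to the driver by the
	 * firmware as PUREX IOCBs (see qlt_handle_purex()); opcodes left
	 * clear are handled by the firmware itself.
	 */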
1264 	SETELSBIT(elsbmp, ELS_OP_RNID);
1265 	(void) ddi_dma_sync(qlt->queue_mem_dma_handle, MBOX_DMA_MEM_OFFSET, 32,
1266 	    DDI_DMA_SYNC_FORDEV);
1267 	ret = qlt_raw_mailbox_command(qlt);
1268 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
1269 	if (ret != QLT_SUCCESS) {
1270 		EL(qlt, "qlt_raw_mailbox_command=59h status=%llxh\n", ret);
1271 		cmn_err(CE_NOTE, "Set ELS Bitmap failed ret=%llx, "
1272 		    "elsbmp0=%x elsbmp1=%x", (long long)ret, elsbmp[0],
1273 		    elsbmp[1]);
1274 		return (ret);
1275 	}
1276 
1277 	/* Init queue pointers */
1278 	REG_WR32(qlt, REG_REQ_IN_PTR, 0);
1279 	REG_WR32(qlt, REG_REQ_OUT_PTR, 0);
1280 	REG_WR32(qlt, REG_RESP_IN_PTR, 0);
1281 	REG_WR32(qlt, REG_RESP_OUT_PTR, 0);
1282 	REG_WR32(qlt, REG_PREQ_IN_PTR, 0);
1283 	REG_WR32(qlt, REG_PREQ_OUT_PTR, 0);
1284 	REG_WR32(qlt, REG_ATIO_IN_PTR, 0);
1285 	REG_WR32(qlt, REG_ATIO_OUT_PTR, 0);
1286 	qlt->req_ndx_to_fw = qlt->req_ndx_from_fw = 0;
1287 	qlt->req_available = REQUEST_QUEUE_ENTRIES - 1;
1288 	qlt->resp_ndx_to_fw = qlt->resp_ndx_from_fw = 0;
1289 	qlt->preq_ndx_to_fw = qlt->preq_ndx_from_fw = 0;
1290 	qlt->atio_ndx_to_fw = qlt->atio_ndx_from_fw = 0;
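	/*
	 * Build the initialization control block (ICB) in the mailbox DMA
	 * buffer and hand it to the firmware below via
	 * MBC_INITIALIZE_FIRMWARE; the 81xx parts append a 64-byte extended
	 * ICB taken from NVRAM.
	 */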
1291 
1292 	/*
1293 	 * XXX support for tunables. Also should we cache icb ?
1294 	 */
1295 	if (qlt->qlt_81xx_chip) {
1296 		/* allocate extra 64 bytes for Extended init control block */
1297 		mcp = qlt_alloc_mailbox_command(qlt, 0xC0);
1298 	} else {
1299 		mcp = qlt_alloc_mailbox_command(qlt, 0x80);
1300 	}
1301 	if (mcp == NULL) {
1302 		EL(qlt, "qlt_alloc_mailbox_command mcp=null\n");
1303 		return (STMF_ALLOC_FAILURE);
1304 	}
1305 	icb = (caddr_t)mcp->dbuf->db_sglist[0].seg_addr;
1306 	if (qlt->qlt_81xx_chip) {
1307 		bzero(icb, 0xC0);
1308 	} else {
1309 		bzero(icb, 0x80);
1310 	}
1311 	da = qlt->queue_mem_cookie.dmac_laddress;
1312 	DMEM_WR16(qlt, icb, 1);		/* Version */
1313 	DMEM_WR16(qlt, icb+4, 2112);	/* Max frame length */
1314 	DMEM_WR16(qlt, icb+6, 16);	/* Execution throttle */
1315 	DMEM_WR16(qlt, icb+8, rcount);	/* Xchg count */
1316 	DMEM_WR16(qlt, icb+0x0a, 0x00);	/* Hard address (not used) */
1317 	bcopy(qlt->qlt_port->port_pwwn, icb+0x0c, 8);
1318 	bcopy(qlt->qlt_port->port_nwwn, icb+0x14, 8);
1319 	DMEM_WR16(qlt, icb+0x20, 3);	/* Login retry count */
1320 	DMEM_WR16(qlt, icb+0x24, RESPONSE_QUEUE_ENTRIES);
1321 	DMEM_WR16(qlt, icb+0x26, REQUEST_QUEUE_ENTRIES);
1322 	if (!qlt->qlt_81xx_chip) {
1323 		DMEM_WR16(qlt, icb+0x28, 100); /* ms of NOS/OLS for Link down */
1324 	}
1325 	DMEM_WR16(qlt, icb+0x2a, PRIORITY_QUEUE_ENTRIES);
1326 	DMEM_WR64(qlt, icb+0x2c, (da+REQUEST_QUEUE_OFFSET));
1327 	DMEM_WR64(qlt, icb+0x34, (da+RESPONSE_QUEUE_OFFSET));
1328 	DMEM_WR64(qlt, icb+0x3c, (da+PRIORITY_QUEUE_OFFSET));
1329 	DMEM_WR16(qlt, icb+0x4e, ATIO_QUEUE_ENTRIES);
1330 	DMEM_WR64(qlt, icb+0x50, (da+ATIO_QUEUE_OFFSET));
1331 	DMEM_WR16(qlt, icb+0x58, 2);	/* Interrupt delay Timer */
1332 	DMEM_WR16(qlt, icb+0x5a, 4);	/* Login timeout (secs) */
1333 	if (qlt->qlt_81xx_chip) {
1334 		qlt_nvram_81xx_t *qlt81nvr = (qlt_nvram_81xx_t *)qlt->nvram;
1335 
1336 		DMEM_WR32(qlt, icb+0x5c, BIT_5 | BIT_4); /* fw options 1 */
1337 		DMEM_WR32(qlt, icb+0x64, BIT_20 | BIT_4); /* fw options 3 */
1338 		DMEM_WR32(qlt, icb+0x70,
1339 		    qlt81nvr->enode_mac[0] |
1340 		    (qlt81nvr->enode_mac[1] << 8) |
1341 		    (qlt81nvr->enode_mac[2] << 16) |
1342 		    (qlt81nvr->enode_mac[3] << 24));
1343 		DMEM_WR16(qlt, icb+0x74,
1344 		    qlt81nvr->enode_mac[4] |
1345 		    (qlt81nvr->enode_mac[5] << 8));
1346 	} else {
1347 		DMEM_WR32(qlt, icb+0x5c, BIT_11 | BIT_5 | BIT_4 |
1348 		    BIT_2 | BIT_1 | BIT_0);
1349 		DMEM_WR32(qlt, icb+0x60, BIT_5);
1350 		DMEM_WR32(qlt, icb+0x64, BIT_14 | BIT_8 | BIT_7 |
1351 		    BIT_4);
1352 	}
1353 
1354 	if (qlt->qlt_81xx_chip) {
1355 		qlt_dmem_bctl_t		*bctl;
1356 		uint32_t		index;
1357 		caddr_t			src;
1358 		caddr_t			dst;
1359 		qlt_nvram_81xx_t	*qlt81nvr;
1360 
1361 		dst = icb+0x80;
1362 		qlt81nvr = (qlt_nvram_81xx_t *)qlt->nvram;
1363 		src = (caddr_t)&qlt81nvr->ext_blk;
1364 		index = sizeof (qlt_ext_icb_81xx_t);
1365 
1366 		/* Use defaults for cases where we find nothing in NVR */
1367 		if (*src == 0) {
1368 			EL(qlt, "nvram eicb=null\n");
1369 			cmn_err(CE_NOTE, "qlt(%d) NVR eicb is zeroed",
1370 			    instance);
1371 			qlt81nvr->ext_blk.version[0] = 1;
1372 /*
1373  * not yet, for !FIP firmware at least
1374  *
1375  *                qlt81nvr->ext_blk.fcf_vlan_match = 0x81;
1376  */
1377 #ifdef _LITTLE_ENDIAN
1378 			qlt81nvr->ext_blk.fcf_vlan_id[0] = 0xEA;
1379 			qlt81nvr->ext_blk.fcf_vlan_id[1] = 0x03;
1380 #else
1381 			qlt81nvr->ext_blk.fcf_vlan_id[1] = 0xEA;
1382 			qlt81nvr->ext_blk.fcf_vlan_id[0] = 0x03;
1383 #endif
1384 		}
1385 
1386 		while (index--) {
1387 			*dst++ = *src++;
1388 		}
1389 
1390 		bctl = (qlt_dmem_bctl_t *)mcp->dbuf->db_port_private;
1391 		da = bctl->bctl_dev_addr + 0x80; /* base addr of eicb (phys) */
1392 
1393 		mcp->to_fw[11] = LSW(LSD(da));
1394 		mcp->to_fw[10] = MSW(LSD(da));
1395 		mcp->to_fw[13] = LSW(MSD(da));
1396 		mcp->to_fw[12] = MSW(MSD(da));
1397 		mcp->to_fw[14] = (uint16_t)(sizeof (qlt_ext_icb_81xx_t) &
1398 		    0xffff);
1399 
1400 		/* eicb enable */
1401 		mcp->to_fw[1] = (uint16_t)(mcp->to_fw[1] | BIT_0);
1402 		mcp->to_fw_mask |= BIT_14 | BIT_13 | BIT_12 | BIT_11 | BIT_10 |
1403 		    BIT_1;
1404 	}
1405 
1406 	qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORDEV);
1407 	mcp->to_fw[0] = MBC_INITIALIZE_FIRMWARE;
1408 
1409 	/*
1410 	 * This is the 1st command after adapter initialize which will
1411 	 * use interrupts and regular mailbox interface.
1412 	 */
1413 	qlt->mbox_io_state = MBOX_STATE_READY;
1414 	REG_WR32(qlt, REG_INTR_CTRL, ENABLE_RISC_INTR);
1415 	/* Issue mailbox to firmware */
1416 	ret = qlt_mailbox_command(qlt, mcp);
1417 	if (ret != QLT_SUCCESS) {
1418 		EL(qlt, "qlt_mailbox_command=60h status=%llxh\n", ret);
1419 		cmn_err(CE_NOTE, "qlt(%d) init fw failed %llx, intr status %x",
1420 		    instance, (long long)ret, REG_RD32(qlt, REG_INTR_STATUS));
1421 	}
1422 
1423 	mcp->to_fw_mask = BIT_0;
1424 	mcp->from_fw_mask = BIT_0 | BIT_1;
1425 	mcp->to_fw[0] = 0x28;
1426 	ret = qlt_mailbox_command(qlt, mcp);
1427 	if (ret != QLT_SUCCESS) {
1428 		EL(qlt, "qlt_mailbox_command=28h status=%llxh\n", ret);
1429 		cmn_err(CE_NOTE, "qlt(%d) get_fw_options %llx", instance,
1430 		    (long long)ret);
1431 	}
1432 
1433 	/*
1434 	 * Report FW versions for 81xx - MPI rev is useful
1435 	 */
1436 	if (qlt->qlt_81xx_chip) {
1437 		mcp->to_fw_mask = BIT_0;
1438 		mcp->from_fw_mask = BIT_11 | BIT_10 | BIT_3 | BIT_2 | BIT_1 |
1439 		    BIT_0;
1440 		mcp->to_fw[0] = 0x8;
1441 		ret = qlt_mailbox_command(qlt, mcp);
1442 		if (ret != QLT_SUCCESS) {
1443 			EL(qlt, "about fw failed: %llx\n", (long long)ret);
1444 		} else {
1445 			EL(qlt, "Firmware version %d.%d.%d, MPI: %d.%d.%d\n",
1446 			    mcp->from_fw[1], mcp->from_fw[2], mcp->from_fw[3],
1447 			    mcp->from_fw[10] & 0xff, mcp->from_fw[11] >> 8,
1448 			    mcp->from_fw[11] & 0xff);
1449 		}
1450 	}
1451 
1452 	qlt_free_mailbox_command(qlt, mcp);
1453 
1454 	for (i = 0; i < 5; i++) {
1455 		qlt->qlt_bufref[i] = 0;
1456 	}
1457 	qlt->qlt_bumpbucket = 0;
1458 	qlt->qlt_pmintry = 0;
1459 	qlt->qlt_pmin_ok = 0;
1460 
1461 	if (ret != QLT_SUCCESS)
1462 		return (ret);
1463 	return (FCT_SUCCESS);
1464 }
1465 
1466 fct_status_t
1467 qlt_port_offline(qlt_state_t *qlt)
1468 {
1469 	int		retries;
1470 
1471 	mutex_enter(&qlt->mbox_lock);
1472 
1473 	if (qlt->mbox_io_state == MBOX_STATE_UNKNOWN) {
1474 		mutex_exit(&qlt->mbox_lock);
1475 		goto poff_mbox_done;
1476 	}
1477 
1478 	/* Wait to grab the mailboxes */
1479 	for (retries = 0; qlt->mbox_io_state != MBOX_STATE_READY;
1480 	    retries++) {
1481 		cv_wait(&qlt->mbox_cv, &qlt->mbox_lock);
1482 		if ((retries > 5) ||
1483 		    (qlt->mbox_io_state == MBOX_STATE_UNKNOWN)) {
1484 			qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
1485 			mutex_exit(&qlt->mbox_lock);
1486 			goto poff_mbox_done;
1487 		}
1488 	}
1489 	qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
1490 	mutex_exit(&qlt->mbox_lock);
1491 poff_mbox_done:;
1492 	qlt->intr_sneak_counter = 10;
1493 	mutex_enter(&qlt->intr_lock);
1494 	(void) qlt_reset_chip(qlt);
1495 	drv_usecwait(20);
1496 	qlt->intr_sneak_counter = 0;
1497 	mutex_exit(&qlt->intr_lock);
1498 
1499 	return (FCT_SUCCESS);
1500 }
1501 
1502 static fct_status_t
1503 qlt_get_link_info(fct_local_port_t *port, fct_link_info_t *li)
1504 {
1505 	qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
1506 	mbox_cmd_t *mcp;
1507 	fct_status_t fc_ret;
1508 	fct_status_t ret;
1509 	clock_t et;
1510 
1511 	et = ddi_get_lbolt() + drv_usectohz(5000000);
1512 	mcp = qlt_alloc_mailbox_command(qlt, 0);
1513 link_info_retry:
1514 	mcp->to_fw[0] = MBC_GET_ID;
1515 	mcp->to_fw[9] = 0;
1516 	mcp->to_fw_mask |= BIT_0 | BIT_9;
1517 	mcp->from_fw_mask |= BIT_0 | BIT_1 | BIT_2 | BIT_3 | BIT_6 | BIT_7;
1518 	/* Issue mailbox to firmware */
1519 	ret = qlt_mailbox_command(qlt, mcp);
1520 	if (ret != QLT_SUCCESS) {
1521 		EL(qlt, "qlt_mailbox_command=20h status=%llxh\n", ret);
1522 		if ((mcp->from_fw[0] == 0x4005) && (mcp->from_fw[1] == 7)) {
1523 			/* Firmware is not ready */
1524 			if (ddi_get_lbolt() < et) {
1525 				delay(drv_usectohz(50000));
1526 				goto link_info_retry;
1527 			}
1528 		}
1529 		stmf_trace(qlt->qlt_port_alias, "GET ID mbox failed, ret=%llx "
1530 		    "mb0=%x mb1=%x", ret, mcp->from_fw[0], mcp->from_fw[1]);
1531 		fc_ret = FCT_FAILURE;
1532 	} else {
1533 		li->portid = ((uint32_t)(mcp->from_fw[2])) |
1534 		    (((uint32_t)(mcp->from_fw[3])) << 16);
1535 
1536 		li->port_speed = qlt->link_speed;
1537 		switch (mcp->from_fw[6]) {
1538 		case 1:
1539 			li->port_topology = PORT_TOPOLOGY_PUBLIC_LOOP;
1540 			li->port_fca_flogi_done = 1;
1541 			break;
1542 		case 0:
1543 			li->port_topology = PORT_TOPOLOGY_PRIVATE_LOOP;
1544 			li->port_no_fct_flogi = 1;
1545 			break;
1546 		case 3:
1547 			li->port_topology = PORT_TOPOLOGY_FABRIC_PT_TO_PT;
1548 			li->port_fca_flogi_done = 1;
1549 			break;
1550 		case 2: /*FALLTHROUGH*/
1551 		case 4:
1552 			li->port_topology = PORT_TOPOLOGY_PT_TO_PT;
1553 			li->port_fca_flogi_done = 1;
1554 			break;
1555 		default:
1556 			li->port_topology = PORT_TOPOLOGY_UNKNOWN;
1557 			EL(qlt, "Unknown topology=%xh\n", mcp->from_fw[6]);
1558 		}
1559 		qlt->cur_topology = li->port_topology;
1560 		fc_ret = FCT_SUCCESS;
1561 	}
1562 	qlt_free_mailbox_command(qlt, mcp);
1563 
1564 	if ((fc_ret == FCT_SUCCESS) && (li->port_fca_flogi_done)) {
1565 		mcp = qlt_alloc_mailbox_command(qlt, 64);
1566 		mcp->to_fw[0] = MBC_GET_PORT_DATABASE;
1567 		mcp->to_fw[1] = 0x7FE;
1568 		mcp->to_fw[9] = 0;
1569 		mcp->to_fw[10] = 0;
1570 		mcp->to_fw_mask |= BIT_0 | BIT_1 | BIT_9 | BIT_10;
1571 		fc_ret = qlt_mailbox_command(qlt, mcp);
1572 		if (fc_ret != QLT_SUCCESS) {
1573 			EL(qlt, "qlt_mailbox_command=64h status=%llxh\n",
1574 			    fc_ret);
1575 			stmf_trace(qlt->qlt_port_alias, "Attempt to get port "
1576 			    "database for F_port failed, ret = %llx", fc_ret);
1577 		} else {
1578 			uint8_t *p;
1579 
1580 			qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORCPU);
1581 			p = mcp->dbuf->db_sglist[0].seg_addr;
1582 			bcopy(p + 0x18, li->port_rpwwn, 8);
1583 			bcopy(p + 0x20, li->port_rnwwn, 8);
1584 		}
1585 		qlt_free_mailbox_command(qlt, mcp);
1586 	}
1587 	return (fc_ret);
1588 }
1589 
1590 static int
1591 qlt_open(dev_t *devp, int flag, int otype, cred_t *credp)
1592 {
1593 	int		instance;
1594 	qlt_state_t	*qlt;
1595 
1596 	if (otype != OTYP_CHR) {
1597 		return (EINVAL);
1598 	}
1599 
1600 	/*
1601 	 * Since this is for debugging only, only allow root to issue ioctl now
1602 	 * Since this is for debugging only, only allow root to issue ioctls for now
1603 	if (drv_priv(credp)) {
1604 		return (EPERM);
1605 	}
1606 
1607 	instance = (int)getminor(*devp);
1608 	qlt = ddi_get_soft_state(qlt_state, instance);
1609 	if (qlt == NULL) {
1610 		return (ENXIO);
1611 	}
1612 
1613 	mutex_enter(&qlt->qlt_ioctl_lock);
1614 	if (qlt->qlt_ioctl_flags & QLT_IOCTL_FLAG_EXCL) {
1615 		/*
1616 		 * It is already open for exclusive access.
1617 		 * So shut the door on this caller.
1618 		 */
1619 		mutex_exit(&qlt->qlt_ioctl_lock);
1620 		return (EBUSY);
1621 	}
1622 
1623 	if (flag & FEXCL) {
1624 		if (qlt->qlt_ioctl_flags & QLT_IOCTL_FLAG_OPEN) {
1625 			/*
1626 			 * Exclusive operation not possible
1627 			 * as it is already opened
1628 			 */
1629 			mutex_exit(&qlt->qlt_ioctl_lock);
1630 			return (EBUSY);
1631 		}
1632 		qlt->qlt_ioctl_flags |= QLT_IOCTL_FLAG_EXCL;
1633 	}
1634 	qlt->qlt_ioctl_flags |= QLT_IOCTL_FLAG_OPEN;
1635 	mutex_exit(&qlt->qlt_ioctl_lock);
1636 
1637 	return (0);
1638 }
1639 
1640 /* ARGSUSED */
1641 static int
1642 qlt_close(dev_t dev, int flag, int otype, cred_t *credp)
1643 {
1644 	int		instance;
1645 	qlt_state_t	*qlt;
1646 
1647 	if (otype != OTYP_CHR) {
1648 		return (EINVAL);
1649 	}
1650 
1651 	instance = (int)getminor(dev);
1652 	qlt = ddi_get_soft_state(qlt_state, instance);
1653 	if (qlt == NULL) {
1654 		return (ENXIO);
1655 	}
1656 
1657 	mutex_enter(&qlt->qlt_ioctl_lock);
1658 	if ((qlt->qlt_ioctl_flags & QLT_IOCTL_FLAG_OPEN) == 0) {
1659 		mutex_exit(&qlt->qlt_ioctl_lock);
1660 		return (ENODEV);
1661 	}
1662 
1663 	/*
1664 	 * It looks like there's one hole here: there could be several
1665 	 * concurrent shared open sessions, but we never check this case.
1666 	 * But it will not hurt too much, so disregard it for now.
1667 	 */
1668 	qlt->qlt_ioctl_flags &= ~QLT_IOCTL_FLAG_MASK;
1669 	mutex_exit(&qlt->qlt_ioctl_lock);
1670 
1671 	return (0);
1672 }
1673 
1674 /*
1675  * All of these ioctls are unstable interfaces which are meant to be used
1676  * in a controlled lab env. No formal testing will be (or needs to be) done
1677  * for these ioctls. Especially note that running with an additional
1678  * uploaded firmware is not supported and is provided here for test
1679  * purposes only.
1680  */
1681 /* ARGSUSED */
1682 static int
1683 qlt_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
1684     cred_t *credp, int *rval)
1685 {
1686 	qlt_state_t	*qlt;
1687 	int		ret = 0;
1688 #ifdef _LITTLE_ENDIAN
1689 	int		i;
1690 #endif
1691 	stmf_iocdata_t	*iocd;
1692 	void		*ibuf = NULL;
1693 	void		*obuf = NULL;
1694 	uint32_t	*intp;
1695 	qlt_fw_info_t	*fwi;
1696 	mbox_cmd_t	*mcp;
1697 	fct_status_t	st;
1698 	char		info[QLT_INFO_LEN];
1699 	fct_status_t	ret2;
1700 
1701 	if (drv_priv(credp) != 0)
1702 		return (EPERM);
1703 
1704 	qlt = ddi_get_soft_state(qlt_state, (int32_t)getminor(dev));
1705 	ret = stmf_copyin_iocdata(data, mode, &iocd, &ibuf, &obuf);
1706 	if (ret)
1707 		return (ret);
1708 	iocd->stmf_error = 0;
1709 
1710 	switch (cmd) {
1711 	case QLT_IOCTL_FETCH_FWDUMP:
1712 		if (iocd->stmf_obuf_size < QLT_FWDUMP_BUFSIZE) {
1713 			EL(qlt, "FETCH_FWDUMP obuf_size=%d < %d\n",
1714 			    iocd->stmf_obuf_size, QLT_FWDUMP_BUFSIZE);
1715 			ret = EINVAL;
1716 			break;
1717 		}
1718 		mutex_enter(&qlt->qlt_ioctl_lock);
1719 		if (!(qlt->qlt_ioctl_flags & QLT_FWDUMP_ISVALID)) {
1720 			mutex_exit(&qlt->qlt_ioctl_lock);
1721 			ret = ENODATA;
1722 			EL(qlt, "no fwdump\n");
1723 			iocd->stmf_error = QLTIO_NO_DUMP;
1724 			break;
1725 		}
1726 		if (qlt->qlt_ioctl_flags & QLT_FWDUMP_INPROGRESS) {
1727 			mutex_exit(&qlt->qlt_ioctl_lock);
1728 			ret = EBUSY;
1729 			EL(qlt, "fwdump inprogress\n");
1730 			iocd->stmf_error = QLTIO_DUMP_INPROGRESS;
1731 			break;
1732 		}
1733 		if (qlt->qlt_ioctl_flags & QLT_FWDUMP_FETCHED_BY_USER) {
1734 			mutex_exit(&qlt->qlt_ioctl_lock);
1735 			ret = EEXIST;
1736 			EL(qlt, "fwdump already fetched\n");
1737 			iocd->stmf_error = QLTIO_ALREADY_FETCHED;
1738 			break;
1739 		}
1740 		bcopy(qlt->qlt_fwdump_buf, obuf, QLT_FWDUMP_BUFSIZE);
1741 		qlt->qlt_ioctl_flags |= QLT_FWDUMP_FETCHED_BY_USER;
1742 		mutex_exit(&qlt->qlt_ioctl_lock);
1743 
1744 		break;
1745 
1746 	case QLT_IOCTL_TRIGGER_FWDUMP:
1747 		if (qlt->qlt_state != FCT_STATE_ONLINE) {
1748 			ret = EACCES;
1749 			iocd->stmf_error = QLTIO_NOT_ONLINE;
1750 			break;
1751 		}
1752 		(void) snprintf(info, sizeof (info), "qlt_ioctl: qlt-%p, "
1753 		    "user triggered FWDUMP with RFLAG_RESET", (void *)qlt);
1754 		if ((ret2 = fct_port_shutdown(qlt->qlt_port,
1755 		    STMF_RFLAG_USER_REQUEST | STMF_RFLAG_RESET |
1756 		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info)) != FCT_SUCCESS) {
1757 			EL(qlt, "TRIGGER_FWDUMP fct_port_shutdown status="
1758 			    "%llxh\n", ret2);
1759 			ret = EIO;
1760 		}
1761 		break;
1762 	case QLT_IOCTL_UPLOAD_FW:
1763 		if ((iocd->stmf_ibuf_size < 1024) ||
1764 		    (iocd->stmf_ibuf_size & 3)) {
1765 			EL(qlt, "UPLOAD_FW ibuf_size=%d < 1024\n",
1766 			    iocd->stmf_ibuf_size);
1767 			ret = EINVAL;
1768 			iocd->stmf_error = QLTIO_INVALID_FW_SIZE;
1769 			break;
1770 		}
1771 		intp = (uint32_t *)ibuf;
1772 #ifdef _LITTLE_ENDIAN
1773 		for (i = 0; (i << 2) < iocd->stmf_ibuf_size; i++) {
1774 			intp[i] = BSWAP_32(intp[i]);
1775 		}
1776 #endif
1777 		if (((intp[3] << 2) >= iocd->stmf_ibuf_size) ||
1778 		    (((intp[intp[3] + 3] + intp[3]) << 2) !=
1779 		    iocd->stmf_ibuf_size)) {
1780 			EL(qlt, "UPLOAD_FW fw_size=%d >= %d\n", intp[3] << 2,
1781 			    iocd->stmf_ibuf_size);
1782 			ret = EINVAL;
1783 			iocd->stmf_error = QLTIO_INVALID_FW_SIZE;
1784 			break;
1785 		}
1786 		if ((qlt->qlt_81xx_chip && ((intp[8] & 8) == 0)) ||
1787 		    (qlt->qlt_25xx_chip && ((intp[8] & 4) == 0)) ||
1788 		    (!qlt->qlt_25xx_chip && !qlt->qlt_81xx_chip &&
1789 		    ((intp[8] & 3) == 0))) {
1790 			EL(qlt, "UPLOAD_FW fw_type=%d\n", intp[8]);
1791 			ret = EACCES;
1792 			iocd->stmf_error = QLTIO_INVALID_FW_TYPE;
1793 			break;
1794 		}
1795 
1796 		/* Everything looks OK, let's copy this firmware */
1797 		if (qlt->fw_code01) {
1798 			kmem_free(qlt->fw_code01, (qlt->fw_length01 +
1799 			    qlt->fw_length02) << 2);
1800 			qlt->fw_code01 = NULL;
1801 		} else {
1802 			atomic_inc_32(&qlt_loaded_counter);
1803 		}
1804 		qlt->fw_length01 = intp[3];
1805 		qlt->fw_code01 = (uint32_t *)kmem_alloc(iocd->stmf_ibuf_size,
1806 		    KM_SLEEP);
1807 		bcopy(intp, qlt->fw_code01, iocd->stmf_ibuf_size);
1808 		qlt->fw_addr01 = intp[2];
1809 		qlt->fw_code02 = &qlt->fw_code01[intp[3]];
1810 		qlt->fw_addr02 = qlt->fw_code02[2];
1811 		qlt->fw_length02 = qlt->fw_code02[3];
1812 		break;
1813 
1814 	case QLT_IOCTL_CLEAR_FW:
1815 		if (qlt->fw_code01) {
1816 			kmem_free(qlt->fw_code01, (qlt->fw_length01 +
1817 			    qlt->fw_length02) << 2);
1818 			qlt->fw_code01 = NULL;
1819 			atomic_dec_32(&qlt_loaded_counter);
1820 		}
1821 		break;
1822 
1823 	case QLT_IOCTL_GET_FW_INFO:
1824 		if (iocd->stmf_obuf_size != sizeof (qlt_fw_info_t)) {
1825 			EL(qlt, "GET_FW_INFO obuf_size=%d != %d\n",
1826 			    iocd->stmf_obuf_size, sizeof (qlt_fw_info_t));
1827 			ret = EINVAL;
1828 			break;
1829 		}
1830 		fwi = (qlt_fw_info_t *)obuf;
1831 		if (qlt->qlt_stay_offline) {
1832 			fwi->fwi_stay_offline = 1;
1833 		}
1834 		if (qlt->qlt_state == FCT_STATE_ONLINE) {
1835 			fwi->fwi_port_active = 1;
1836 		}
1837 		fwi->fwi_active_major = qlt->fw_major;
1838 		fwi->fwi_active_minor = qlt->fw_minor;
1839 		fwi->fwi_active_subminor = qlt->fw_subminor;
1840 		fwi->fwi_active_attr = qlt->fw_attr;
1841 		if (qlt->fw_code01) {
1842 			fwi->fwi_fw_uploaded = 1;
1843 			fwi->fwi_loaded_major = (uint16_t)qlt->fw_code01[4];
1844 			fwi->fwi_loaded_minor = (uint16_t)qlt->fw_code01[5];
1845 			fwi->fwi_loaded_subminor = (uint16_t)qlt->fw_code01[6];
1846 			fwi->fwi_loaded_attr = (uint16_t)qlt->fw_code01[7];
1847 		}
1848 		if (qlt->qlt_81xx_chip) {
1849 			fwi->fwi_default_major = (uint16_t)fw8100_code01[4];
1850 			fwi->fwi_default_minor = (uint16_t)fw8100_code01[5];
1851 			fwi->fwi_default_subminor = (uint16_t)fw8100_code01[6];
1852 			fwi->fwi_default_attr = (uint16_t)fw8100_code01[7];
1853 		} else if (qlt->qlt_25xx_chip) {
1854 			fwi->fwi_default_major = (uint16_t)fw2500_code01[4];
1855 			fwi->fwi_default_minor = (uint16_t)fw2500_code01[5];
1856 			fwi->fwi_default_subminor = (uint16_t)fw2500_code01[6];
1857 			fwi->fwi_default_attr = (uint16_t)fw2500_code01[7];
1858 		} else {
1859 			fwi->fwi_default_major = (uint16_t)fw2400_code01[4];
1860 			fwi->fwi_default_minor = (uint16_t)fw2400_code01[5];
1861 			fwi->fwi_default_subminor = (uint16_t)fw2400_code01[6];
1862 			fwi->fwi_default_attr = (uint16_t)fw2400_code01[7];
1863 		}
1864 		break;
1865 
1866 	case QLT_IOCTL_STAY_OFFLINE:
1867 		if (!iocd->stmf_ibuf_size) {
1868 			EL(qlt, "STAY_OFFLINE ibuf_size=%d\n",
1869 			    iocd->stmf_ibuf_size);
1870 			ret = EINVAL;
1871 			break;
1872 		}
1873 		if (*((char *)ibuf)) {
1874 			qlt->qlt_stay_offline = 1;
1875 		} else {
1876 			qlt->qlt_stay_offline = 0;
1877 		}
1878 		break;
1879 
1880 	case QLT_IOCTL_MBOX:
1881 		if ((iocd->stmf_ibuf_size < sizeof (qlt_ioctl_mbox_t)) ||
1882 		    (iocd->stmf_obuf_size < sizeof (qlt_ioctl_mbox_t))) {
1883 			EL(qlt, "IOCTL_MBOX ibuf_size=%d, obuf_size=%d\n",
1884 			    iocd->stmf_ibuf_size, iocd->stmf_obuf_size);
1885 			ret = EINVAL;
1886 			break;
1887 		}
1888 		mcp = qlt_alloc_mailbox_command(qlt, 0);
1889 		if (mcp == NULL) {
1890 			EL(qlt, "IOCTL_MBOX mcp == NULL\n");
1891 			ret = ENOMEM;
1892 			break;
1893 		}
1894 		bcopy(ibuf, mcp, sizeof (qlt_ioctl_mbox_t));
1895 		st = qlt_mailbox_command(qlt, mcp);
1896 		bcopy(mcp, obuf, sizeof (qlt_ioctl_mbox_t));
1897 		qlt_free_mailbox_command(qlt, mcp);
1898 		if (st != QLT_SUCCESS) {
1899 			if ((st & (~((uint64_t)(0xFFFF)))) == QLT_MBOX_FAILED)
1900 				st = QLT_SUCCESS;
1901 		}
1902 		if (st != QLT_SUCCESS) {
1903 			EL(qlt, "IOCTL_MBOX status=%xh\n", st);
1904 			ret = EIO;
1905 			switch (st) {
1906 			case QLT_MBOX_NOT_INITIALIZED:
1907 				iocd->stmf_error = QLTIO_MBOX_NOT_INITIALIZED;
1908 				break;
1909 			case QLT_MBOX_BUSY:
1910 				iocd->stmf_error = QLTIO_CANT_GET_MBOXES;
1911 				break;
1912 			case QLT_MBOX_TIMEOUT:
1913 				iocd->stmf_error = QLTIO_MBOX_TIMED_OUT;
1914 				break;
1915 			case QLT_MBOX_ABORTED:
1916 				iocd->stmf_error = QLTIO_MBOX_ABORTED;
1917 				break;
1918 			}
1919 		}
1920 		break;
1921 
1922 	case QLT_IOCTL_ELOG:
1923 		qlt_dump_el_trace_buffer(qlt);
1924 		break;
1925 
1926 	default:
1927 		EL(qlt, "Unknown ioctl-%xh\n", cmd);
1928 		ret = ENOTTY;
1929 	}
1930 
1931 	if (ret == 0) {
1932 		ret = stmf_copyout_iocdata(data, mode, iocd, obuf);
1933 	} else if (iocd->stmf_error) {
1934 		(void) stmf_copyout_iocdata(data, mode, iocd, obuf);
1935 	}
1936 	if (obuf) {
1937 		kmem_free(obuf, iocd->stmf_obuf_size);
1938 		obuf = NULL;
1939 	}
1940 	if (ibuf) {
1941 		kmem_free(ibuf, iocd->stmf_ibuf_size);
1942 		ibuf = NULL;
1943 	}
1944 	kmem_free(iocd, sizeof (stmf_iocdata_t));
1945 	return (ret);
1946 }
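
/*
 * Illustrative sketch (not part of the original driver): the firmware
 * image word layout that QLT_IOCTL_UPLOAD_FW and QLT_IOCTL_GET_FW_INFO
 * above appear to assume for each of the two code segments. Field names
 * are hypothetical and simply mirror how intp[] is indexed above;
 * offsets are in 32-bit words.
 */
#ifdef QLT_EXAMPLE_SKETCH
typedef struct qlt_example_fw_seg_hdr {
	uint32_t	word0_1[2];	/* not interpreted by the driver */
	uint32_t	load_addr;	/* word 2: RISC load address */
	uint32_t	word_count;	/* word 3: segment length in words */
	uint32_t	major;		/* word 4 */
	uint32_t	minor;		/* word 5 */
	uint32_t	subminor;	/* word 6 */
	uint32_t	attr;		/* word 7 */
	uint32_t	type_flags;	/* word 8: chip family bits */
} qlt_example_fw_seg_hdr_t;
#endif	/* QLT_EXAMPLE_SKETCH */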
1947 
1948 static fct_status_t
1949 qlt_force_lip(qlt_state_t *qlt)
1950 {
1951 	mbox_cmd_t	*mcp;
1952 	fct_status_t	 rval;
1953 
1954 	mcp = qlt_alloc_mailbox_command(qlt, 0);
1955 	mcp->to_fw[0] = 0x0072;
1956 	mcp->to_fw[1] = BIT_4;
1957 	mcp->to_fw[3] = 1;
1958 	mcp->to_fw_mask |= BIT_1 | BIT_3;
1959 	rval = qlt_mailbox_command(qlt, mcp);
1960 	if (rval != FCT_SUCCESS) {
1961 		EL(qlt, "qlt force lip MB failed: rval=%x", rval);
1962 	} else {
1963 		if (mcp->from_fw[0] != 0x4000) {
1964 			QLT_LOG(qlt->qlt_port_alias, "qlt FLIP: fw[0]=%x",
1965 			    mcp->from_fw[0]);
1966 			rval = FCT_FAILURE;
1967 		}
1968 	}
1969 	qlt_free_mailbox_command(qlt, mcp);
1970 	return (rval);
1971 }
1972 
1973 static void
1974 qlt_ctl(struct fct_local_port *port, int cmd, void *arg)
1975 {
1976 	stmf_change_status_t		st;
1977 	stmf_state_change_info_t	*ssci = (stmf_state_change_info_t *)arg;
1978 	qlt_state_t			*qlt;
1979 	fct_status_t			ret;
1980 
1981 	ASSERT((cmd == FCT_CMD_PORT_ONLINE) ||
1982 	    (cmd == FCT_CMD_PORT_OFFLINE) ||
1983 	    (cmd == FCT_CMD_FORCE_LIP) ||
1984 	    (cmd == FCT_ACK_PORT_ONLINE_COMPLETE) ||
1985 	    (cmd == FCT_ACK_PORT_OFFLINE_COMPLETE));
1986 
1987 	qlt = (qlt_state_t *)port->port_fca_private;
1988 	st.st_completion_status = FCT_SUCCESS;
1989 	st.st_additional_info = NULL;
1990 
1991 	switch (cmd) {
1992 	case FCT_CMD_PORT_ONLINE:
1993 		if (qlt->qlt_state == FCT_STATE_ONLINE)
1994 			st.st_completion_status = STMF_ALREADY;
1995 		else if (qlt->qlt_state != FCT_STATE_OFFLINE)
1996 			st.st_completion_status = FCT_FAILURE;
1997 		if (st.st_completion_status == FCT_SUCCESS) {
1998 			qlt->qlt_state = FCT_STATE_ONLINING;
1999 			qlt->qlt_state_not_acked = 1;
2000 			st.st_completion_status = qlt_port_online(qlt);
2001 			if (st.st_completion_status != STMF_SUCCESS) {
2002 				EL(qlt, "PORT_ONLINE status=%xh\n",
2003 				    st.st_completion_status);
2004 				qlt->qlt_state = FCT_STATE_OFFLINE;
2005 				qlt->qlt_state_not_acked = 0;
2006 			} else {
2007 				qlt->qlt_state = FCT_STATE_ONLINE;
2008 			}
2009 		}
2010 		fct_ctl(port->port_lport, FCT_CMD_PORT_ONLINE_COMPLETE, &st);
2011 		qlt->qlt_change_state_flags = 0;
2012 		break;
2013 
2014 	case FCT_CMD_PORT_OFFLINE:
2015 		if (qlt->qlt_state == FCT_STATE_OFFLINE) {
2016 			st.st_completion_status = STMF_ALREADY;
2017 		} else if (qlt->qlt_state != FCT_STATE_ONLINE) {
2018 			st.st_completion_status = FCT_FAILURE;
2019 		}
2020 		if (st.st_completion_status == FCT_SUCCESS) {
2021 			qlt->qlt_state = FCT_STATE_OFFLINING;
2022 			qlt->qlt_state_not_acked = 1;
2023 
2024 			if (ssci->st_rflags & STMF_RFLAG_COLLECT_DEBUG_DUMP) {
2025 				(void) qlt_firmware_dump(port, ssci);
2026 			}
2027 			qlt->qlt_change_state_flags = (uint32_t)ssci->st_rflags;
2028 			st.st_completion_status = qlt_port_offline(qlt);
2029 			if (st.st_completion_status != STMF_SUCCESS) {
2030 				EL(qlt, "PORT_OFFLINE status=%xh\n",
2031 				    st.st_completion_status);
2032 				qlt->qlt_state = FCT_STATE_ONLINE;
2033 				qlt->qlt_state_not_acked = 0;
2034 			} else {
2035 				qlt->qlt_state = FCT_STATE_OFFLINE;
2036 			}
2037 		}
2038 		fct_ctl(port->port_lport, FCT_CMD_PORT_OFFLINE_COMPLETE, &st);
2039 		break;
2040 
2041 	case FCT_ACK_PORT_ONLINE_COMPLETE:
2042 		qlt->qlt_state_not_acked = 0;
2043 		break;
2044 
2045 	case FCT_ACK_PORT_OFFLINE_COMPLETE:
2046 		qlt->qlt_state_not_acked = 0;
2047 		if ((qlt->qlt_change_state_flags & STMF_RFLAG_RESET) &&
2048 		    (qlt->qlt_stay_offline == 0)) {
2049 			if ((ret = fct_port_initialize(port,
2050 			    qlt->qlt_change_state_flags,
2051 			    "qlt_ctl FCT_ACK_PORT_OFFLINE_COMPLETE "
2052 			    "with RLFLAG_RESET")) != FCT_SUCCESS) {
2053 				EL(qlt, "fct_port_initialize status=%llxh\n",
2054 				    ret);
2055 				cmn_err(CE_WARN, "qlt_ctl: "
2056 				    "fct_port_initialize failed, please use "
2057 				    "stmfstate to start the port-%s manually",
2058 				    qlt->qlt_port_alias);
2059 			}
2060 		}
2061 		break;
2062 
2063 	case FCT_CMD_FORCE_LIP:
2064 		if (qlt->qlt_81xx_chip) {
2065 			EL(qlt, "force lip is an unsupported command "
2066 			    "for this adapter type\n");
2067 		} else {
2068 			*((fct_status_t *)arg) = qlt_force_lip(qlt);
2069 			EL(qlt, "forcelip done\n");
2070 		}
2071 		break;
2072 
2073 	default:
2074 		EL(qlt, "unsupported cmd - 0x%02X", cmd);
2075 		break;
2076 	}
2077 }
2078 
2079 /* ARGSUSED */
2080 static fct_status_t
2081 qlt_do_flogi(fct_local_port_t *port, fct_flogi_xchg_t *fx)
2082 {
2083 	qlt_state_t	*qlt = (qlt_state_t *)port->port_fca_private;
2084 
2085 	EL(qlt, "FLOGI requested not supported\n");
2086 	cmn_err(CE_WARN, "qlt: FLOGI requested (not supported)");
2087 	return (FCT_FAILURE);
2088 }
2089 
2090 /*
2091  * Return a pointer to n entries in the request queue. Assumes that
2092  * request queue lock is held. Does a very short busy wait if
2093  * fewer entries (or none) are available. Returns NULL if it still cannot
2094  * fulfill the request.
2095  * **CALL qlt_submit_req_entries() BEFORE DROPPING THE LOCK**
2096  */
2097 caddr_t
2098 qlt_get_req_entries(qlt_state_t *qlt, uint32_t n)
2099 {
2100 	int try = 0;
2101 
2102 	while (qlt->req_available < n) {
2103 		uint32_t val1, val2, val3;
2104 		val1 = REG_RD32(qlt, REG_REQ_OUT_PTR);
2105 		val2 = REG_RD32(qlt, REG_REQ_OUT_PTR);
2106 		val3 = REG_RD32(qlt, REG_REQ_OUT_PTR);
2107 		if ((val1 != val2) || (val2 != val3))
2108 			continue;
2109 
2110 		qlt->req_ndx_from_fw = val1;
2111 		qlt->req_available = REQUEST_QUEUE_ENTRIES - 1 -
2112 		    ((qlt->req_ndx_to_fw - qlt->req_ndx_from_fw) &
2113 		    (REQUEST_QUEUE_ENTRIES - 1));
2114 		if (qlt->req_available < n) {
2115 			if (try < 2) {
2116 				drv_usecwait(100);
2117 				try++;
2118 				continue;
2119 			} else {
2120 				stmf_trace(qlt->qlt_port_alias,
2121 				    "Req Q is full");
2122 				return (NULL);
2123 			}
2124 		}
2125 		break;
2126 	}
2127 	/* We don't change anything until the entries are submitted */
2128 	return (&qlt->req_ptr[qlt->req_ndx_to_fw << 6]);
2129 }
2130 
2131 /*
2132  * Updates the request in pointer to fw. Assumes that the req lock is held.
2133  */
2134 void
2135 qlt_submit_req_entries(qlt_state_t *qlt, uint32_t n)
2136 {
2137 	ASSERT(n >= 1);
2138 	qlt->req_ndx_to_fw += n;
2139 	qlt->req_ndx_to_fw &= REQUEST_QUEUE_ENTRIES - 1;
2140 	qlt->req_available -= n;
2141 	REG_WR32(qlt, REG_REQ_IN_PTR, qlt->req_ndx_to_fw);
2142 }
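
/*
 * Illustrative usage sketch (not part of the original driver), showing
 * the contract of the two routines above: reserve entries and submit
 * them before dropping req_lock, as done by the IDC ACK path in
 * qlt_isr() later in this file. The free-entry formula works on a
 * power-of-two ring; e.g. with 2048 entries, to_fw=10 and from_fw=8
 * leave 2048 - 1 - 2 = 2045 free slots. The entry type written below
 * is placeholder filler.
 */
#ifdef QLT_EXAMPLE_SKETCH
static fct_status_t
qlt_example_post_one_iocb(qlt_state_t *qlt)
{
	caddr_t req;

	mutex_enter(&qlt->req_lock);
	req = qlt_get_req_entries(qlt, 1);	/* reserve one 64-byte IOCB */
	if (req == NULL) {
		mutex_exit(&qlt->req_lock);
		return (FCT_BUSY);		/* queue full, retry later */
	}
	bzero(req, IOCB_SIZE);
	req[0] = 0x39;				/* entry type (placeholder) */
	req[1] = 1;				/* entry count */
	qlt_submit_req_entries(qlt, 1);		/* must precede the unlock */
	mutex_exit(&qlt->req_lock);
	return (FCT_SUCCESS);
}
#endif	/* QLT_EXAMPLE_SKETCH */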
2143 
2144 
2145 /*
2146  * Return a pointer to n entries in the priority request queue. Assumes that
2147  * priority request queue lock is held. Does a very short busy wait if
2148  * fewer entries (or none) are available. Returns NULL if it still cannot
2149  * fulfill the request.
2150  * **CALL qlt_submit_preq_entries() BEFORE DROPPING THE LOCK**
2151  */
2152 caddr_t
2153 qlt_get_preq_entries(qlt_state_t *qlt, uint32_t n)
2154 {
2155 	int try = 0;
2156 	uint32_t req_available = PRIORITY_QUEUE_ENTRIES - 1 -
2157 	    ((qlt->preq_ndx_to_fw - qlt->preq_ndx_from_fw) &
2158 	    (PRIORITY_QUEUE_ENTRIES - 1));
2159 
2160 	while (req_available < n) {
2161 		uint32_t val1, val2, val3;
2162 		val1 = REG_RD32(qlt, REG_PREQ_OUT_PTR);
2163 		val2 = REG_RD32(qlt, REG_PREQ_OUT_PTR);
2164 		val3 = REG_RD32(qlt, REG_PREQ_OUT_PTR);
2165 		if ((val1 != val2) || (val2 != val3))
2166 			continue;
2167 
2168 		qlt->preq_ndx_from_fw = val1;
2169 		req_available = PRIORITY_QUEUE_ENTRIES - 1 -
2170 		    ((qlt->preq_ndx_to_fw - qlt->preq_ndx_from_fw) &
2171 		    (PRIORITY_QUEUE_ENTRIES - 1));
2172 		if (req_available < n) {
2173 			if (try < 2) {
2174 				drv_usecwait(100);
2175 				try++;
2176 				continue;
2177 			} else {
2178 				return (NULL);
2179 			}
2180 		}
2181 		break;
2182 	}
2183 	/* We don't change anything until the entries are submitted */
2184 	return (&qlt->preq_ptr[qlt->preq_ndx_to_fw << 6]);
2185 }
2186 
2187 /*
2188  * Updates the priority request in pointer to fw. Assumes preq lock is held.
2189  */
2190 void
2191 qlt_submit_preq_entries(qlt_state_t *qlt, uint32_t n)
2192 {
2193 	ASSERT(n >= 1);
2194 	qlt->preq_ndx_to_fw += n;
2195 	qlt->preq_ndx_to_fw &= PRIORITY_QUEUE_ENTRIES - 1;
2196 	REG_WR32(qlt, REG_PREQ_IN_PTR, qlt->preq_ndx_to_fw);
2197 }
2198 
2199 /*
2200  * - Should not be called from Interrupt.
2201  * - A very hardware specific function. Does not touch driver state.
2202  * - Assumes that interrupts are disabled or not there.
2203  * - Expects that the caller makes sure that all activity has stopped
2204  *   and it's OK now to go ahead and reset the chip. Also the caller
2205  *   takes care of post reset damage control.
2206  * - called by initialize adapter() and dump_fw(for reset only).
2207  * - During attach() nothing much is happening and during initialize_adapter()
2208  *   the function (caller) does all the housekeeping so that this function
2209  *   can execute in peace.
2210  * - Returns 0 on success.
2211  */
2212 static fct_status_t
2213 qlt_reset_chip(qlt_state_t *qlt)
2214 {
2215 	int cntr;
2216 
2217 	EL(qlt, "initiated\n");
2218 
2219 	/* XXX: Switch off LEDs */
2220 
2221 	/* Disable Interrupts */
2222 	REG_WR32(qlt, REG_INTR_CTRL, 0);
2223 	(void) REG_RD32(qlt, REG_INTR_CTRL);
2224 	/* Stop DMA */
2225 	REG_WR32(qlt, REG_CTRL_STATUS, DMA_SHUTDOWN_CTRL | PCI_X_XFER_CTRL);
2226 
2227 	/* Wait for DMA to be stopped */
2228 	cntr = 0;
2229 	while (REG_RD32(qlt, REG_CTRL_STATUS) & DMA_ACTIVE_STATUS) {
2230 		delay(drv_usectohz(10000)); /* mostly 10ms is 1 tick */
2231 		cntr++;
2232 		/* 3 sec should be more than enough */
2233 		if (cntr == 300)
2234 			return (QLT_DMA_STUCK);
2235 	}
2236 
2237 	/* Reset the Chip */
2238 	REG_WR32(qlt, REG_CTRL_STATUS,
2239 	    DMA_SHUTDOWN_CTRL | PCI_X_XFER_CTRL | CHIP_SOFT_RESET);
2240 
2241 	qlt->qlt_link_up = 0;
2242 
2243 	drv_usecwait(100);
2244 
2245 	/* Wait for ROM firmware to initialize (0x0000) in mailbox 0 */
2246 	cntr = 0;
2247 	while (REG_RD16(qlt, REG_MBOX(0)) != 0) {
2248 		delay(drv_usectohz(10000));
2249 		cntr++;
2250 		/* 3 sec should be more than enough */
2251 		if (cntr == 300)
2252 			return (QLT_ROM_STUCK);
2253 	}
2254 	/* Disable Interrupts (Probably not needed) */
2255 	REG_WR32(qlt, REG_INTR_CTRL, 0);
2256 
2257 	return (QLT_SUCCESS);
2258 }
2259 /*
2260  * - Should not be called from Interrupt.
2261  * - A very hardware specific function. Does not touch driver state.
2262  * - Assumes that interrupts are disabled or not there.
2263  * - Expects that the caller makes sure that all activity has stopped
2264  *   and it's OK now to go ahead and reset the chip. Also the caller
2265  *   takes care of post reset damage control.
2266  * - called by initialize adapter() and dump_fw(for reset only).
2267  * - During attach() nothing much is happening and during initialize_adapter()
2268  *   the function (caller) does all the housekeeping so that this function
2269  *   can execute in peace.
2270  * - Returns 0 on success.
2271  */
2272 static fct_status_t
2273 qlt_download_fw(qlt_state_t *qlt)
2274 {
2275 	uint32_t start_addr;
2276 	fct_status_t ret;
2277 
2278 	EL(qlt, "initiated\n");
2279 
2280 	(void) qlt_reset_chip(qlt);
2281 
2282 	if (qlt->qlt_81xx_chip) {
2283 		qlt_mps_reset(qlt);
2284 	}
2285 
2286 	/* Load the two segments */
2287 	if (qlt->fw_code01 != NULL) {
2288 		ret = qlt_load_risc_ram(qlt, qlt->fw_code01, qlt->fw_length01,
2289 		    qlt->fw_addr01);
2290 		if (ret == QLT_SUCCESS) {
2291 			ret = qlt_load_risc_ram(qlt, qlt->fw_code02,
2292 			    qlt->fw_length02, qlt->fw_addr02);
2293 		}
2294 		start_addr = qlt->fw_addr01;
2295 	} else if (qlt->qlt_81xx_chip) {
2296 		ret = qlt_load_risc_ram(qlt, fw8100_code01, fw8100_length01,
2297 		    fw8100_addr01);
2298 		if (ret == QLT_SUCCESS) {
2299 			ret = qlt_load_risc_ram(qlt, fw8100_code02,
2300 			    fw8100_length02, fw8100_addr02);
2301 		}
2302 		start_addr = fw8100_addr01;
2303 	} else if (qlt->qlt_25xx_chip) {
2304 		ret = qlt_load_risc_ram(qlt, fw2500_code01, fw2500_length01,
2305 		    fw2500_addr01);
2306 		if (ret == QLT_SUCCESS) {
2307 			ret = qlt_load_risc_ram(qlt, fw2500_code02,
2308 			    fw2500_length02, fw2500_addr02);
2309 		}
2310 		start_addr = fw2500_addr01;
2311 	} else {
2312 		ret = qlt_load_risc_ram(qlt, fw2400_code01, fw2400_length01,
2313 		    fw2400_addr01);
2314 		if (ret == QLT_SUCCESS) {
2315 			ret = qlt_load_risc_ram(qlt, fw2400_code02,
2316 			    fw2400_length02, fw2400_addr02);
2317 		}
2318 		start_addr = fw2400_addr01;
2319 	}
2320 	if (ret != QLT_SUCCESS) {
2321 		EL(qlt, "qlt_load_risc_ram status=%llxh\n", ret);
2322 		return (ret);
2323 	}
2324 
2325 	/* Verify Checksum */
2326 	REG_WR16(qlt, REG_MBOX(0), MBC_VERIFY_CHECKSUM);
2327 	REG_WR16(qlt, REG_MBOX(1), MSW(start_addr));
2328 	REG_WR16(qlt, REG_MBOX(2), LSW(start_addr));
2329 	ret = qlt_raw_mailbox_command(qlt);
2330 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2331 	if (ret != QLT_SUCCESS) {
2332 		EL(qlt, "qlt_raw_mailbox_command=7h status=%llxh\n", ret);
2333 		return (ret);
2334 	}
2335 
2336 	/* Execute firmware */
2337 	REG_WR16(qlt, REG_MBOX(0), MBC_EXECUTE_FIRMWARE);
2338 	REG_WR16(qlt, REG_MBOX(1), MSW(start_addr));
2339 	REG_WR16(qlt, REG_MBOX(2), LSW(start_addr));
2340 	REG_WR16(qlt, REG_MBOX(3), 0);
2341 	REG_WR16(qlt, REG_MBOX(4), 1);	/* 25xx enable additional credits */
2342 	ret = qlt_raw_mailbox_command(qlt);
2343 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2344 	if (ret != QLT_SUCCESS) {
2345 		EL(qlt, "qlt_raw_mailbox_command=2h status=%llxh\n", ret);
2346 		return (ret);
2347 	}
2348 
2349 	/* Get revisions (About Firmware) */
2350 	REG_WR16(qlt, REG_MBOX(0), MBC_ABOUT_FIRMWARE);
2351 	ret = qlt_raw_mailbox_command(qlt);
2352 	qlt->fw_major = REG_RD16(qlt, REG_MBOX(1));
2353 	qlt->fw_minor = REG_RD16(qlt, REG_MBOX(2));
2354 	qlt->fw_subminor = REG_RD16(qlt, REG_MBOX(3));
2355 	qlt->fw_endaddrlo = REG_RD16(qlt, REG_MBOX(4));
2356 	qlt->fw_endaddrhi = REG_RD16(qlt, REG_MBOX(5));
2357 	qlt->fw_attr = REG_RD16(qlt, REG_MBOX(6));
2358 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2359 	if (ret != QLT_SUCCESS) {
2360 		EL(qlt, "qlt_raw_mailbox_command=8h status=%llxh\n", ret);
2361 		return (ret);
2362 	}
2363 
2364 	return (QLT_SUCCESS);
2365 }
2366 
2367 /*
2368  * Used only from qlt_download_fw().
2369  */
2370 static fct_status_t
2371 qlt_load_risc_ram(qlt_state_t *qlt, uint32_t *host_addr,
2372 				uint32_t word_count, uint32_t risc_addr)
2373 {
2374 	uint32_t words_sent = 0;
2375 	uint32_t words_being_sent;
2376 	uint32_t *cur_host_addr;
2377 	uint32_t cur_risc_addr;
2378 	uint64_t da;
2379 	fct_status_t ret;
2380 
2381 	while (words_sent < word_count) {
2382 		cur_host_addr = &(host_addr[words_sent]);
2383 		cur_risc_addr = risc_addr + (words_sent << 2);
2384 		words_being_sent = min(word_count - words_sent,
2385 		    TOTAL_DMA_MEM_SIZE >> 2);
2386 		ddi_rep_put32(qlt->queue_mem_acc_handle, cur_host_addr,
2387 		    (uint32_t *)qlt->queue_mem_ptr, words_being_sent,
2388 		    DDI_DEV_AUTOINCR);
2389 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle, 0,
2390 		    words_being_sent << 2, DDI_DMA_SYNC_FORDEV);
2391 		da = qlt->queue_mem_cookie.dmac_laddress;
2392 		REG_WR16(qlt, REG_MBOX(0), MBC_LOAD_RAM_EXTENDED);
2393 		REG_WR16(qlt, REG_MBOX(1), LSW(risc_addr));
2394 		REG_WR16(qlt, REG_MBOX(8), MSW(cur_risc_addr));
2395 		REG_WR16(qlt, REG_MBOX(3), LSW(LSD(da)));
2396 		REG_WR16(qlt, REG_MBOX(2), MSW(LSD(da)));
2397 		REG_WR16(qlt, REG_MBOX(7), LSW(MSD(da)));
2398 		REG_WR16(qlt, REG_MBOX(6), MSW(MSD(da)));
2399 		REG_WR16(qlt, REG_MBOX(5), LSW(words_being_sent));
2400 		REG_WR16(qlt, REG_MBOX(4), MSW(words_being_sent));
2401 		ret = qlt_raw_mailbox_command(qlt);
2402 		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2403 		if (ret != QLT_SUCCESS) {
2404 			EL(qlt, "qlt_raw_mailbox_command=0Bh status=%llxh\n",
2405 			    ret);
2406 			return (ret);
2407 		}
2408 		words_sent += words_being_sent;
2409 	}
2410 	return (QLT_SUCCESS);
2411 }
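
/*
 * For reference, the mailbox layout used by the MBC_LOAD_RAM_EXTENDED
 * writes above (this describes what the code does, it is not quoted
 * from a firmware spec): mbox 0 = command, mbox 1/8 = low word of
 * risc_addr / high word of cur_risc_addr, mbox 3/2 = DMA address bits
 * 15:0 / 31:16, mbox 7/6 = DMA address bits 47:32 / 63:48, and
 * mbox 5/4 = word count low/high.
 */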
2412 
2413 /*
2414  * Not used during normal operation. Only during driver init.
2415  * Assumes that interrupts are disabled and mailboxes are loaded.
2416  * Just triggers the mailbox command and waits for the completion.
2417  * Also expects that there is nothing else going on and we will only
2418  * get back a mailbox completion from firmware.
2419  * ---DOES NOT CLEAR INTERRUPT---
2420  * Used only from the code path originating from
2421  * qlt_reset_chip_and_download_fw()
2422  */
2423 static fct_status_t
2424 qlt_raw_mailbox_command(qlt_state_t *qlt)
2425 {
2426 	int cntr = 0;
2427 	uint32_t status;
2428 
2429 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(SET_HOST_TO_RISC_INTR));
2430 	while ((REG_RD32(qlt, REG_INTR_STATUS) & RISC_PCI_INTR_REQUEST) == 0) {
2431 		cntr++;
2432 		if (cntr == 100) {
2433 			return (QLT_MAILBOX_STUCK);
2434 		}
2435 		delay(drv_usectohz(10000));
2436 	}
2437 	status = (REG_RD32(qlt, REG_RISC_STATUS) & FW_INTR_STATUS_MASK);
2438 
2439 	if ((status == ROM_MBX_CMD_SUCCESSFUL) ||
2440 	    (status == ROM_MBX_CMD_NOT_SUCCESSFUL) ||
2441 	    (status == MBX_CMD_SUCCESSFUL) ||
2442 	    (status == MBX_CMD_NOT_SUCCESSFUL)) {
2443 		uint16_t mbox0 = REG_RD16(qlt, REG_MBOX(0));
2444 		if (mbox0 == QLT_MBX_CMD_SUCCESS) {
2445 			return (QLT_SUCCESS);
2446 		} else {
2447 			return (QLT_MBOX_FAILED | mbox0);
2448 		}
2449 	}
2450 	/* This is unexpected, dump a message */
2451 	cmn_err(CE_WARN, "qlt(%d): Unexpected intr status %llx",
2452 	    ddi_get_instance(qlt->dip), (unsigned long long)status);
2453 	return (QLT_UNEXPECTED_RESPONSE);
2454 }
2455 
2456 static mbox_cmd_t *
2457 qlt_alloc_mailbox_command(qlt_state_t *qlt, uint32_t dma_size)
2458 {
2459 	mbox_cmd_t *mcp;
2460 
2461 	mcp = (mbox_cmd_t *)kmem_zalloc(sizeof (mbox_cmd_t), KM_SLEEP);
2462 	if (dma_size) {
2463 		qlt_dmem_bctl_t *bctl;
2464 		uint64_t da;
2465 
2466 		mcp->dbuf = qlt_i_dmem_alloc(qlt, dma_size, &dma_size, 0);
2467 		if (mcp->dbuf == NULL) {
2468 			kmem_free(mcp, sizeof (*mcp));
2469 			return (NULL);
2470 		}
2471 		mcp->dbuf->db_data_size = dma_size;
2472 		ASSERT(mcp->dbuf->db_sglist_length == 1);
2473 
2474 		bctl = (qlt_dmem_bctl_t *)mcp->dbuf->db_port_private;
2475 		da = bctl->bctl_dev_addr;
2476 		/* This is the most common initialization of dma ptrs */
2477 		mcp->to_fw[3] = LSW(LSD(da));
2478 		mcp->to_fw[2] = MSW(LSD(da));
2479 		mcp->to_fw[7] = LSW(MSD(da));
2480 		mcp->to_fw[6] = MSW(MSD(da));
2481 		mcp->to_fw_mask |= BIT_2 | BIT_3 | BIT_7 | BIT_6;
2482 	}
2483 	mcp->to_fw_mask |= BIT_0;
2484 	mcp->from_fw_mask |= BIT_0;
2485 	return (mcp);
2486 }
2487 
2488 void
2489 qlt_free_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp)
2490 {
2491 	if (mcp->dbuf)
2492 		qlt_i_dmem_free(qlt, mcp->dbuf);
2493 	kmem_free(mcp, sizeof (*mcp));
2494 }
2495 
2496 /*
2497  * This can sleep. Should never be called from interrupt context.
2498  */
2499 static fct_status_t
2500 qlt_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp)
2501 {
2502 	int	retries;
2503 	int	i;
2504 	char	info[QLT_INFO_LEN];
2505 
2506 	if (curthread->t_flag & T_INTR_THREAD) {
2507 		ASSERT(0);
2508 		return (QLT_MBOX_FAILED);
2509 	}
2510 
2511 	mutex_enter(&qlt->mbox_lock);
2512 	/* See if mailboxes are still uninitialized */
2513 	if (qlt->mbox_io_state == MBOX_STATE_UNKNOWN) {
2514 		mutex_exit(&qlt->mbox_lock);
2515 		return (QLT_MBOX_NOT_INITIALIZED);
2516 	}
2517 
2518 	/* Wait to grab the mailboxes */
2519 	for (retries = 0; qlt->mbox_io_state != MBOX_STATE_READY;
2520 	    retries++) {
2521 		cv_wait(&qlt->mbox_cv, &qlt->mbox_lock);
2522 		if ((retries > 5) ||
2523 		    (qlt->mbox_io_state == MBOX_STATE_UNKNOWN)) {
2524 			mutex_exit(&qlt->mbox_lock);
2525 			return (QLT_MBOX_BUSY);
2526 		}
2527 	}
2528 	/* Make sure we always ask for mailbox 0 */
2529 	mcp->from_fw_mask |= BIT_0;
2530 
2531 	/* Load mailboxes, set state and generate RISC interrupt */
2532 	qlt->mbox_io_state = MBOX_STATE_CMD_RUNNING;
2533 	qlt->mcp = mcp;
2534 	for (i = 0; i < MAX_MBOXES; i++) {
2535 		if (mcp->to_fw_mask & ((uint32_t)1 << i))
2536 			REG_WR16(qlt, REG_MBOX(i), mcp->to_fw[i]);
2537 	}
2538 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(SET_HOST_TO_RISC_INTR));
2539 
2540 qlt_mbox_wait_loop:;
2541 	/* Wait for mailbox command completion */
2542 	if (cv_timedwait(&qlt->mbox_cv, &qlt->mbox_lock, ddi_get_lbolt()
2543 	    + drv_usectohz(MBOX_TIMEOUT)) < 0) {
2544 		(void) snprintf(info, sizeof (info),
2545 		    "qlt_mailbox_command: qlt-%p, "
2546 		    "cmd-0x%02X timed out", (void *)qlt, qlt->mcp->to_fw[0]);
2547 		qlt->mcp = NULL;
2548 		qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
2549 		mutex_exit(&qlt->mbox_lock);
2550 
2551 		/*
2552 		 * XXX Throw HBA fatal error event
2553 		 */
2554 		(void) fct_port_shutdown(qlt->qlt_port, STMF_RFLAG_FATAL_ERROR |
2555 		    STMF_RFLAG_RESET | STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
2556 		return (QLT_MBOX_TIMEOUT);
2557 	}
2558 	if (qlt->mbox_io_state == MBOX_STATE_CMD_RUNNING)
2559 		goto qlt_mbox_wait_loop;
2560 
2561 	qlt->mcp = NULL;
2562 
2563 	/* Make sure it's a completion */
2564 	if (qlt->mbox_io_state != MBOX_STATE_CMD_DONE) {
2565 		ASSERT(qlt->mbox_io_state == MBOX_STATE_UNKNOWN);
2566 		mutex_exit(&qlt->mbox_lock);
2567 		return (QLT_MBOX_ABORTED);
2568 	}
2569 
2570 	/* MBox command completed. Clear state, return based on mbox 0 */
2571 	/* Mailboxes are already loaded by interrupt routine */
2572 	qlt->mbox_io_state = MBOX_STATE_READY;
2573 	mutex_exit(&qlt->mbox_lock);
2574 	if (mcp->from_fw[0] != QLT_MBX_CMD_SUCCESS)
2575 		return (QLT_MBOX_FAILED | mcp->from_fw[0]);
2576 
2577 	return (QLT_SUCCESS);
2578 }
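
/*
 * Illustrative sketch (not part of the original driver): the usual
 * alloc -> load -> issue -> read -> free life cycle around
 * qlt_mailbox_command(), modeled on qlt_force_lip() earlier in this
 * file. The command code 0x0008 and the read of mailbox 1 are
 * placeholders, not a documented firmware command.
 */
#ifdef QLT_EXAMPLE_SKETCH
static fct_status_t
qlt_example_mbox_roundtrip(qlt_state_t *qlt)
{
	mbox_cmd_t	*mcp;
	fct_status_t	ret;

	mcp = qlt_alloc_mailbox_command(qlt, 0);	/* no DMA buffer */
	if (mcp == NULL)
		return (QLT_FAILURE);
	mcp->to_fw[0] = 0x0008;			/* placeholder command code */
	mcp->from_fw_mask |= BIT_1;		/* ask for mailbox 1 back */
	ret = qlt_mailbox_command(qlt, mcp);	/* sleeps; never from intr */
	if (ret == QLT_SUCCESS) {
		/* from_fw[] was filled in by qlt_isr() before we woke up */
		EL(qlt, "example mbox1=%xh\n", mcp->from_fw[1]);
	}
	qlt_free_mailbox_command(qlt, mcp);
	return (ret);
}
#endif	/* QLT_EXAMPLE_SKETCH */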
2579 
2580 /*
2581  * **SHOULD ONLY BE CALLED FROM INTERRUPT CONTEXT. DO NOT CALL ELSEWHERE**
2582  */
2583 /* ARGSUSED */
2584 static uint_t
2585 qlt_isr(caddr_t arg, caddr_t arg2)
2586 {
2587 	qlt_state_t	*qlt = (qlt_state_t *)arg;
2588 	uint32_t	risc_status, intr_type;
2589 	int		i;
2590 	int		intr_loop_count;
2591 	char		info[QLT_INFO_LEN];
2592 
2593 	risc_status = REG_RD32(qlt, REG_RISC_STATUS);
2594 	if (!mutex_tryenter(&qlt->intr_lock)) {
2595 		/*
2596 		 * Normally we will always get this lock. If tryenter
2597 		 * fails, it means that the driver is trying to do
2598 		 * some cleanup and is masking the intr, but some intr
2599 		 * has sneaked in between. See if our device has generated
2600 		 * this intr. If so, wait a bit and return claimed.
2601 		 * If not, return claimed if this is the 1st instance
2602 		 * of an interrupt after the driver has grabbed the lock.
2603 		 */
2604 		if (risc_status & BIT_15) {
2605 			drv_usecwait(10);
2606 			return (DDI_INTR_CLAIMED);
2607 		} else if (qlt->intr_sneak_counter) {
2608 			qlt->intr_sneak_counter--;
2609 			return (DDI_INTR_CLAIMED);
2610 		} else {
2611 			return (DDI_INTR_UNCLAIMED);
2612 		}
2613 	}
2614 	if (((risc_status & BIT_15) == 0) ||
2615 	    (qlt->qlt_intr_enabled == 0)) {
2616 		/*
2617 		 * This might be a pure coincidence: we are operating
2618 		 * in an interrupt-disabled mode and another device
2619 		 * sharing the interrupt line has generated an interrupt
2620 		 * while an interrupt from our device might be pending. Just
2621 		 * ignore it and let the code handling the interrupt
2622 		 * disabled mode handle it.
2623 		 */
2624 		mutex_exit(&qlt->intr_lock);
2625 		return (DDI_INTR_UNCLAIMED);
2626 	}
2627 
2628 	/*
2629 	 * XXX take care of the MSI case; disable intrs.
2630 	 * It is going to be complicated because of the max iterations,
2631 	 * as the HBA will have posted the intr which did not go on PCI,
2632 	 * but we did not service it either because of the max iterations.
2633 	 * Maybe offload the intr to a different thread.
2634 	 */
2635 	intr_loop_count = 0;
2636 
2637 	REG_WR32(qlt, REG_INTR_CTRL, 0);
2638 
2639 intr_again:;
2640 
2641 	/* check for risc pause */
2642 	if (risc_status & BIT_8) {
2643 		EL(qlt, "Risc Pause status=%xh\n", risc_status);
2644 		cmn_err(CE_WARN, "qlt(%d): Risc Pause %08x",
2645 		    qlt->instance, risc_status);
2646 		(void) snprintf(info, sizeof (info), "Risc Pause %08x",
2647 		    risc_status);
2648 		(void) fct_port_shutdown(qlt->qlt_port,
2649 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
2650 		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
2651 	}
2652 
2653 	/* First check for high performance path */
2654 	intr_type = risc_status & 0xff;
2655 	if (intr_type == 0x1D) {
2656 		qlt->atio_ndx_from_fw = (uint16_t)
2657 		    REG_RD32(qlt, REG_ATIO_IN_PTR);
2658 		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2659 		qlt->resp_ndx_from_fw = risc_status >> 16;
2660 		qlt_handle_atio_queue_update(qlt);
2661 		qlt_handle_resp_queue_update(qlt);
2662 	} else if (intr_type == 0x1C) {
2663 		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2664 		qlt->atio_ndx_from_fw = (uint16_t)(risc_status >> 16);
2665 		qlt_handle_atio_queue_update(qlt);
2666 	} else if (intr_type == 0x13) {
2667 		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2668 		qlt->resp_ndx_from_fw = risc_status >> 16;
2669 		qlt_handle_resp_queue_update(qlt);
2670 	} else if (intr_type == 0x12) {
2671 		uint16_t code = (uint16_t)(risc_status >> 16);
2672 		uint16_t mbox1 = REG_RD16(qlt, REG_MBOX(1));
2673 		uint16_t mbox2 = REG_RD16(qlt, REG_MBOX(2));
2674 		uint16_t mbox3 = REG_RD16(qlt, REG_MBOX(3));
2675 		uint16_t mbox4 = REG_RD16(qlt, REG_MBOX(4));
2676 		uint16_t mbox5 = REG_RD16(qlt, REG_MBOX(5));
2677 		uint16_t mbox6 = REG_RD16(qlt, REG_MBOX(6));
2678 
2679 		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2680 		stmf_trace(qlt->qlt_port_alias, "Async event %x mb1=%x mb2=%x,"
2681 		    " mb3=%x, mb5=%x, mb6=%x", code, mbox1, mbox2, mbox3,
2682 		    mbox5, mbox6);
2683 		EL(qlt, "Async event %x mb1=%x mb2=%x, mb3=%x, mb5=%x, mb6=%x",
2684 		    code, mbox1, mbox2, mbox3, mbox5, mbox6);
2685 
2686 		if ((code == 0x8030) || (code == 0x8010) || (code == 0x8013)) {
2687 			if (qlt->qlt_link_up) {
2688 				fct_handle_event(qlt->qlt_port,
2689 				    FCT_EVENT_LINK_RESET, 0, 0);
2690 			}
2691 		} else if (code == 0x8012) {
2692 			qlt->qlt_link_up = 0;
2693 			fct_handle_event(qlt->qlt_port, FCT_EVENT_LINK_DOWN,
2694 			    0, 0);
2695 		} else if (code == 0x8011) {
2696 			switch (mbox1) {
2697 			case 0: qlt->link_speed = PORT_SPEED_1G;
2698 				break;
2699 			case 1: qlt->link_speed = PORT_SPEED_2G;
2700 				break;
2701 			case 3: qlt->link_speed = PORT_SPEED_4G;
2702 				break;
2703 			case 4: qlt->link_speed = PORT_SPEED_8G;
2704 				break;
2705 			case 0x13: qlt->link_speed = PORT_SPEED_10G;
2706 				break;
2707 			default:
2708 				qlt->link_speed = PORT_SPEED_UNKNOWN;
2709 			}
2710 			qlt->qlt_link_up = 1;
2711 			fct_handle_event(qlt->qlt_port, FCT_EVENT_LINK_UP,
2712 			    0, 0);
2713 		} else if ((code == 0x8002) || (code == 0x8003) ||
2714 		    (code == 0x8004) || (code == 0x8005)) {
2715 			(void) snprintf(info, sizeof (info),
2716 			    "Got %04x, mb1=%x mb2=%x mb5=%x mb6=%x",
2717 			    code, mbox1, mbox2, mbox5, mbox6);
2718 			(void) fct_port_shutdown(qlt->qlt_port,
2719 			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
2720 			    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
2721 		} else if (code == 0x800F) {
2722 			(void) snprintf(info, sizeof (info),
2723 			    "Got 800F, mb1=%x mb2=%x mb3=%x",
2724 			    mbox1, mbox2, mbox3);
2725 
2726 			if (mbox1 != 1) {
2727 				/* issue "verify fw" */
2728 				qlt_verify_fw(qlt);
2729 			}
2730 		} else if (code == 0x8101) {
2731 			(void) snprintf(info, sizeof (info),
2732 			    "IDC Req Rcvd:%04x, mb1=%x mb2=%x mb3=%x",
2733 			    code, mbox1, mbox2, mbox3);
2734 
2735 			/* check if "ACK" is required (timeout != 0) */
2736 			if (mbox1 & 0x0f00) {
2737 				caddr_t	req;
2738 
2739 				/*
2740 				 * Ack the request (queue work to do it?)
2741 				 * using a mailbox iocb
2742 				 */
2743 				mutex_enter(&qlt->req_lock);
2744 				req = qlt_get_req_entries(qlt, 1);
2745 				if (req) {
2746 					bzero(req, IOCB_SIZE);
2747 					req[0] = 0x39; req[1] = 1;
2748 					QMEM_WR16(qlt, req+8, 0x101);
2749 					QMEM_WR16(qlt, req+10, mbox1);
2750 					QMEM_WR16(qlt, req+12, mbox2);
2751 					QMEM_WR16(qlt, req+14, mbox3);
2752 					QMEM_WR16(qlt, req+16, mbox4);
2753 					QMEM_WR16(qlt, req+18, mbox5);
2754 					QMEM_WR16(qlt, req+20, mbox6);
2755 					qlt_submit_req_entries(qlt, 1);
2756 				} else {
2757 					(void) snprintf(info, sizeof (info),
2758 					    "IDC ACK failed");
2759 				}
2760 				mutex_exit(&qlt->req_lock);
2761 			}
2762 		}
2763 	} else if ((intr_type == 0x10) || (intr_type == 0x11)) {
2764 		/* Handle mailbox completion */
2765 		mutex_enter(&qlt->mbox_lock);
2766 		if (qlt->mbox_io_state != MBOX_STATE_CMD_RUNNING) {
2767 			cmn_err(CE_WARN, "qlt(%d): mailbox completion received"
2768 			    " when driver wasn't waiting for it %d",
2769 			    qlt->instance, qlt->mbox_io_state);
2770 		} else {
2771 			for (i = 0; i < MAX_MBOXES; i++) {
2772 				if (qlt->mcp->from_fw_mask &
2773 				    (((uint32_t)1) << i)) {
2774 					qlt->mcp->from_fw[i] =
2775 					    REG_RD16(qlt, REG_MBOX(i));
2776 				}
2777 			}
2778 			qlt->mbox_io_state = MBOX_STATE_CMD_DONE;
2779 		}
2780 		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2781 		cv_broadcast(&qlt->mbox_cv);
2782 		mutex_exit(&qlt->mbox_lock);
2783 	} else {
2784 		cmn_err(CE_WARN, "qlt(%d): Unknown intr type 0x%x",
2785 		    qlt->instance, intr_type);
2786 		REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2787 	}
2788 
2789 	(void) REG_RD32(qlt, REG_HCCR);	/* PCI Posting */
2790 	risc_status = REG_RD32(qlt, REG_RISC_STATUS);
2791 	if ((risc_status & BIT_15) &&
2792 	    (++intr_loop_count < QLT_MAX_ITERATIONS_PER_INTR)) {
2793 		goto intr_again;
2794 	}
2795 
2796 	REG_WR32(qlt, REG_INTR_CTRL, ENABLE_RISC_INTR);
2797 
2798 	mutex_exit(&qlt->intr_lock);
2799 	return (DDI_INTR_CLAIMED);
2800 }
2801 
2802 /* **************** NVRAM Functions ********************** */
2803 
2804 fct_status_t
2805 qlt_read_flash_word(qlt_state_t *qlt, uint32_t faddr, uint32_t *bp)
2806 {
2807 	uint32_t	timer;
2808 
2809 	/* Clear access error flag */
2810 	REG_WR32(qlt, REG_CTRL_STATUS,
2811 	    REG_RD32(qlt, REG_CTRL_STATUS) | FLASH_ERROR);
2812 
2813 	REG_WR32(qlt, REG_FLASH_ADDR, faddr & ~BIT_31);
2814 
2815 	/* Wait for READ cycle to complete. */
2816 	for (timer = 3000; timer; timer--) {
2817 		if (REG_RD32(qlt, REG_FLASH_ADDR) & BIT_31) {
2818 			break;
2819 		}
2820 		drv_usecwait(10);
2821 	}
2822 	if (timer == 0) {
2823 		EL(qlt, "flash timeout\n");
2824 		return (QLT_FLASH_TIMEOUT);
2825 	} else if (REG_RD32(qlt, REG_CTRL_STATUS) & FLASH_ERROR) {
2826 		EL(qlt, "flash access error\n");
2827 		return (QLT_FLASH_ACCESS_ERROR);
2828 	}
2829 
2830 	*bp = REG_RD32(qlt, REG_FLASH_DATA);
2831 
2832 	return (QLT_SUCCESS);
2833 }
2834 
2835 fct_status_t
2836 qlt_read_nvram(qlt_state_t *qlt)
2837 {
2838 	uint32_t		index, addr, chksum;
2839 	uint32_t		val, *ptr;
2840 	fct_status_t		ret;
2841 	qlt_nvram_t		*nv;
2842 	uint64_t		empty_node_name = 0;
2843 
2844 	if (qlt->qlt_81xx_chip) {
2845 		addr = REG_RD32(qlt, REG_CTRL_STATUS) & BIT_12 ?
2846 		    QLT81_NVRAM_FUNC1_ADDR : QLT81_NVRAM_FUNC0_ADDR;
2847 	} else if (qlt->qlt_25xx_chip) {
2848 		addr = REG_RD32(qlt, REG_CTRL_STATUS) & FUNCTION_NUMBER ?
2849 		    QLT25_NVRAM_FUNC1_ADDR : QLT25_NVRAM_FUNC0_ADDR;
2850 	} else {
2851 		addr = REG_RD32(qlt, REG_CTRL_STATUS) & FUNCTION_NUMBER ?
2852 		    NVRAM_FUNC1_ADDR : NVRAM_FUNC0_ADDR;
2853 	}
2854 	mutex_enter(&qlt_global_lock);
2855 
2856 	/* Pause RISC. */
2857 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(SET_RISC_PAUSE));
2858 	(void) REG_RD32(qlt, REG_HCCR);	/* PCI Posting. */
2859 
2860 	/* Get NVRAM data and calculate checksum. */
2861 	ptr = (uint32_t *)qlt->nvram;
2862 	chksum = 0;
2863 	for (index = 0; index < sizeof (qlt_nvram_t) / 4; index++) {
2864 		ret = qlt_read_flash_word(qlt, addr++, &val);
2865 		if (ret != QLT_SUCCESS) {
2866 			EL(qlt, "qlt_read_flash_word, status=%llxh\n", ret);
2867 			mutex_exit(&qlt_global_lock);
2868 			return (ret);
2869 		}
2870 		chksum += val;
2871 		*ptr = LE_32(val);
2872 		ptr++;
2873 	}
2874 
2875 	/* Release RISC Pause */
2876 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_PAUSE));
2877 	(void) REG_RD32(qlt, REG_HCCR);	/* PCI Posting. */
2878 
2879 	mutex_exit(&qlt_global_lock);
2880 
2881 	/* Sanity check NVRAM Data */
2882 	nv = qlt->nvram;
2883 	if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
2884 	    nv->id[2] != 'P' || nv->id[3] != ' ' ||
2885 	    (nv->nvram_version[0] | nv->nvram_version[1]) == 0) {
2886 		EL(qlt, "chksum=%xh, id=%c%c%c%c, ver=%02d%02d\n", chksum,
2887 		    nv->id[0], nv->id[1], nv->id[2], nv->id[3],
2888 		    nv->nvram_version[1], nv->nvram_version[0]);
2889 		return (QLT_BAD_NVRAM_DATA);
2890 	}
2891 
2892 	/* If node name is zero, hand craft it from port name */
2893 	if (bcmp(nv->node_name, &empty_node_name, 8) == 0) {
2894 		bcopy(nv->port_name, nv->node_name, 8);
2895 		nv->node_name[0] = (uint8_t)(nv->node_name[0] & ~BIT_0);
2896 		nv->port_name[0] = (uint8_t)(nv->node_name[0] | BIT_0);
2897 	}
2898 
2899 	return (QLT_SUCCESS);
2900 }
2901 
2902 uint32_t
2903 qlt_sync_atio_queue(qlt_state_t *qlt)
2904 {
2905 	uint32_t total_ent;
2906 
2907 	if (qlt->atio_ndx_from_fw > qlt->atio_ndx_to_fw) {
2908 		total_ent = qlt->atio_ndx_from_fw - qlt->atio_ndx_to_fw;
2909 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle, ATIO_QUEUE_OFFSET
2910 		    + (qlt->atio_ndx_to_fw << 6), total_ent << 6,
2911 		    DDI_DMA_SYNC_FORCPU);
2912 	} else {
2913 		total_ent = ATIO_QUEUE_ENTRIES - qlt->atio_ndx_to_fw +
2914 		    qlt->atio_ndx_from_fw;
2915 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle, ATIO_QUEUE_OFFSET
2916 		    + (qlt->atio_ndx_to_fw << 6), (uint_t)(ATIO_QUEUE_ENTRIES -
2917 		    qlt->atio_ndx_to_fw) << 6, DDI_DMA_SYNC_FORCPU);
2918 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
2919 		    ATIO_QUEUE_OFFSET, (uint_t)(qlt->atio_ndx_from_fw << 6),
2920 		    DDI_DMA_SYNC_FORCPU);
2921 	}
2922 	return (total_ent);
2923 }
2924 
2925 void
2926 qlt_handle_atio_queue_update(qlt_state_t *qlt)
2927 {
2928 	uint32_t total_ent;
2929 
2930 	if (qlt->atio_ndx_to_fw == qlt->atio_ndx_from_fw)
2931 		return;
2932 
2933 	total_ent = qlt_sync_atio_queue(qlt);
2934 
2935 	do {
2936 		uint8_t *atio = (uint8_t *)&qlt->atio_ptr[
2937 		    qlt->atio_ndx_to_fw << 6];
2938 		uint32_t ent_cnt;
2939 
2940 		ent_cnt = (uint32_t)(atio[1]);
2941 		if (ent_cnt > total_ent) {
2942 			break;
2943 		}
2944 		switch ((uint8_t)(atio[0])) {
2945 		case 0x0d:	/* INOT */
2946 			qlt_handle_inot(qlt, atio);
2947 			break;
2948 		case 0x06:	/* ATIO */
2949 			qlt_handle_atio(qlt, atio);
2950 			break;
2951 		default:
2952 			EL(qlt, "atio_queue_update atio[0]=%xh\n", atio[0]);
2953 			cmn_err(CE_WARN, "qlt_handle_atio_queue_update: "
2954 			    "atio[0] is %x, qlt-%p", atio[0], (void *)qlt);
2955 			break;
2956 		}
2957 		qlt->atio_ndx_to_fw = (uint16_t)(
2958 		    (qlt->atio_ndx_to_fw + ent_cnt) & (ATIO_QUEUE_ENTRIES - 1));
2959 		total_ent -= ent_cnt;
2960 	} while (total_ent > 0);
2961 	REG_WR32(qlt, REG_ATIO_OUT_PTR, qlt->atio_ndx_to_fw);
2962 }
2963 
2964 uint32_t
2965 qlt_sync_resp_queue(qlt_state_t *qlt)
2966 {
2967 	uint32_t total_ent;
2968 
2969 	if (qlt->resp_ndx_from_fw > qlt->resp_ndx_to_fw) {
2970 		total_ent = qlt->resp_ndx_from_fw - qlt->resp_ndx_to_fw;
2971 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
2972 		    RESPONSE_QUEUE_OFFSET
2973 		    + (qlt->resp_ndx_to_fw << 6), total_ent << 6,
2974 		    DDI_DMA_SYNC_FORCPU);
2975 	} else {
2976 		total_ent = RESPONSE_QUEUE_ENTRIES - qlt->resp_ndx_to_fw +
2977 		    qlt->resp_ndx_from_fw;
2978 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
2979 		    RESPONSE_QUEUE_OFFSET
2980 		    + (qlt->resp_ndx_to_fw << 6), (RESPONSE_QUEUE_ENTRIES -
2981 		    qlt->resp_ndx_to_fw) << 6, DDI_DMA_SYNC_FORCPU);
2982 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
2983 		    RESPONSE_QUEUE_OFFSET,
2984 		    qlt->resp_ndx_from_fw << 6, DDI_DMA_SYNC_FORCPU);
2985 	}
2986 	return (total_ent);
2987 }
2988 
2989 void
2990 qlt_handle_resp_queue_update(qlt_state_t *qlt)
2991 {
2992 	uint32_t total_ent;
2993 	uint8_t c;
2994 
2995 	if (qlt->resp_ndx_to_fw == qlt->resp_ndx_from_fw)
2996 		return;
2997 
2998 	total_ent = qlt_sync_resp_queue(qlt);
2999 
3000 	do {
3001 		caddr_t resp = &qlt->resp_ptr[qlt->resp_ndx_to_fw << 6];
3002 		uint32_t ent_cnt;
3003 
3004 		ent_cnt = (uint32_t)(resp[0] == 0x51 ? resp[1] : 1);
3005 		if (ent_cnt > total_ent) {
3006 			break;
3007 		}
3008 		switch ((uint8_t)(resp[0])) {
3009 		case 0x12:	/* CTIO completion */
3010 			qlt_handle_ctio_completion(qlt, (uint8_t *)resp);
3011 			break;
3012 		case 0x0e:	/* NACK */
3013 			/* Do Nothing */
3014 			break;
3015 		case 0x1b:	/* Verify FW */
3016 			qlt_handle_verify_fw_completion(qlt, (uint8_t *)resp);
3017 			break;
3018 		case 0x29:	/* CT PassThrough */
3019 			qlt_handle_ct_completion(qlt, (uint8_t *)resp);
3020 			break;
3021 		case 0x33:	/* Abort IO IOCB completion */
3022 			qlt_handle_sol_abort_completion(qlt, (uint8_t *)resp);
3023 			break;
3024 		case 0x51:	/* PUREX */
3025 			qlt_handle_purex(qlt, (uint8_t *)resp);
3026 			break;
3027 		case 0x52:
3028 			qlt_handle_dereg_completion(qlt, (uint8_t *)resp);
3029 			break;
3030 		case 0x53:	/* ELS passthrough */
3031 			c = (uint8_t)(((uint8_t)resp[0x1f]) >> 5);
3032 			if (c == 0) {
3033 				qlt_handle_sol_els_completion(qlt,
3034 				    (uint8_t *)resp);
3035 			} else if (c == 3) {
3036 				qlt_handle_unsol_els_abort_completion(qlt,
3037 				    (uint8_t *)resp);
3038 			} else {
3039 				qlt_handle_unsol_els_completion(qlt,
3040 				    (uint8_t *)resp);
3041 			}
3042 			break;
3043 		case 0x54:	/* ABTS received */
3044 			qlt_handle_rcvd_abts(qlt, (uint8_t *)resp);
3045 			break;
3046 		case 0x55:	/* ABTS completion */
3047 			qlt_handle_abts_completion(qlt, (uint8_t *)resp);
3048 			break;
3049 		default:
3050 			EL(qlt, "response entry=%xh\n", resp[0]);
3051 			break;
3052 		}
3053 		qlt->resp_ndx_to_fw = (qlt->resp_ndx_to_fw + ent_cnt) &
3054 		    (RESPONSE_QUEUE_ENTRIES - 1);
3055 		total_ent -= ent_cnt;
3056 	} while (total_ent > 0);
3057 	REG_WR32(qlt, REG_RESP_OUT_PTR, qlt->resp_ndx_to_fw);
3058 }
3059 
3060 fct_status_t
3061 qlt_portid_to_handle(qlt_state_t *qlt, uint32_t id, uint16_t cmd_handle,
3062 				uint16_t *ret_handle)
3063 {
3064 	fct_status_t ret;
3065 	mbox_cmd_t *mcp;
3066 	uint16_t n;
3067 	uint16_t h;
3068 	uint32_t ent_id;
3069 	uint8_t *p;
3070 	int found = 0;
3071 
3072 	mcp = qlt_alloc_mailbox_command(qlt, 2048 * 8);
3073 	if (mcp == NULL) {
3074 		return (STMF_ALLOC_FAILURE);
3075 	}
3076 	mcp->to_fw[0] = MBC_GET_ID_LIST;
3077 	mcp->to_fw[8] = 2048 * 8;
3078 	mcp->to_fw[9] = 0;
3079 	mcp->to_fw_mask |= BIT_9 | BIT_8;
3080 	mcp->from_fw_mask |= BIT_1 | BIT_2;
3081 
3082 	ret = qlt_mailbox_command(qlt, mcp);
3083 	if (ret != QLT_SUCCESS) {
3084 		EL(qlt, "qlt_mailbox_command=7Ch status=%llxh\n", ret);
3085 		cmn_err(CE_WARN, "GET ID list failed, ret = %llx, mb0=%x, "
3086 		    "mb1=%x, mb2=%x", (long long)ret, mcp->from_fw[0],
3087 		    mcp->from_fw[1], mcp->from_fw[2]);
3088 		qlt_free_mailbox_command(qlt, mcp);
3089 		return (ret);
3090 	}
3091 	qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORCPU);
3092 	p = mcp->dbuf->db_sglist[0].seg_addr;
3093 	for (n = 0; n < mcp->from_fw[1]; n++) {
3094 		ent_id = LE_32(*((uint32_t *)p)) & 0xFFFFFF;
3095 		h = (uint16_t)((uint16_t)p[4] | (((uint16_t)p[5]) << 8));
3096 		if (ent_id == id) {
3097 			found = 1;
3098 			*ret_handle = h;
3099 			if ((cmd_handle != FCT_HANDLE_NONE) &&
3100 			    (cmd_handle != h)) {
3101 				cmn_err(CE_WARN, "login for portid %x came in "
3102 				    "with handle %x, while the portid was "
3103 				    "already using a different handle %x",
3104 				    id, cmd_handle, h);
3105 				qlt_free_mailbox_command(qlt, mcp);
3106 				return (QLT_FAILURE);
3107 			}
3108 			break;
3109 		}
3110 		if ((cmd_handle != FCT_HANDLE_NONE) && (h == cmd_handle)) {
3111 			cmn_err(CE_WARN, "login for portid %x came in with "
3112 			    "handle %x, while the handle was already in use "
3113 			    "for portid %x", id, cmd_handle, ent_id);
3114 			qlt_free_mailbox_command(qlt, mcp);
3115 			return (QLT_FAILURE);
3116 		}
3117 		p += 8;
3118 	}
3119 	if (!found) {
3120 		*ret_handle = cmd_handle;
3121 	}
3122 	qlt_free_mailbox_command(qlt, mcp);
3123 	return (FCT_SUCCESS);
3124 }
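
/*
 * Illustrative sketch (not part of the original driver): the 8-byte
 * entry layout that the MBC_GET_ID_LIST parsing loop above assumes.
 * Field names are hypothetical; they simply mirror how the bytes are
 * picked apart above (24-bit little-endian port id in bytes 0-2,
 * 16-bit handle in bytes 4-5, 8 bytes per entry).
 */
#ifdef QLT_EXAMPLE_SKETCH
typedef struct qlt_example_id_entry {
	uint8_t		portid[3];	/* 24-bit port id, little endian */
	uint8_t		rsvd0;
	uint8_t		handle_lo;	/* handle, low byte */
	uint8_t		handle_hi;	/* handle, high byte */
	uint8_t		rsvd1[2];
} qlt_example_id_entry_t;
#endif	/* QLT_EXAMPLE_SKETCH */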
3125 
3126 /* ARGSUSED */
3127 fct_status_t
3128 qlt_fill_plogi_req(fct_local_port_t *port, fct_remote_port_t *rp,
3129 				fct_cmd_t *login)
3130 {
3131 	uint8_t *p;
3132 
3133 	p = ((fct_els_t *)login->cmd_specific)->els_req_payload;
3134 	p[0] = ELS_OP_PLOGI;
3135 	*((uint16_t *)(&p[4])) = 0x2020;
3136 	p[7] = 3;
3137 	p[8] = 0x88;
3138 	p[10] = 8;
3139 	p[13] = 0xff; p[15] = 0x1f;
3140 	p[18] = 7; p[19] = 0xd0;
3141 
3142 	bcopy(port->port_pwwn, p + 20, 8);
3143 	bcopy(port->port_nwwn, p + 28, 8);
3144 
3145 	p[68] = 0x80;
3146 	p[74] = 8;
3147 	p[77] = 0xff;
3148 	p[81] = 1;
3149 
3150 	return (FCT_SUCCESS);
3151 }
3152 
3153 /* ARGSUSED */
3154 fct_status_t
3155 qlt_fill_plogi_resp(fct_local_port_t *port, fct_remote_port_t *rp,
3156 				fct_cmd_t *login)
3157 {
3158 	return (FCT_SUCCESS);
3159 }
3160 
3161 fct_status_t
3162 qlt_register_remote_port(fct_local_port_t *port, fct_remote_port_t *rp,
3163     fct_cmd_t *login)
3164 {
3165 	uint16_t h;
3166 	fct_status_t ret;
3167 	qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
3168 
3169 	switch (rp->rp_id) {
3170 	case 0xFFFFFC:	h = 0x7FC; break;
3171 	case 0xFFFFFD:	h = 0x7FD; break;
3172 	case 0xFFFFFE:	h = 0x7FE; break;
3173 	case 0xFFFFFF:	h = 0x7FF; break;
3174 	default:
3175 		ret = qlt_portid_to_handle(qlt, rp->rp_id,
3176 		    login->cmd_rp_handle, &h);
3177 		if (ret != FCT_SUCCESS) {
3178 			EL(qlt, "qlt_portid_to_handle, status=%llxh\n", ret);
3179 			return (ret);
3180 		}
3181 	}
3182 
3183 	if (login->cmd_type == FCT_CMD_SOL_ELS) {
3184 		ret = qlt_fill_plogi_req(port, rp, login);
3185 	} else {
3186 		ret = qlt_fill_plogi_resp(port, rp, login);
3187 	}
3188 
3189 	if (ret != FCT_SUCCESS) {
3190 		EL(qlt, "qlt_fill_plogi, status=%llxh\n", ret);
3191 		return (ret);
3192 	}
3193 
3194 	if (h == FCT_HANDLE_NONE)
3195 		return (FCT_SUCCESS);
3196 
3197 	if (rp->rp_handle == FCT_HANDLE_NONE) {
3198 		rp->rp_handle = h;
3199 		return (FCT_SUCCESS);
3200 	}
3201 
3202 	if (rp->rp_handle == h)
3203 		return (FCT_SUCCESS);
3204 
3205 	EL(qlt, "rp_handle=%xh != h=%xh\n", rp->rp_handle, h);
3206 	return (FCT_FAILURE);
3207 }
3208 /* invoked in single thread */
3209 fct_status_t
3210 qlt_deregister_remote_port(fct_local_port_t *port, fct_remote_port_t *rp)
3211 {
3212 	uint8_t *req;
3213 	qlt_state_t *qlt;
3214 	clock_t	dereg_req_timer;
3215 	fct_status_t ret;
3216 
3217 	qlt = (qlt_state_t *)port->port_fca_private;
3218 
3219 	if ((qlt->qlt_state == FCT_STATE_OFFLINE) ||
3220 	    (qlt->qlt_state == FCT_STATE_OFFLINING))
3221 		return (FCT_SUCCESS);
3222 	ASSERT(qlt->rp_id_in_dereg == 0);
3223 
3224 	mutex_enter(&qlt->preq_lock);
3225 	req = (uint8_t *)qlt_get_preq_entries(qlt, 1);
3226 	if (req == NULL) {
3227 		mutex_exit(&qlt->preq_lock);
3228 		return (FCT_BUSY);
3229 	}
3230 	bzero(req, IOCB_SIZE);
3231 	req[0] = 0x52; req[1] = 1;
3232 	/* QMEM_WR32(qlt, (&req[4]), 0xffffffff);  */
3233 	QMEM_WR16(qlt, (&req[0xA]), rp->rp_handle);
3234 	QMEM_WR16(qlt, (&req[0xC]), 0x98); /* implicit logo */
3235 	QMEM_WR32(qlt, (&req[0x10]), rp->rp_id);
3236 	qlt->rp_id_in_dereg = rp->rp_id;
3237 	qlt_submit_preq_entries(qlt, 1);
3238 
3239 	dereg_req_timer = ddi_get_lbolt() + drv_usectohz(DEREG_RP_TIMEOUT);
3240 	if (cv_timedwait(&qlt->rp_dereg_cv,
3241 	    &qlt->preq_lock, dereg_req_timer) > 0) {
3242 		ret = qlt->rp_dereg_status;
3243 	} else {
3244 		ret = FCT_BUSY;
3245 	}
3246 	qlt->rp_dereg_status = 0;
3247 	qlt->rp_id_in_dereg = 0;
3248 	mutex_exit(&qlt->preq_lock);
3249 	return (ret);
3250 }
3251 
3252 /*
3253  * Pass received ELS up to framework.
3254  */
3255 static void
3256 qlt_handle_purex(qlt_state_t *qlt, uint8_t *resp)
3257 {
3258 	fct_cmd_t		*cmd;
3259 	fct_els_t		*els;
3260 	qlt_cmd_t		*qcmd;
3261 	uint32_t		payload_size;
3262 	uint32_t		remote_portid;
3263 	uint8_t			*pldptr, *bndrptr;
3264 	int			i, off;
3265 	uint16_t		iocb_flags;
3266 	char			info[QLT_INFO_LEN];
3267 
3268 	remote_portid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x18])))) |
3269 	    ((uint32_t)(resp[0x1A])) << 16;
3270 	iocb_flags = QMEM_RD16(qlt, (&resp[8]));
3271 	if (iocb_flags & BIT_15) {
3272 		payload_size = (QMEM_RD16(qlt, (&resp[0x0e])) & 0xfff) - 24;
3273 	} else {
3274 		payload_size = QMEM_RD16(qlt, (&resp[0x0c])) - 24;
3275 	}
3276 
3277 	if (payload_size > ((uint32_t)resp[1] * IOCB_SIZE - 0x2C)) {
3278 		EL(qlt, "payload is too large = %xh\n", payload_size);
3279 		cmn_err(CE_WARN, "handle_purex: payload is too large");
3280 		goto cmd_null;
3281 	}
3282 
3283 	cmd = (fct_cmd_t *)fct_alloc(FCT_STRUCT_CMD_RCVD_ELS,
3284 	    (int)(payload_size + GET_STRUCT_SIZE(qlt_cmd_t)), 0);
3285 	if (cmd == NULL) {
3286 		EL(qlt, "fct_alloc cmd==NULL\n");
3287 cmd_null:;
3288 		(void) snprintf(info, sizeof (info),
3289 		    "qlt_handle_purex: qlt-%p, "
3290 		    "can't allocate space for fct_cmd", (void *)qlt);
3291 		(void) fct_port_shutdown(qlt->qlt_port,
3292 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3293 		return;
3294 	}
3295 
3296 	cmd->cmd_port = qlt->qlt_port;
3297 	cmd->cmd_rp_handle = QMEM_RD16(qlt, resp+0xa);
3298 	if (cmd->cmd_rp_handle == 0xFFFF) {
3299 		cmd->cmd_rp_handle = FCT_HANDLE_NONE;
3300 	}
3301 
3302 	els = (fct_els_t *)cmd->cmd_specific;
3303 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3304 	els->els_req_size = (uint16_t)payload_size;
3305 	els->els_req_payload = GET_BYTE_OFFSET(qcmd,
3306 	    GET_STRUCT_SIZE(qlt_cmd_t));
3307 	qcmd->fw_xchg_addr = QMEM_RD32(qlt, (&resp[0x10]));
3308 	cmd->cmd_rportid = remote_portid;
3309 	cmd->cmd_lportid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x14])))) |
3310 	    ((uint32_t)(resp[0x16])) << 16;
3311 	cmd->cmd_oxid = QMEM_RD16(qlt, (&resp[0x26]));
3312 	cmd->cmd_rxid = QMEM_RD16(qlt, (&resp[0x24]));
3313 	pldptr = &resp[0x2C];
3314 	bndrptr = (uint8_t *)(qlt->resp_ptr + (RESPONSE_QUEUE_ENTRIES << 6));
3315 	for (i = 0, off = 0x2c; i < payload_size; i += 4) {
3316 		/* Take care of fw's swapping of payload */
3317 		els->els_req_payload[i] = pldptr[3];
3318 		els->els_req_payload[i+1] = pldptr[2];
3319 		els->els_req_payload[i+2] = pldptr[1];
3320 		els->els_req_payload[i+3] = pldptr[0];
3321 		pldptr += 4;
3322 		if (pldptr == bndrptr)
3323 			pldptr = (uint8_t *)qlt->resp_ptr;
3324 		off += 4;
3325 		if (off >= IOCB_SIZE) {
3326 			off = 4;
3327 			pldptr += 4;
3328 		}
3329 	}
3330 	fct_post_rcvd_cmd(cmd, 0);
3331 }
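
/*
 * Illustrative sketch (not part of the original driver): the byte-order
 * fix-up performed by the copy loop above, isolated as a helper. Each
 * 32-bit word of the PUREX payload arrives byte-swapped from the
 * firmware; this version ignores the response-ring wrap handling that
 * the real loop also has to do.
 */
#ifdef QLT_EXAMPLE_SKETCH
static void
qlt_example_swap_payload(uint8_t *dst, const uint8_t *src, uint32_t len)
{
	uint32_t i;

	for (i = 0; (i + 4) <= len; i += 4) {
		dst[i] = src[i + 3];
		dst[i + 1] = src[i + 2];
		dst[i + 2] = src[i + 1];
		dst[i + 3] = src[i];
	}
}
#endif	/* QLT_EXAMPLE_SKETCH */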
3332 
3333 fct_status_t
3334 qlt_send_cmd_response(fct_cmd_t *cmd, uint32_t ioflags)
3335 {
3336 	qlt_state_t	*qlt;
3337 	char		info[QLT_INFO_LEN];
3338 
3339 	qlt = (qlt_state_t *)cmd->cmd_port->port_fca_private;
3340 
3341 	if (cmd->cmd_type == FCT_CMD_FCP_XCHG) {
3342 		if (ioflags & FCT_IOF_FORCE_FCA_DONE) {
3343 			EL(qlt, "ioflags = %xh\n", ioflags);
3344 			goto fatal_panic;
3345 		} else {
3346 			return (qlt_send_status(qlt, cmd));
3347 		}
3348 	}
3349 
3350 	if (cmd->cmd_type == FCT_CMD_RCVD_ELS) {
3351 		if (ioflags & FCT_IOF_FORCE_FCA_DONE) {
3352 			goto fatal_panic;
3353 		} else {
3354 			return (qlt_send_els_response(qlt, cmd));
3355 		}
3356 	}
3357 
3358 	if (ioflags & FCT_IOF_FORCE_FCA_DONE) {
3359 		cmd->cmd_handle = 0;
3360 	}
3361 
3362 	if (cmd->cmd_type == FCT_CMD_RCVD_ABTS) {
3363 		return (qlt_send_abts_response(qlt, cmd, 0));
3364 	} else {
3365 		EL(qlt, "cmd->cmd_type=%xh\n", cmd->cmd_type);
3366 		ASSERT(0);
3367 		return (FCT_FAILURE);
3368 	}
3369 
3370 fatal_panic:;
3371 	(void) snprintf(info, sizeof (info),
3372 	    "qlt_send_cmd_response: can not handle "
3373 	    "FCT_IOF_FORCE_FCA_DONE for cmd %p, ioflags-%x", (void *)cmd,
3374 	    ioflags);
3375 	(void) fct_port_shutdown(qlt->qlt_port,
3376 	    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3377 	return (FCT_FAILURE);
3378 }
3379 
3380 /* ARGSUSED */
3381 fct_status_t
3382 qlt_xfer_scsi_data(fct_cmd_t *cmd, stmf_data_buf_t *dbuf, uint32_t ioflags)
3383 {
3384 	qlt_dmem_bctl_t	*bctl = (qlt_dmem_bctl_t *)dbuf->db_port_private;
3385 	qlt_state_t	*qlt = (qlt_state_t *)cmd->cmd_port->port_fca_private;
3386 	qlt_cmd_t	*qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3387 	uint8_t		*req, rcnt;
3388 	uint16_t	flags;
3389 	uint16_t	cookie_count;
3390 
3391 	if (dbuf->db_handle == 0)
3392 		qcmd->dbuf = dbuf;
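	/*
	 * Build the CTIO7 flags word: carry over the upper nibble of ATIO
	 * byte 3, set bit 1 when data flows out to the remote port and
	 * bit 0 when it flows in, and set BIT_15 when good SCSI status
	 * should be sent along with the data.
	 */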
3393 	flags = (uint16_t)(((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5);
3394 	if (dbuf->db_flags & DB_DIRECTION_TO_RPORT) {
3395 		flags = (uint16_t)(flags | 2);
3396 		qlt_dmem_dma_sync(dbuf, DDI_DMA_SYNC_FORDEV);
3397 	} else {
3398 		flags = (uint16_t)(flags | 1);
3399 	}
3400 
3401 	if (dbuf->db_flags & DB_SEND_STATUS_GOOD)
3402 		flags = (uint16_t)(flags | BIT_15);
3403 
3404 	if (dbuf->db_flags & DB_LU_DATA_BUF) {
3405 		/*
3406 		 * Data bufs from LU are in scatter/gather list format.
3407 		 */
3408 		cookie_count = qlt_get_cookie_count(dbuf);
3409 		rcnt = qlt_get_iocb_count(cookie_count);
3410 	} else {
3411 		cookie_count = 1;
3412 		rcnt = 1;
3413 	}
3414 	mutex_enter(&qlt->req_lock);
3415 	req = (uint8_t *)qlt_get_req_entries(qlt, rcnt);
3416 	if (req == NULL) {
3417 		mutex_exit(&qlt->req_lock);
3418 		return (FCT_BUSY);
3419 	}
3420 	bzero(req, IOCB_SIZE);	/* XXX needed ? */
3421 	req[0] = 0x12;
3422 	req[1] = rcnt;
3423 	req[2] = dbuf->db_handle;
3424 	QMEM_WR32(qlt, req+4, cmd->cmd_handle);
3425 	QMEM_WR16(qlt, req+8, cmd->cmd_rp->rp_handle);
3426 	QMEM_WR16(qlt, req+10, 60);	/* 60 seconds timeout */
3427 	QMEM_WR16(qlt, req+12, cookie_count);
3428 	QMEM_WR32(qlt, req+0x10, cmd->cmd_rportid);
3429 	QMEM_WR32(qlt, req+0x14, qcmd->fw_xchg_addr);
3430 	QMEM_WR16(qlt, req+0x1A, flags);
3431 	QMEM_WR16(qlt, req+0x20, cmd->cmd_oxid);
3432 	QMEM_WR32(qlt, req+0x24, dbuf->db_relative_offset);
3433 	QMEM_WR32(qlt, req+0x2C, dbuf->db_data_size);
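	/*
	 * The data segment descriptors start at offset 0x34: a single
	 * address/length pair for a contiguous buffer, or one pair per DMA
	 * cookie spread across type 0x0a continuation entries for
	 * scatter/gather buffers.
	 */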
3434 	if (dbuf->db_flags & DB_LU_DATA_BUF) {
3435 		uint8_t			*qptr;	/* qlt continuation segs */
3436 		uint16_t		cookie_resid;
3437 		uint16_t		cont_segs;
3438 		ddi_dma_cookie_t	cookie, *ckp;
3439 
3440 		/*
3441 		 * See if the dma cookies are in simple array format.
3442 		 */
3443 		ckp = qlt_get_cookie_array(dbuf);
3444 
3445 		/*
3446 		 * Program the first segment into main record.
3447 		 */
3448 		if (ckp) {
3449 			ASSERT(ckp->dmac_size);
3450 			QMEM_WR64(qlt, req+0x34, ckp->dmac_laddress);
3451 			QMEM_WR32(qlt, req+0x3c, ckp->dmac_size);
3452 		} else {
3453 			qlt_ddi_dma_nextcookie(dbuf, &cookie);
3454 			ASSERT(cookie.dmac_size);
3455 			QMEM_WR64(qlt, req+0x34, cookie.dmac_laddress);
3456 			QMEM_WR32(qlt, req+0x3c, cookie.dmac_size);
3457 		}
3458 		cookie_resid = cookie_count-1;
3459 
3460 		/*
3461 		 * Program remaining segments into continuation records.
3462 		 */
3463 		while (cookie_resid) {
3464 			req += IOCB_SIZE;
3465 			if (req >= (uint8_t *)qlt->resp_ptr) {
3466 				req = (uint8_t *)qlt->req_ptr;
3467 			}
3468 			req[0] = 0x0a;
3469 			req[1] = 1;
3470 			req[2] = req[3] = 0;	/* tidy */
3471 			qptr = &req[4];
3472 			for (cont_segs = CONT_A64_DATA_SEGMENTS;
3473 			    cont_segs && cookie_resid; cont_segs--) {
3474 
3475 				if (ckp) {
3476 					++ckp;		/* next cookie */
3477 					ASSERT(ckp->dmac_size != 0);
3478 					QMEM_WR64(qlt, qptr,
3479 					    ckp->dmac_laddress);
3480 					qptr += 8;	/* skip over laddress */
3481 					QMEM_WR32(qlt, qptr, ckp->dmac_size);
3482 					qptr += 4;	/* skip over size */
3483 				} else {
3484 					qlt_ddi_dma_nextcookie(dbuf, &cookie);
3485 					ASSERT(cookie.dmac_size != 0);
3486 					QMEM_WR64(qlt, qptr,
3487 					    cookie.dmac_laddress);
3488 					qptr += 8;	/* skip over laddress */
3489 					QMEM_WR32(qlt, qptr, cookie.dmac_size);
3490 					qptr += 4;	/* skip over size */
3491 				}
3492 				cookie_resid--;
3493 			}
3494 			/*
3495 			 * zero unused remainder of IOCB
3496 			 */
3497 			if (cont_segs) {
3498 				size_t resid;
3499 				resid = (size_t)((uintptr_t)(req+IOCB_SIZE) -
3500 				    (uintptr_t)qptr);
3501 				ASSERT(resid < IOCB_SIZE);
3502 				bzero(qptr, resid);
3503 			}
3504 		}
3505 	} else {
3506 		/* Single, contiguous buffer */
3507 		QMEM_WR64(qlt, req+0x34, bctl->bctl_dev_addr);
3508 		QMEM_WR32(qlt, req+0x34+8, dbuf->db_data_size);
3509 	}
3510 
3511 	qlt_submit_req_entries(qlt, rcnt);
3512 	mutex_exit(&qlt->req_lock);
3513 
3514 	return (STMF_SUCCESS);
3515 }
3516 
3517 /*
3518  * We must construct proper FCP_RSP_IU now. Here we only focus on
3519  * the handling of FCP_SNS_INFO. If there were protocol failures (FCP_RSP_INFO),
3520  * we would have caught them before we entered here.
3521  */
3522 fct_status_t
3523 qlt_send_status(qlt_state_t *qlt, fct_cmd_t *cmd)
3524 {
3525 	qlt_cmd_t *qcmd		= (qlt_cmd_t *)cmd->cmd_fca_private;
3526 	scsi_task_t *task	= (scsi_task_t *)cmd->cmd_specific;
3527 	qlt_dmem_bctl_t *bctl;
3528 	uint32_t size;
3529 	uint8_t *req, *fcp_rsp_iu;
3530 	uint8_t *psd, sensbuf[24];		/* sense data */
3531 	uint16_t flags;
3532 	uint16_t scsi_status;
3533 	int use_mode2;
3534 	int ndx;
3535 
3536 	/*
3537 	 * Fast path for non CHECK CONDITION status
3538 	 */
3539 	if (task->task_scsi_status != STATUS_CHECK) {
3540 		/*
3541 		 * We will use mode1
3542 		 */
3543 		flags = (uint16_t)(BIT_6 | BIT_15 |
3544 		    (((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5));
3545 		scsi_status = (uint16_t)task->task_scsi_status;
3546 		if (task->task_status_ctrl == TASK_SCTRL_OVER) {
3547 			scsi_status = (uint16_t)(scsi_status | BIT_10);
3548 		} else if (task->task_status_ctrl == TASK_SCTRL_UNDER) {
3549 			scsi_status = (uint16_t)(scsi_status | BIT_11);
3550 		}
3551 		qcmd->dbuf_rsp_iu = NULL;
3552 
3553 		/*
3554 		 * Fillout CTIO type 7 IOCB
3555 		 */
3556 		mutex_enter(&qlt->req_lock);
3557 		req = (uint8_t *)qlt_get_req_entries(qlt, 1);
3558 		if (req == NULL) {
3559 			mutex_exit(&qlt->req_lock);
3560 			return (FCT_BUSY);
3561 		}
3562 
3563 		/*
3564 		 * Common fields
3565 		 */
3566 		bzero(req, IOCB_SIZE);
3567 		req[0x00] = 0x12;
3568 		req[0x01] = 0x1;
3569 	req[0x02] = BIT_7;	/* indicate that it's a pure status req */
3570 		QMEM_WR32(qlt, req + 0x04, cmd->cmd_handle);
3571 		QMEM_WR16(qlt, req + 0x08, cmd->cmd_rp->rp_handle);
3572 		QMEM_WR32(qlt, req + 0x10, cmd->cmd_rportid);
3573 		QMEM_WR32(qlt, req + 0x14, qcmd->fw_xchg_addr);
3574 
3575 		/*
3576 		 * Mode-specific fields
3577 		 */
3578 		QMEM_WR16(qlt, req + 0x1A, flags);
3579 		QMEM_WR32(qlt, req + 0x1C, task->task_resid);
3580 		QMEM_WR16(qlt, req + 0x20, cmd->cmd_oxid);
3581 		QMEM_WR16(qlt, req + 0x22, scsi_status);
3582 
3583 		/*
3584 		 * Trigger FW to send SCSI status out
3585 		 */
3586 		qlt_submit_req_entries(qlt, 1);
3587 		mutex_exit(&qlt->req_lock);
3588 		return (STMF_SUCCESS);
3589 	}
3590 
3591 	ASSERT(task->task_scsi_status == STATUS_CHECK);
3592 	/*
3593 	 * Decide the SCSI status mode that should be used
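	 * Mode 1 embeds up to 24 bytes of sense data directly in the CTIO;
	 * mode 2 instead points the firmware at a full FCP_RSP IU built in
	 * DMA memory, which is required for longer sense data.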
3594 	 */
3595 	use_mode2 = (task->task_sense_length > 24);
3596 
3597 	/*
3598 	 * Prepare required information per the SCSI status mode
3599 	 */
3600 	flags = (uint16_t)(BIT_15 |
3601 	    (((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5));
3602 	if (use_mode2) {
3603 		flags = (uint16_t)(flags | BIT_7);
3604 
3605 		size = task->task_sense_length;
3606 		qcmd->dbuf_rsp_iu = qlt_i_dmem_alloc(qlt,
3607 		    task->task_sense_length, &size, 0);
3608 		if (!qcmd->dbuf_rsp_iu) {
3609 			return (FCT_ALLOC_FAILURE);
3610 		}
3611 
3612 		/*
3613 		 * Start to construct FCP_RSP IU
3614 		 */
3615 		fcp_rsp_iu = qcmd->dbuf_rsp_iu->db_sglist[0].seg_addr;
3616 		bzero(fcp_rsp_iu, 24);
3617 
3618 		/*
3619 		 * FCP_RSP IU flags, byte10
3620 		 */
3621 		fcp_rsp_iu[10] = (uint8_t)(fcp_rsp_iu[10] | BIT_1);
3622 		if (task->task_status_ctrl == TASK_SCTRL_OVER) {
3623 			fcp_rsp_iu[10] = (uint8_t)(fcp_rsp_iu[10] | BIT_2);
3624 		} else if (task->task_status_ctrl == TASK_SCTRL_UNDER) {
3625 			fcp_rsp_iu[10] = (uint8_t)(fcp_rsp_iu[10] | BIT_3);
3626 		}
3627 
3628 		/*
3629 		 * SCSI status code, byte11
3630 		 */
3631 		fcp_rsp_iu[11] = task->task_scsi_status;
3632 
3633 		/*
3634 		 * FCP_RESID (Overrun or underrun)
3635 		 */
3636 		fcp_rsp_iu[12] = (uint8_t)((task->task_resid >> 24) & 0xFF);
3637 		fcp_rsp_iu[13] = (uint8_t)((task->task_resid >> 16) & 0xFF);
3638 		fcp_rsp_iu[14] = (uint8_t)((task->task_resid >>  8) & 0xFF);
3639 		fcp_rsp_iu[15] = (uint8_t)((task->task_resid >>  0) & 0xFF);
3640 
3641 		/*
3642 		 * FCP_SNS_LEN
3643 		 */
3644 		fcp_rsp_iu[18] = (uint8_t)((task->task_sense_length >> 8) &
3645 		    0xFF);
3646 		fcp_rsp_iu[19] = (uint8_t)((task->task_sense_length >> 0) &
3647 		    0xFF);
3648 
3649 		/*
3650 		 * FCP_RSP_LEN
3651 		 */
3652 		/*
3653 		 * no FCP_RSP_INFO
3654 		 */
3655 		/*
3656 		 * FCP_SNS_INFO
3657 		 */
3658 		bcopy(task->task_sense_data, fcp_rsp_iu + 24,
3659 		    task->task_sense_length);
3660 
3661 		/*
3662 		 * Ensure dma data consistency
3663 		 */
3664 		qlt_dmem_dma_sync(qcmd->dbuf_rsp_iu, DDI_DMA_SYNC_FORDEV);
3665 	} else {
3666 		flags = (uint16_t)(flags | BIT_6);
3667 
3668 		scsi_status = (uint16_t)task->task_scsi_status;
3669 		if (task->task_status_ctrl == TASK_SCTRL_OVER) {
3670 			scsi_status = (uint16_t)(scsi_status | BIT_10);
3671 		} else if (task->task_status_ctrl == TASK_SCTRL_UNDER) {
3672 			scsi_status = (uint16_t)(scsi_status | BIT_11);
3673 		}
3674 		if (task->task_sense_length) {
3675 			scsi_status = (uint16_t)(scsi_status | BIT_9);
3676 		}
3677 		bcopy(task->task_sense_data, sensbuf, task->task_sense_length);
3678 		qcmd->dbuf_rsp_iu = NULL;
3679 	}
3680 
3681 	/*
3682 	 * Fillout CTIO type 7 IOCB
3683 	 */
3684 	mutex_enter(&qlt->req_lock);
3685 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
3686 	if (req == NULL) {
3687 		mutex_exit(&qlt->req_lock);
3688 		if (use_mode2) {
3689 			qlt_dmem_free(cmd->cmd_port->port_fds,
3690 			    qcmd->dbuf_rsp_iu);
3691 			qcmd->dbuf_rsp_iu = NULL;
3692 		}
3693 		return (FCT_BUSY);
3694 	}
3695 
3696 	/*
3697 	 * Common fields
3698 	 */
3699 	bzero(req, IOCB_SIZE);
3700 	req[0x00] = 0x12;
3701 	req[0x01] = 0x1;
3702 	req[0x02] = BIT_7;	/* to indicate that it's a pure status req */
3703 	QMEM_WR32(qlt, req + 0x04, cmd->cmd_handle);
3704 	QMEM_WR16(qlt, req + 0x08, cmd->cmd_rp->rp_handle);
3705 	QMEM_WR16(qlt, req + 0x0A, 0);	/* not timed by FW */
3706 	if (use_mode2) {
3707 		QMEM_WR16(qlt, req+0x0C, 1);	/* FCP RSP IU data field */
3708 	}
3709 	QMEM_WR32(qlt, req + 0x10, cmd->cmd_rportid);
3710 	QMEM_WR32(qlt, req + 0x14, qcmd->fw_xchg_addr);
3711 
3712 	/*
3713 	 * Mode-specific fields
3714 	 */
3715 	if (!use_mode2) {
3716 		QMEM_WR16(qlt, req + 0x18, task->task_sense_length);
3717 	}
3718 	QMEM_WR16(qlt, req + 0x1A, flags);
3719 	QMEM_WR32(qlt, req + 0x1C, task->task_resid);
3720 	QMEM_WR16(qlt, req + 0x20, cmd->cmd_oxid);
3721 	if (use_mode2) {
3722 		bctl = (qlt_dmem_bctl_t *)qcmd->dbuf_rsp_iu->db_port_private;
3723 		QMEM_WR32(qlt, req + 0x2C, 24 + task->task_sense_length);
3724 		QMEM_WR64(qlt, req + 0x34, bctl->bctl_dev_addr);
3725 		QMEM_WR32(qlt, req + 0x3C, 24 + task->task_sense_length);
3726 	} else {
3727 		QMEM_WR16(qlt, req + 0x22, scsi_status);
3728 		psd = req+0x28;
3729 
3730 		/*
3731 		 * Data in sense buf is always big-endian, data in IOCB
3732 		 * should always be little-endian, so we must do swapping.
3733 		 */
3734 		size = ((task->task_sense_length + 3) & (~3));
3735 		for (ndx = 0; ndx < size; ndx += 4) {
3736 			psd[ndx + 0] = sensbuf[ndx + 3];
3737 			psd[ndx + 1] = sensbuf[ndx + 2];
3738 			psd[ndx + 2] = sensbuf[ndx + 1];
3739 			psd[ndx + 3] = sensbuf[ndx + 0];
3740 		}
3741 	}
3742 
3743 	/*
3744 	 * Trigger FW to send SCSI status out
3745 	 */
3746 	qlt_submit_req_entries(qlt, 1);
3747 	mutex_exit(&qlt->req_lock);
3748 
3749 	return (STMF_SUCCESS);
3750 }
3751 
3752 fct_status_t
3753 qlt_send_els_response(qlt_state_t *qlt, fct_cmd_t *cmd)
3754 {
3755 	qlt_cmd_t	*qcmd;
3756 	fct_els_t *els = (fct_els_t *)cmd->cmd_specific;
3757 	uint8_t *req, *addr;
3758 	qlt_dmem_bctl_t *bctl;
3759 	uint32_t minsize;
3760 	uint8_t elsop, req1f;
3761 
3762 	addr = els->els_resp_payload;
3763 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3764 
3765 	minsize = els->els_resp_size;
3766 	qcmd->dbuf = qlt_i_dmem_alloc(qlt, els->els_resp_size, &minsize, 0);
3767 	if (qcmd->dbuf == NULL)
3768 		return (FCT_BUSY);
3769 
3770 	bctl = (qlt_dmem_bctl_t *)qcmd->dbuf->db_port_private;
3771 
3772 	bcopy(addr, qcmd->dbuf->db_sglist[0].seg_addr, els->els_resp_size);
3773 	qlt_dmem_dma_sync(qcmd->dbuf, DDI_DMA_SYNC_FORDEV);
3774 
3775 	if (addr[0] == 0x02) {	/* ACC */
3776 		req1f = BIT_5;
3777 	} else {
3778 		req1f = BIT_6;
3779 	}
3780 	elsop = els->els_req_payload[0];
3781 	if ((elsop == ELS_OP_PRLI) || (elsop == ELS_OP_PRLO) ||
3782 	    (elsop == ELS_OP_TPRLO) || (elsop == ELS_OP_LOGO)) {
3783 		req1f = (uint8_t)(req1f | BIT_4);
3784 	}
3785 
3786 	mutex_enter(&qlt->req_lock);
3787 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
3788 	if (req == NULL) {
3789 		mutex_exit(&qlt->req_lock);
3790 		qlt_dmem_free(NULL, qcmd->dbuf);
3791 		qcmd->dbuf = NULL;
3792 		return (FCT_BUSY);
3793 	}
3794 	bzero(req, IOCB_SIZE);
3795 	req[0] = 0x53; req[1] = 1; req[0xf] = 0x10;
3796 	req[0x16] = elsop; req[0x1f] = req1f;
3797 	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
3798 	QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
3799 	QMEM_WR16(qlt, (&req[0xC]), 1);
3800 	QMEM_WR32(qlt, (&req[0x10]), qcmd->fw_xchg_addr);
3801 	QMEM_WR32(qlt, (&req[0x18]), cmd->cmd_rportid);
3802 	if (qlt->cur_topology == PORT_TOPOLOGY_PT_TO_PT) {
3803 		req[0x1b] = (uint8_t)((cmd->cmd_lportid >> 16) & 0xff);
3804 		req[0x1c] = (uint8_t)(cmd->cmd_lportid & 0xff);
3805 		req[0x1d] = (uint8_t)((cmd->cmd_lportid >> 8) & 0xff);
3806 	}
3807 	QMEM_WR32(qlt, (&req[0x24]), els->els_resp_size);
3808 	QMEM_WR64(qlt, (&req[0x28]), bctl->bctl_dev_addr);
3809 	QMEM_WR32(qlt, (&req[0x30]), els->els_resp_size);
3810 	qlt_submit_req_entries(qlt, 1);
3811 	mutex_exit(&qlt->req_lock);
3812 
3813 	return (FCT_SUCCESS);
3814 }
3815 
3816 fct_status_t
3817 qlt_send_abts_response(qlt_state_t *qlt, fct_cmd_t *cmd, int terminate)
3818 {
3819 	qlt_abts_cmd_t *qcmd;
3820 	fct_rcvd_abts_t *abts = (fct_rcvd_abts_t *)cmd->cmd_specific;
3821 	uint8_t *req;
3822 	uint32_t lportid;
3823 	uint32_t fctl;
3824 	int i;
3825 
3826 	qcmd = (qlt_abts_cmd_t *)cmd->cmd_fca_private;
3827 
3828 	mutex_enter(&qlt->req_lock);
3829 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
3830 	if (req == NULL) {
3831 		mutex_exit(&qlt->req_lock);
3832 		return (FCT_BUSY);
3833 	}
3834 	bcopy(qcmd->buf, req, IOCB_SIZE);
3835 	lportid = QMEM_RD32(qlt, req+0x14) & 0xFFFFFF;
3836 	fctl = QMEM_RD32(qlt, req+0x1C);
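	/*
	 * Derive the response frame's F_CTL from the received ABTS: flip
	 * the exchange context bit, clear the sequence context bit, and set
	 * what appear to be the end-sequence and sequence-initiative bits.
	 */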
3837 	fctl = ((fctl ^ BIT_23) & ~BIT_22) | (BIT_19 | BIT_16);
3838 	req[0] = 0x55; req[1] = 1; req[2] = (uint8_t)terminate;
3839 	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
3840 	if (cmd->cmd_rp)
3841 		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
3842 	else
3843 		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp_handle);
3844 	if (terminate) {
3845 		QMEM_WR16(qlt, (&req[0xC]), 1);
3846 	}
3847 	QMEM_WR32(qlt, req+0x14, cmd->cmd_rportid);
3848 	req[0x17] = abts->abts_resp_rctl;
3849 	QMEM_WR32(qlt, req+0x18, lportid);
3850 	QMEM_WR32(qlt, req+0x1C, fctl);
3851 	req[0x23]++;
3852 	for (i = 0; i < 12; i += 4) {
3853 		/* Take care of firmware's LE requirement */
3854 		req[0x2C+i] = abts->abts_resp_payload[i+3];
3855 		req[0x2C+i+1] = abts->abts_resp_payload[i+2];
3856 		req[0x2C+i+2] = abts->abts_resp_payload[i+1];
3857 		req[0x2C+i+3] = abts->abts_resp_payload[i];
3858 	}
3859 	qlt_submit_req_entries(qlt, 1);
3860 	mutex_exit(&qlt->req_lock);
3861 
3862 	return (FCT_SUCCESS);
3863 }
3864 
3865 static void
3866 qlt_handle_inot(qlt_state_t *qlt, uint8_t *inot)
3867 {
3868 	int i;
3869 	uint32_t d;
3870 	caddr_t req;
3871 	/* Just put it on the request queue */
3872 	mutex_enter(&qlt->req_lock);
3873 	req = qlt_get_req_entries(qlt, 1);
3874 	if (req == NULL) {
3875 		mutex_exit(&qlt->req_lock);
3876 		/* XXX handle this */
3877 		return;
3878 	}
3879 	for (i = 0; i < 16; i++) {
3880 		d = QMEM_RD32(qlt, inot);
3881 		inot += 4;
3882 		QMEM_WR32(qlt, req, d);
3883 		req += 4;
3884 	}
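	/*
	 * Rewind to the start of the 64-byte entry just copied and change
	 * its entry type to 0x0e (presumably NOTIFY ACKNOWLEDGE) so the
	 * firmware completes the immediate notification.
	 */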
3885 	req -= 64;
3886 	req[0] = 0x0e;
3887 	qlt_submit_req_entries(qlt, 1);
3888 	mutex_exit(&qlt->req_lock);
3889 }
3890 
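/*
 * Translates the FCP_CMND task attribute code (low three bits of the task
 * attribute byte) into an STMF task_flags attribute value; codes the driver
 * does not recognize appear to default to simple queue (1).
 */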
3891 uint8_t qlt_task_flags[] = { 1, 3, 2, 1, 4, 0, 1, 1 };
3892 static void
3893 qlt_handle_atio(qlt_state_t *qlt, uint8_t *atio)
3894 {
3895 	fct_cmd_t	*cmd;
3896 	scsi_task_t	*task;
3897 	qlt_cmd_t	*qcmd;
3898 	uint32_t	rportid, fw_xchg_addr;
3899 	uint8_t		*p, *q, *req, tm;
3900 	uint16_t	cdb_size, flags, oxid;
3901 	char		info[QLT_INFO_LEN];
3902 
3903 	/*
3904 	 * If either a bidirectional xfer is requested or there is an extended
3905 	 * CDB, atio[0x20 + 11] will be greater than or equal to 3.
3906 	 */
3907 	cdb_size = 16;
3908 	if (atio[0x20 + 11] >= 3) {
3909 		uint8_t b = atio[0x20 + 11];
3910 		uint16_t b1;
3911 		if ((b & 3) == 3) {
3912 			EL(qlt, "bidirectional I/O not supported\n");
3913 			cmn_err(CE_WARN, "qlt(%d) CMD with bidirectional I/O "
3914 			    "received, dropping the cmd as bidirectional "
3915 			    "transfers are not yet supported", qlt->instance);
3916 			/* XXX abort the I/O */
3917 			return;
3918 		}
3919 		cdb_size = (uint16_t)(cdb_size + (b & 0xfc));
3920 		/*
3921 		 * Verify that we have enough entries. Without an additional CDB,
3922 		 * everything fits nicely within the same 64 bytes, so the
3923 		 * additional CDB size is essentially the # of additional bytes
3924 		 * we need.
3925 		 */
3926 		b1 = (uint16_t)b;
3927 		if (((((b1 & 0xfc) + 63) >> 6) + 1) > ((uint16_t)atio[1])) {
3928 			EL(qlt, "extended cdb received\n");
3929 			cmn_err(CE_WARN, "qlt(%d): cmd received with extended "
3930 			    "cdb (cdb size = %d bytes), however the firmware "
3931 			    "did not DMA the entire FCP_CMD IU, entry count "
3932 			    "is %d while it should be %d", qlt->instance,
3933 			    cdb_size, atio[1], ((((b1 & 0xfc) + 63) >> 6) + 1));
3934 			/* XXX abort the I/O */
3935 			return;
3936 		}
3937 	}
3938 
3939 	rportid = (((uint32_t)atio[8 + 5]) << 16) |
3940 	    (((uint32_t)atio[8 + 6]) << 8) | atio[8+7];
3941 	fw_xchg_addr = QMEM_RD32(qlt, atio+4);
3942 	oxid = (uint16_t)((((uint16_t)atio[8 + 16]) << 8) | atio[8+17]);
3943 
3944 	if (fw_xchg_addr == 0xFFFFFFFF) {
3945 		EL(qlt, "fw_xchg_addr==0xFFFFFFFF\n");
3946 		cmd = NULL;
3947 	} else {
3948 		cmd = fct_scsi_task_alloc(qlt->qlt_port, FCT_HANDLE_NONE,
3949 		    rportid, atio+0x20, cdb_size, STMF_TASK_EXT_NONE);
3950 		if (cmd == NULL) {
3951 			EL(qlt, "fct_scsi_task_alloc cmd==NULL\n");
3952 		}
3953 	}
3954 	if (cmd == NULL) {
3955 		EL(qlt, "fct_scsi_task_alloc cmd==NULL\n");
3956 		/* Abort this IO */
3957 		flags = (uint16_t)(BIT_14 | ((atio[3] & 0xF0) << 5));
3958 
3959 		mutex_enter(&qlt->req_lock);
3960 		req = (uint8_t *)qlt_get_req_entries(qlt, 1);
3961 		if (req == NULL) {
3962 			mutex_exit(&qlt->req_lock);
3963 
3964 			(void) snprintf(info, sizeof (info),
3965 			    "qlt_handle_atio: qlt-%p, can't "
3966 			    "allocate space for scsi_task", (void *)qlt);
3967 			(void) fct_port_shutdown(qlt->qlt_port,
3968 			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3969 			return;
3970 		}
3971 		bzero(req, IOCB_SIZE);
3972 		req[0] = 0x12; req[1] = 0x1;
3973 		QMEM_WR32(qlt, req+4, 0);
3974 		QMEM_WR16(qlt, req+8, fct_get_rp_handle(qlt->qlt_port,
3975 		    rportid));
3976 		QMEM_WR16(qlt, req+10, 60);
3977 		QMEM_WR32(qlt, req+0x10, rportid);
3978 		QMEM_WR32(qlt, req+0x14, fw_xchg_addr);
3979 		QMEM_WR16(qlt, req+0x1A, flags);
3980 		QMEM_WR16(qlt, req+0x20, oxid);
3981 		qlt_submit_req_entries(qlt, 1);
3982 		mutex_exit(&qlt->req_lock);
3983 
3984 		return;
3985 	}
3986 
3987 	task = (scsi_task_t *)cmd->cmd_specific;
3988 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3989 	qcmd->fw_xchg_addr = fw_xchg_addr;
3990 	qcmd->param.atio_byte3 = atio[3];
3991 	cmd->cmd_oxid = oxid;
3992 	cmd->cmd_rxid = (uint16_t)((((uint16_t)atio[8 + 18]) << 8) |
3993 	    atio[8+19]);
3994 	cmd->cmd_rportid = rportid;
3995 	cmd->cmd_lportid = (((uint32_t)atio[8 + 1]) << 16) |
3996 	    (((uint32_t)atio[8 + 2]) << 8) | atio[8 + 3];
3997 	cmd->cmd_rp_handle = FCT_HANDLE_NONE;
3998 	/* Don't do a 64-byte read as this is IOMMU */
3999 	q = atio+0x28;
4000 	/* XXX Handle fcp_cntl */
4001 	task->task_cmd_seq_no = (uint32_t)(*q++);
4002 	task->task_csn_size = 8;
4003 	task->task_flags = qlt_task_flags[(*q++) & 7];
4004 	tm = *q++;
4005 	if (tm) {
4006 		if (tm & BIT_1)
4007 			task->task_mgmt_function = TM_ABORT_TASK_SET;
4008 		else if (tm & BIT_2)
4009 			task->task_mgmt_function = TM_CLEAR_TASK_SET;
4010 		else if (tm & BIT_4)
4011 			task->task_mgmt_function = TM_LUN_RESET;
4012 		else if (tm & BIT_5)
4013 			task->task_mgmt_function = TM_TARGET_COLD_RESET;
4014 		else if (tm & BIT_6)
4015 			task->task_mgmt_function = TM_CLEAR_ACA;
4016 		else
4017 			task->task_mgmt_function = TM_ABORT_TASK;
4018 	}
4019 	task->task_max_nbufs = STMF_BUFS_MAX;
4020 	task->task_csn_size = 8;
4021 	task->task_flags = (uint8_t)(task->task_flags | (((*q++) & 3) << 5));
4022 	p = task->task_cdb;
4023 	*p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
4024 	*p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
4025 	*p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
4026 	*p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
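	/*
	 * With an extended CDB, the remaining CDB bytes and the 4-byte data
	 * length that follows them may spill into the next ATIO entry and
	 * wrap around the end of the ATIO ring.
	 */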
4027 	if (cdb_size > 16) {
4028 		uint16_t xtra = (uint16_t)(cdb_size - 16);
4029 		uint16_t i;
4030 		uint8_t cb[4];
4031 
4032 		while (xtra) {
4033 			*p++ = *q++;
4034 			xtra--;
4035 			if (q == ((uint8_t *)qlt->queue_mem_ptr +
4036 			    ATIO_QUEUE_OFFSET + (ATIO_QUEUE_ENTRIES * 64))) {
4037 				q = (uint8_t *)qlt->queue_mem_ptr +
4038 				    ATIO_QUEUE_OFFSET;
4039 			}
4040 		}
4041 		for (i = 0; i < 4; i++) {
4042 			cb[i] = *q++;
4043 			if (q == ((uint8_t *)qlt->queue_mem_ptr +
4044 			    ATIO_QUEUE_OFFSET + (ATIO_QUEUE_ENTRIES * 64))) {
4045 				q = (uint8_t *)qlt->queue_mem_ptr +
4046 				    ATIO_QUEUE_OFFSET;
4047 			}
4048 		}
4049 		task->task_expected_xfer_length = (((uint32_t)cb[0]) << 24) |
4050 		    (((uint32_t)cb[1]) << 16) |
4051 		    (((uint32_t)cb[2]) << 8) | cb[3];
4052 	} else {
4053 		task->task_expected_xfer_length = (((uint32_t)q[0]) << 24) |
4054 		    (((uint32_t)q[1]) << 16) |
4055 		    (((uint32_t)q[2]) << 8) | q[3];
4056 	}
4057 	fct_post_rcvd_cmd(cmd, 0);
4058 }
4059 
4060 static void
4061 qlt_handle_dereg_completion(qlt_state_t *qlt, uint8_t *rsp)
4062 {
4063 	uint16_t status;
4064 	uint32_t portid;
4065 	uint32_t subcode1, subcode2;
4066 
4067 	status = QMEM_RD16(qlt, rsp+8);
4068 	portid = QMEM_RD32(qlt, rsp+0x10) & 0xffffff;
4069 	subcode1 = QMEM_RD32(qlt, rsp+0x14);
4070 	subcode2 = QMEM_RD32(qlt, rsp+0x18);
4071 
4072 	mutex_enter(&qlt->preq_lock);
4073 	if (portid != qlt->rp_id_in_dereg) {
4074 		int instance = ddi_get_instance(qlt->dip);
4075 
4076 		EL(qlt, "implicit logout received portid = %xh\n", portid);
4077 		cmn_err(CE_WARN, "qlt(%d): implicit logout completion for 0x%x"
4078 		    " received when driver wasn't waiting for it",
4079 		    instance, portid);
4080 		mutex_exit(&qlt->preq_lock);
4081 		return;
4082 	}
4083 
4084 	if (status != 0) {
4085 		EL(qlt, "implicit logout completed for %xh with status %xh, "
4086 		    "subcode1 %xh subcode2 %xh\n", portid, status, subcode1,
4087 		    subcode2);
4088 		if (status == 0x31 && subcode1 == 0x0a) {
4089 			qlt->rp_dereg_status = FCT_SUCCESS;
4090 		} else {
4091 			EL(qlt, "implicit logout portid=%xh, status=%xh, "
4092 			    "subcode1=%xh, subcode2=%xh\n", portid, status,
4093 			    subcode1, subcode2);
4094 			qlt->rp_dereg_status =
4095 			    QLT_FIRMWARE_ERROR(status, subcode1, subcode2);
4096 		}
4097 	} else {
4098 		qlt->rp_dereg_status = FCT_SUCCESS;
4099 	}
4100 	cv_signal(&qlt->rp_dereg_cv);
4101 	mutex_exit(&qlt->preq_lock);
4102 }
4103 
4104 /*
4105  * Note that when an ELS is aborted, the regular or aborted completion
4106  * (if any) gets posted before the abort IOCB comes back on the response queue.
4107  */
4108 static void
4109 qlt_handle_unsol_els_completion(qlt_state_t *qlt, uint8_t *rsp)
4110 {
4111 	char		info[QLT_INFO_LEN];
4112 	fct_cmd_t	*cmd;
4113 	qlt_cmd_t	*qcmd;
4114 	uint32_t	hndl;
4115 	uint32_t	subcode1, subcode2;
4116 	uint16_t	status;
4117 
4118 	hndl = QMEM_RD32(qlt, rsp+4);
4119 	status = QMEM_RD16(qlt, rsp+8);
4120 	subcode1 = QMEM_RD32(qlt, rsp+0x24);
4121 	subcode2 = QMEM_RD32(qlt, rsp+0x28);
4122 
4123 	if (!CMD_HANDLE_VALID(hndl)) {
4124 		EL(qlt, "handle = %xh\n", hndl);
4125 		/*
4126 		 * This cannot happen for unsol els completion. This can
4127 		 * only happen when abort for an unsol els completes.
4128 		 * This condition indicates a firmware bug.
4129 		 */
4130 		(void) snprintf(info, sizeof (info),
4131 		    "qlt_handle_unsol_els_completion: "
4132 		    "Invalid handle: hndl-%x, status-%x/%x/%x, rsp-%p",
4133 		    hndl, status, subcode1, subcode2, (void *)rsp);
4134 		(void) fct_port_shutdown(qlt->qlt_port,
4135 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
4136 		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4137 		return;
4138 	}
4139 
4140 	if (status == 5) {
4141 		/*
4142 		 * When an unsolicited els is aborted, the abort is done
4143 		 * by an ELSPT IOCB with abort control. This is the aborted IOCB
4144 		 * and not the abortee. We will do the cleanup when the
4145 		 * IOCB which caused the abort, returns.
4146 		 */
4147 		EL(qlt, "status = %xh\n", status);
4148 		stmf_trace(0, "--UNSOL ELS returned with status 5 --");
4149 		return;
4150 	}
4151 
4152 	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
4153 	if (cmd == NULL) {
4154 		EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
4155 		/*
4156 		 * Now why would this happen ???
4157 		 */
4158 		(void) snprintf(info, sizeof (info),
4159 		    "qlt_handle_unsol_els_completion: can not "
4160 		    "get cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
4161 		    (void *)rsp);
4162 		(void) fct_port_shutdown(qlt->qlt_port,
4163 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4164 
4165 		return;
4166 	}
4167 
4168 	ASSERT(cmd->cmd_type == FCT_CMD_RCVD_ELS);
4169 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4170 	if (qcmd->flags & QLT_CMD_ABORTING) {
4171 		/*
4172 		 * This is the same case as "if (status == 5)" above. The
4173 		 * only difference is that in this case the firmware actually
4174 		 * finished sending the response. So the abort attempt will
4175 		 * come back with status ?. We will handle it there.
4176 		 */
4177 		stmf_trace(0, "--UNSOL ELS finished while we are trying to "
4178 		    "abort it");
4179 		return;
4180 	}
4181 
4182 	if (qcmd->dbuf != NULL) {
4183 		qlt_dmem_free(NULL, qcmd->dbuf);
4184 		qcmd->dbuf = NULL;
4185 	}
4186 
4187 	if (status == 0) {
4188 		fct_send_response_done(cmd, FCT_SUCCESS, FCT_IOF_FCA_DONE);
4189 	} else {
4190 		fct_send_response_done(cmd,
4191 		    QLT_FIRMWARE_ERROR(status, subcode1, subcode2), 0);
4192 	}
4193 }
4194 
4195 static void
4196 qlt_handle_unsol_els_abort_completion(qlt_state_t *qlt, uint8_t *rsp)
4197 {
4198 	char		info[QLT_INFO_LEN];
4199 	fct_cmd_t	*cmd;
4200 	qlt_cmd_t	*qcmd;
4201 	uint32_t	hndl;
4202 	uint32_t	subcode1, subcode2;
4203 	uint16_t	status;
4204 
4205 	hndl = QMEM_RD32(qlt, rsp+4);
4206 	status = QMEM_RD16(qlt, rsp+8);
4207 	subcode1 = QMEM_RD32(qlt, rsp+0x24);
4208 	subcode2 = QMEM_RD32(qlt, rsp+0x28);
4209 
4210 	if (!CMD_HANDLE_VALID(hndl)) {
4211 		EL(qlt, "handle = %xh\n", hndl);
4212 		ASSERT(hndl == 0);
4213 		/*
4214 		 * Someone has requested to abort it, but no one is waiting for
4215 		 * this completion.
4216 		 */
4217 		if ((status != 0) && (status != 8)) {
4218 			EL(qlt, "status = %xh\n", status);
4219 			/*
4220 			 * There could be exchange resource leakage, so
4221 			 * throw HBA fatal error event now
4222 			 */
4223 			(void) snprintf(info, sizeof (info),
4224 			    "qlt_handle_unsol_els_abort_completion: "
4225 			    "Invalid handle: hndl-%x, status-%x/%x/%x, rsp-%p",
4226 			    hndl, status, subcode1, subcode2, (void *)rsp);
4227 			(void) fct_port_shutdown(qlt->qlt_port,
4228 			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
4229 			    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4230 			return;
4231 		}
4232 
4233 		return;
4234 	}
4235 
4236 	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
4237 	if (cmd == NULL) {
4238 		EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
4239 		/*
4240 		 * Why would this happen ??
4241 		 */
4242 		(void) snprintf(info, sizeof (info),
4243 		    "qlt_handle_unsol_els_abort_completion: can not get "
4244 		    "cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
4245 		    (void *)rsp);
4246 		(void) fct_port_shutdown(qlt->qlt_port,
4247 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4248 
4249 		return;
4250 	}
4251 
4252 	ASSERT(cmd->cmd_type == FCT_CMD_RCVD_ELS);
4253 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4254 	ASSERT(qcmd->flags & QLT_CMD_ABORTING);
4255 
4256 	if (qcmd->dbuf != NULL) {
4257 		qlt_dmem_free(NULL, qcmd->dbuf);
4258 		qcmd->dbuf = NULL;
4259 	}
4260 
4261 	if (status == 0) {
4262 		fct_cmd_fca_aborted(cmd, FCT_ABORT_SUCCESS, FCT_IOF_FCA_DONE);
4263 	} else if (status == 8) {
4264 		fct_cmd_fca_aborted(cmd, FCT_NOT_FOUND, FCT_IOF_FCA_DONE);
4265 	} else {
4266 		fct_cmd_fca_aborted(cmd,
4267 		    QLT_FIRMWARE_ERROR(status, subcode1, subcode2), 0);
4268 	}
4269 }
4270 
4271 static void
4272 qlt_handle_sol_els_completion(qlt_state_t *qlt, uint8_t *rsp)
4273 {
4274 	char		info[QLT_INFO_LEN];
4275 	fct_cmd_t	*cmd;
4276 	fct_els_t	*els;
4277 	qlt_cmd_t	*qcmd;
4278 	uint32_t	hndl;
4279 	uint32_t	subcode1, subcode2;
4280 	uint16_t	status;
4281 
4282 	hndl = QMEM_RD32(qlt, rsp+4);
4283 	status = QMEM_RD16(qlt, rsp+8);
4284 	subcode1 = QMEM_RD32(qlt, rsp+0x24);
4285 	subcode2 = QMEM_RD32(qlt, rsp+0x28);
4286 
4287 	if (!CMD_HANDLE_VALID(hndl)) {
4288 		EL(qlt, "handle = %xh\n", hndl);
4289 		/*
4290 		 * This cannot happen for sol els completion.
4291 		 */
4292 		(void) snprintf(info, sizeof (info),
4293 		    "qlt_handle_sol_els_completion: "
4294 		    "Invalid handle: hndl-%x, status-%x/%x/%x, rsp-%p",
4295 		    hndl, status, subcode1, subcode2, (void *)rsp);
4296 		(void) fct_port_shutdown(qlt->qlt_port,
4297 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
4298 		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4299 		return;
4300 	}
4301 
4302 	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
4303 	if (cmd == NULL) {
4304 		EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
4305 		(void) snprintf(info, sizeof (info),
4306 		    "qlt_handle_sol_els_completion: can not "
4307 		    "get cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
4308 		    (void *)rsp);
4309 		(void) fct_port_shutdown(qlt->qlt_port,
4310 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4311 
4312 		return;
4313 	}
4314 
4315 	ASSERT(cmd->cmd_type == FCT_CMD_SOL_ELS);
4316 	els = (fct_els_t *)cmd->cmd_specific;
4317 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4318 	qcmd->fw_xchg_addr = QMEM_RD32(qlt, (&rsp[0x10]));
4319 
4320 	if (qcmd->flags & QLT_CMD_ABORTING) {
4321 		/*
4322 		 * We will handle it when the ABORT IO IOCB returns.
4323 		 */
4324 		return;
4325 	}
4326 
4327 	if (qcmd->dbuf != NULL) {
4328 		if (status == 0) {
4329 			qlt_dmem_dma_sync(qcmd->dbuf, DDI_DMA_SYNC_FORKERNEL);
4330 			bcopy(qcmd->dbuf->db_sglist[0].seg_addr +
4331 			    qcmd->param.resp_offset,
4332 			    els->els_resp_payload, els->els_resp_size);
4333 		}
4334 		qlt_dmem_free(NULL, qcmd->dbuf);
4335 		qcmd->dbuf = NULL;
4336 	}
4337 
4338 	if (status == 0) {
4339 		fct_send_cmd_done(cmd, FCT_SUCCESS, FCT_IOF_FCA_DONE);
4340 	} else {
4341 		fct_send_cmd_done(cmd,
4342 		    QLT_FIRMWARE_ERROR(status, subcode1, subcode2), 0);
4343 	}
4344 }
4345 
4346 static void
4347 qlt_handle_ct_completion(qlt_state_t *qlt, uint8_t *rsp)
4348 {
4349 	fct_cmd_t	*cmd;
4350 	fct_sol_ct_t	*ct;
4351 	qlt_cmd_t	*qcmd;
4352 	uint32_t	 hndl;
4353 	uint16_t	 status;
4354 	char		 info[QLT_INFO_LEN];
4355 
4356 	hndl = QMEM_RD32(qlt, rsp+4);
4357 	status = QMEM_RD16(qlt, rsp+8);
4358 
4359 	if (!CMD_HANDLE_VALID(hndl)) {
4360 		EL(qlt, "handle = %xh\n", hndl);
4361 		/*
4362 		 * Solicited commands will always have a valid handle.
4363 		 */
4364 		(void) snprintf(info, sizeof (info),
4365 		    "qlt_handle_ct_completion: "
4366 		    "hndl-%x, status-%x, rsp-%p", hndl, status, (void *)rsp);
4367 		(void) fct_port_shutdown(qlt->qlt_port,
4368 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
4369 		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4370 		return;
4371 	}
4372 
4373 	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
4374 	if (cmd == NULL) {
4375 		EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
4376 		(void) snprintf(info, sizeof (info),
4377 		    "qlt_handle_ct_completion: cannot find "
4378 		    "cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
4379 		    (void *)rsp);
4380 		(void) fct_port_shutdown(qlt->qlt_port,
4381 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4382 
4383 		return;
4384 	}
4385 
4386 	ct = (fct_sol_ct_t *)cmd->cmd_specific;
4387 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4388 	ASSERT(cmd->cmd_type == FCT_CMD_SOL_CT);
4389 
4390 	if (qcmd->flags & QLT_CMD_ABORTING) {
4391 		/*
4392 		 * We will handle it when ABORT IO IOCB returns;
4393 		 */
4394 		return;
4395 	}
4396 
4397 	ASSERT(qcmd->dbuf);
4398 	if (status == 0) {
4399 		qlt_dmem_dma_sync(qcmd->dbuf, DDI_DMA_SYNC_FORKERNEL);
4400 		bcopy(qcmd->dbuf->db_sglist[0].seg_addr +
4401 		    qcmd->param.resp_offset,
4402 		    ct->ct_resp_payload, ct->ct_resp_size);
4403 	}
4404 	qlt_dmem_free(NULL, qcmd->dbuf);
4405 	qcmd->dbuf = NULL;
4406 
4407 	if (status == 0) {
4408 		fct_send_cmd_done(cmd, FCT_SUCCESS, FCT_IOF_FCA_DONE);
4409 	} else {
4410 		fct_send_cmd_done(cmd, QLT_FIRMWARE_ERROR(status, 0, 0), 0);
4411 	}
4412 }
4413 
4414 static void
4415 qlt_handle_ctio_completion(qlt_state_t *qlt, uint8_t *rsp)
4416 {
4417 	fct_cmd_t	*cmd;
4418 	scsi_task_t	*task;
4419 	qlt_cmd_t	*qcmd;
4420 	stmf_data_buf_t	*dbuf;
4421 	fct_status_t	fc_st;
4422 	uint32_t	iof = 0;
4423 	uint32_t	hndl;
4424 	uint16_t	status;
4425 	uint16_t	flags;
4426 	uint8_t		abort_req;
4427 	uint8_t		n;
4428 	char		info[QLT_INFO_LEN];
4429 
4430 	/* XXX: Check validity of the IOCB by checking 4th byte. */
4431 	hndl = QMEM_RD32(qlt, rsp+4);
4432 	status = QMEM_RD16(qlt, rsp+8);
4433 	flags = QMEM_RD16(qlt, rsp+0x1a);
4434 	n = rsp[2];
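	/*
	 * rsp[2] appears to echo byte 2 of the CTIO we submitted: the data
	 * buffer handle for a data transfer, or BIT_7 for a pure status
	 * CTIO.
	 */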
4435 
4436 	if (!CMD_HANDLE_VALID(hndl)) {
4437 		EL(qlt, "handle = %xh\n", hndl);
4438 		ASSERT(hndl == 0);
4439 		/*
4440 		 * Someone has requested to abort it, but no one is waiting for
4441 		 * this completion.
4442 		 */
4443 		EL(qlt, "hndl-%xh, status-%xh, rsp-%p\n", hndl, status,
4444 		    (void *)rsp);
4445 		if ((status != 1) && (status != 2)) {
4446 			EL(qlt, "status = %xh\n", status);
4447 			/*
4448 			 * There could be exchange resource leakage, so
4449 			 * throw HBA fatal error event now
4450 			 */
4451 			(void) snprintf(info, sizeof (info),
4452 			    "qlt_handle_ctio_completion: hndl-"
4453 			    "%x, status-%x, rsp-%p", hndl, status, (void *)rsp);
4454 			(void) fct_port_shutdown(qlt->qlt_port,
4455 			    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4456 
4457 		}
4458 
4459 		return;
4460 	}
4461 
4462 	if (flags & BIT_14) {
4463 		abort_req = 1;
4464 		EL(qlt, "abort: hndl-%x, status-%x, rsp-%p\n", hndl, status,
4465 		    (void *)rsp);
4466 	} else {
4467 		abort_req = 0;
4468 	}
4469 
4470 	cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
4471 	if (cmd == NULL) {
4472 		EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
4473 		(void) snprintf(info, sizeof (info),
4474 		    "qlt_handle_ctio_completion: cannot find "
4475 		    "cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
4476 		    (void *)rsp);
4477 		(void) fct_port_shutdown(qlt->qlt_port,
4478 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4479 
4480 		return;
4481 	}
4482 
4483 	task = (scsi_task_t *)cmd->cmd_specific;
4484 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4485 	if (qcmd->dbuf_rsp_iu) {
4486 		ASSERT((flags & (BIT_6 | BIT_7)) == BIT_7);
4487 		qlt_dmem_free(NULL, qcmd->dbuf_rsp_iu);
4488 		qcmd->dbuf_rsp_iu = NULL;
4489 	}
4490 
4491 	if ((status == 1) || (status == 2)) {
4492 		if (abort_req) {
4493 			fc_st = FCT_ABORT_SUCCESS;
4494 			iof = FCT_IOF_FCA_DONE;
4495 		} else {
4496 			fc_st = FCT_SUCCESS;
4497 			if (flags & BIT_15) {
4498 				iof = FCT_IOF_FCA_DONE;
4499 			}
4500 		}
4501 	} else {
4502 		EL(qlt, "status = %xh\n", status);
4503 		if ((status == 8) && abort_req) {
4504 			fc_st = FCT_NOT_FOUND;
4505 			iof = FCT_IOF_FCA_DONE;
4506 		} else {
4507 			fc_st = QLT_FIRMWARE_ERROR(status, 0, 0);
4508 		}
4509 	}
4510 	dbuf = NULL;
4511 	if (((n & BIT_7) == 0) && (!abort_req)) {
4512 		/* A completion of data xfer */
4513 		if (n == 0) {
4514 			dbuf = qcmd->dbuf;
4515 		} else {
4516 			dbuf = stmf_handle_to_buf(task, n);
4517 		}
4518 
4519 		ASSERT(dbuf != NULL);
4520 		if (dbuf->db_flags & DB_DIRECTION_FROM_RPORT)
4521 			qlt_dmem_dma_sync(dbuf, DDI_DMA_SYNC_FORCPU);
4522 		if (flags & BIT_15) {
4523 			dbuf->db_flags = (uint16_t)(dbuf->db_flags |
4524 			    DB_STATUS_GOOD_SENT);
4525 		}
4526 
4527 		dbuf->db_xfer_status = fc_st;
4528 		fct_scsi_data_xfer_done(cmd, dbuf, iof);
4529 		return;
4530 	}
4531 	if (!abort_req) {
4532 		/*
4533 		 * This was just a pure status xfer.
4534 		 */
4535 		fct_send_response_done(cmd, fc_st, iof);
4536 		return;
4537 	}
4538 
4539 	fct_cmd_fca_aborted(cmd, fc_st, iof);
4540 }
4541 
4542 static void
4543 qlt_handle_sol_abort_completion(qlt_state_t *qlt, uint8_t *rsp)
4544 {
4545 	char		info[QLT_INFO_LEN];
4546 	fct_cmd_t	*cmd;
4547 	qlt_cmd_t	*qcmd;
4548 	uint32_t	h;
4549 	uint16_t	status;
4550 
4551 	h = QMEM_RD32(qlt, rsp+4);
4552 	status = QMEM_RD16(qlt, rsp+8);
4553 
4554 	if (!CMD_HANDLE_VALID(h)) {
4555 		EL(qlt, "handle = %xh\n", h);
4556 		/*
4557 		 * Solicited commands always have a valid handle.
4558 		 */
4559 		(void) snprintf(info, sizeof (info),
4560 		    "qlt_handle_sol_abort_completion: hndl-"
4561 		    "%x, status-%x, rsp-%p", h, status, (void *)rsp);
4562 		(void) fct_port_shutdown(qlt->qlt_port,
4563 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
4564 		    STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4565 		return;
4566 	}
4567 	cmd = fct_handle_to_cmd(qlt->qlt_port, h);
4568 	if (cmd == NULL) {
4569 		EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", h);
4570 		/*
4571 		 * What happened to the cmd ??
4572 		 */
4573 		(void) snprintf(info, sizeof (info),
4574 		    "qlt_handle_sol_abort_completion: cannot "
4575 		    "find cmd, hndl-%x, status-%x, rsp-%p", h, status,
4576 		    (void *)rsp);
4577 		(void) fct_port_shutdown(qlt->qlt_port,
4578 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4579 
4580 		return;
4581 	}
4582 
4583 	ASSERT((cmd->cmd_type == FCT_CMD_SOL_ELS) ||
4584 	    (cmd->cmd_type == FCT_CMD_SOL_CT));
4585 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4586 	if (qcmd->dbuf != NULL) {
4587 		qlt_dmem_free(NULL, qcmd->dbuf);
4588 		qcmd->dbuf = NULL;
4589 	}
4590 	ASSERT(qcmd->flags & QLT_CMD_ABORTING);
4591 	if (status == 0) {
4592 		fct_cmd_fca_aborted(cmd, FCT_ABORT_SUCCESS, FCT_IOF_FCA_DONE);
4593 	} else if (status == 0x31) {
4594 		fct_cmd_fca_aborted(cmd, FCT_NOT_FOUND, FCT_IOF_FCA_DONE);
4595 	} else {
4596 		fct_cmd_fca_aborted(cmd, QLT_FIRMWARE_ERROR(status, 0, 0), 0);
4597 	}
4598 }
4599 
4600 static void
4601 qlt_handle_rcvd_abts(qlt_state_t *qlt, uint8_t *resp)
4602 {
4603 	qlt_abts_cmd_t	*qcmd;
4604 	fct_cmd_t	*cmd;
4605 	uint32_t	remote_portid;
4606 	char		info[QLT_INFO_LEN];
4607 
4608 	remote_portid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x18])))) |
4609 	    ((uint32_t)(resp[0x1A])) << 16;
4610 	cmd = (fct_cmd_t *)fct_alloc(FCT_STRUCT_CMD_RCVD_ABTS,
4611 	    sizeof (qlt_abts_cmd_t), 0);
4612 	if (cmd == NULL) {
4613 		EL(qlt, "fct_alloc cmd==NULL\n");
4614 		(void) snprintf(info, sizeof (info),
4615 		    "qlt_handle_rcvd_abts: qlt-%p, can't "
4616 		    "allocate space for fct_cmd", (void *)qlt);
4617 		(void) fct_port_shutdown(qlt->qlt_port,
4618 		    STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4619 		return;
4620 	}
4621 
4622 	resp[0xC] = resp[0xD] = resp[0xE] = 0;
4623 	qcmd = (qlt_abts_cmd_t *)cmd->cmd_fca_private;
4624 	bcopy(resp, qcmd->buf, IOCB_SIZE);
4625 	cmd->cmd_port = qlt->qlt_port;
4626 	cmd->cmd_rp_handle = QMEM_RD16(qlt, resp+0xA);
4627 	if (cmd->cmd_rp_handle == 0xFFFF)
4628 		cmd->cmd_rp_handle = FCT_HANDLE_NONE;
4629 
4630 	cmd->cmd_rportid = remote_portid;
4631 	cmd->cmd_lportid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x14])))) |
4632 	    ((uint32_t)(resp[0x16])) << 16;
4633 	cmd->cmd_oxid = QMEM_RD16(qlt, (&resp[0x26]));
4634 	cmd->cmd_rxid = QMEM_RD16(qlt, (&resp[0x24]));
4635 	fct_post_rcvd_cmd(cmd, 0);
4636 }
4637 
4638 static void
4639 qlt_handle_abts_completion(qlt_state_t *qlt, uint8_t *resp)
4640 {
4641 	uint16_t status;
4642 	char	info[QLT_INFO_LEN];
4643 
4644 	status = QMEM_RD16(qlt, resp+8);
4645 
4646 	if ((status == 0) || (status == 5)) {
4647 		return;
4648 	}
4649 	EL(qlt, "status = %xh\n", status);
4650 	(void) snprintf(info, sizeof (info),
4651 	    "ABTS completion failed %x/%x/%x resp_off %x",
4652 	    status, QMEM_RD32(qlt, resp+0x34), QMEM_RD32(qlt, resp+0x38),
4653 	    ((uint32_t)(qlt->resp_ndx_to_fw)) << 6);
4654 	(void) fct_port_shutdown(qlt->qlt_port, STMF_RFLAG_FATAL_ERROR |
4655 	    STMF_RFLAG_RESET | STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4656 }
4657 
4658 #ifdef	DEBUG
4659 uint32_t qlt_drop_abort_counter = 0;
4660 #endif
4661 
4662 fct_status_t
4663 qlt_abort_cmd(struct fct_local_port *port, fct_cmd_t *cmd, uint32_t flags)
4664 {
4665 	qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
4666 
4667 	if ((qlt->qlt_state == FCT_STATE_OFFLINE) ||
4668 	    (qlt->qlt_state == FCT_STATE_OFFLINING)) {
4669 		return (FCT_NOT_FOUND);
4670 	}
4671 
4672 #ifdef DEBUG
4673 	if (qlt_drop_abort_counter > 0) {
4674 		if (atomic_dec_32_nv(&qlt_drop_abort_counter) == 1)
4675 			return (FCT_SUCCESS);
4676 	}
4677 #endif
4678 
4679 	if (cmd->cmd_type == FCT_CMD_FCP_XCHG) {
4680 		return (qlt_abort_unsol_scsi_cmd(qlt, cmd));
4681 	}
4682 
4683 	if (flags & FCT_IOF_FORCE_FCA_DONE) {
4684 		cmd->cmd_handle = 0;
4685 	}
4686 
4687 	if (cmd->cmd_type == FCT_CMD_RCVD_ABTS) {
4688 		return (qlt_send_abts_response(qlt, cmd, 1));
4689 	}
4690 
4691 	if (cmd->cmd_type == FCT_CMD_RCVD_ELS) {
4692 		return (qlt_abort_purex(qlt, cmd));
4693 	}
4694 
4695 	if ((cmd->cmd_type == FCT_CMD_SOL_ELS) ||
4696 	    (cmd->cmd_type == FCT_CMD_SOL_CT)) {
4697 		return (qlt_abort_sol_cmd(qlt, cmd));
4698 	}
4699 	EL(qlt, "cmd->cmd_type = %xh\n", cmd->cmd_type);
4700 
4701 	ASSERT(0);
4702 	return (FCT_FAILURE);
4703 }
4704 
4705 fct_status_t
4706 qlt_abort_sol_cmd(qlt_state_t *qlt, fct_cmd_t *cmd)
4707 {
4708 	uint8_t *req;
4709 	qlt_cmd_t *qcmd;
4710 
4711 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4712 	qcmd->flags = (uint16_t)(qcmd->flags | QLT_CMD_ABORTING);
4713 	EL(qlt, "fctcmd-%p, cmd_handle-%xh\n", cmd, cmd->cmd_handle);
4714 
4715 	mutex_enter(&qlt->req_lock);
4716 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
4717 	if (req == NULL) {
4718 		mutex_exit(&qlt->req_lock);
4719 
4720 		return (FCT_BUSY);
4721 	}
4722 	bzero(req, IOCB_SIZE);
4723 	req[0] = 0x33; req[1] = 1;
4724 	QMEM_WR32(qlt, req+4, cmd->cmd_handle);
4725 	if (cmd->cmd_rp) {
4726 		QMEM_WR16(qlt, req+8, cmd->cmd_rp->rp_handle);
4727 	} else {
4728 		QMEM_WR16(qlt, req+8, 0xFFFF);
4729 	}
4730 
4731 	QMEM_WR32(qlt, req+0xc, cmd->cmd_handle);
4732 	QMEM_WR32(qlt, req+0x30, cmd->cmd_rportid);
4733 	qlt_submit_req_entries(qlt, 1);
4734 	mutex_exit(&qlt->req_lock);
4735 
4736 	return (FCT_SUCCESS);
4737 }
4738 
4739 fct_status_t
4740 qlt_abort_purex(qlt_state_t *qlt, fct_cmd_t *cmd)
4741 {
4742 	uint8_t *req;
4743 	qlt_cmd_t *qcmd;
4744 	fct_els_t *els;
4745 	uint8_t elsop, req1f;
4746 
4747 	els = (fct_els_t *)cmd->cmd_specific;
4748 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4749 	elsop = els->els_req_payload[0];
4750 	EL(qlt, "fctcmd-%p, cmd_handle-%xh, elsop-%xh\n", cmd, cmd->cmd_handle,
4751 	    elsop);
4752 	req1f = 0x60;	/* Terminate xchg */
4753 	if ((elsop == ELS_OP_PRLI) || (elsop == ELS_OP_PRLO) ||
4754 	    (elsop == ELS_OP_TPRLO) || (elsop == ELS_OP_LOGO)) {
4755 		req1f = (uint8_t)(req1f | BIT_4);
4756 	}
4757 
4758 	mutex_enter(&qlt->req_lock);
4759 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
4760 	if (req == NULL) {
4761 		mutex_exit(&qlt->req_lock);
4762 
4763 		return (FCT_BUSY);
4764 	}
4765 
4766 	qcmd->flags = (uint16_t)(qcmd->flags | QLT_CMD_ABORTING);
4767 	bzero(req, IOCB_SIZE);
4768 	req[0] = 0x53; req[1] = 1; req[0xf] = 0x10;
4769 	req[0x16] = elsop; req[0x1f] = req1f;
4770 	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
4771 	if (cmd->cmd_rp) {
4772 		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
4773 		EL(qlt, "rp_handle-%x\n", cmd->cmd_rp->rp_handle);
4774 	} else {
4775 		QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp_handle);
4776 		EL(qlt, "cmd_rp_handle-%x\n", cmd->cmd_rp_handle);
4777 	}
4778 
4779 	QMEM_WR32(qlt, (&req[0x10]), qcmd->fw_xchg_addr);
4780 	QMEM_WR32(qlt, (&req[0x18]), cmd->cmd_rportid);
4781 	qlt_submit_req_entries(qlt, 1);
4782 	mutex_exit(&qlt->req_lock);
4783 
4784 	return (FCT_SUCCESS);
4785 }
4786 
4787 fct_status_t
4788 qlt_abort_unsol_scsi_cmd(qlt_state_t *qlt, fct_cmd_t *cmd)
4789 {
4790 	qlt_cmd_t *qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4791 	uint8_t *req;
4792 	uint16_t flags;
4793 
4794 	flags = (uint16_t)(BIT_14 |
4795 	    (((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5));
4796 	EL(qlt, "fctcmd-%p, cmd_handle-%x\n", cmd, cmd->cmd_handle);
4797 
4798 	mutex_enter(&qlt->req_lock);
4799 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
4800 	if (req == NULL) {
4801 		mutex_exit(&qlt->req_lock);
4802 
4803 		return (FCT_BUSY);
4804 	}
4805 
4806 	qcmd->flags = (uint16_t)(qcmd->flags | QLT_CMD_ABORTING);
4807 	bzero(req, IOCB_SIZE);
4808 	req[0] = 0x12; req[1] = 0x1;
4809 	QMEM_WR32(qlt, req+4, cmd->cmd_handle);
4810 	QMEM_WR16(qlt, req+8, cmd->cmd_rp->rp_handle);
4811 	QMEM_WR16(qlt, req+10, 60);	/* 60 seconds timeout */
4812 	QMEM_WR32(qlt, req+0x10, cmd->cmd_rportid);
4813 	QMEM_WR32(qlt, req+0x14, qcmd->fw_xchg_addr);
4814 	QMEM_WR16(qlt, req+0x1A, flags);
4815 	QMEM_WR16(qlt, req+0x20, cmd->cmd_oxid);
4816 	qlt_submit_req_entries(qlt, 1);
4817 	mutex_exit(&qlt->req_lock);
4818 
4819 	return (FCT_SUCCESS);
4820 }
4821 
4822 fct_status_t
4823 qlt_send_cmd(fct_cmd_t *cmd)
4824 {
4825 	qlt_state_t *qlt;
4826 
4827 	qlt = (qlt_state_t *)cmd->cmd_port->port_fca_private;
4828 	if (cmd->cmd_type == FCT_CMD_SOL_ELS) {
4829 		return (qlt_send_els(qlt, cmd));
4830 	} else if (cmd->cmd_type == FCT_CMD_SOL_CT) {
4831 		return (qlt_send_ct(qlt, cmd));
4832 	}
4833 	EL(qlt, "cmd->cmd_type = %xh\n", cmd->cmd_type);
4834 
4835 	ASSERT(0);
4836 	return (FCT_FAILURE);
4837 }
4838 
4839 fct_status_t
4840 qlt_send_els(qlt_state_t *qlt, fct_cmd_t *cmd)
4841 {
4842 	uint8_t *req;
4843 	fct_els_t *els;
4844 	qlt_cmd_t *qcmd;
4845 	stmf_data_buf_t *buf;
4846 	qlt_dmem_bctl_t *bctl;
4847 	uint32_t sz, minsz;
4848 
4849 	els = (fct_els_t *)cmd->cmd_specific;
4850 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4851 	qcmd->flags = QLT_CMD_TYPE_SOLICITED;
4852 	qcmd->param.resp_offset = (uint16_t)((els->els_req_size + 7) & ~7);
4853 	sz = minsz = qcmd->param.resp_offset + els->els_resp_size;
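	/*
	 * A single DMA buffer holds both payloads: the ELS request at
	 * offset 0 and the response at the 8-byte-aligned resp_offset
	 * computed above.
	 */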
4854 	buf = qlt_i_dmem_alloc(qlt, sz, &minsz, 0);
4855 	if (buf == NULL) {
4856 		return (FCT_BUSY);
4857 	}
4858 	bctl = (qlt_dmem_bctl_t *)buf->db_port_private;
4859 
4860 	qcmd->dbuf = buf;
4861 	bcopy(els->els_req_payload, buf->db_sglist[0].seg_addr,
4862 	    els->els_req_size);
4863 	qlt_dmem_dma_sync(buf, DDI_DMA_SYNC_FORDEV);
4864 
4865 	mutex_enter(&qlt->req_lock);
4866 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
4867 	if (req == NULL) {
4868 		qlt_dmem_free(NULL, buf);
4869 		mutex_exit(&qlt->req_lock);
4870 		return (FCT_BUSY);
4871 	}
4872 	bzero(req, IOCB_SIZE);
4873 	req[0] = 0x53; req[1] = 1;
4874 	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
4875 	QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
4876 	QMEM_WR16(qlt, (&req[0xC]), 1);
4877 	QMEM_WR16(qlt, (&req[0xE]), 0x1000);
4878 	QMEM_WR16(qlt, (&req[0x14]), 1);
4879 	req[0x16] = els->els_req_payload[0];
4880 	if (qlt->cur_topology == PORT_TOPOLOGY_PT_TO_PT) {
4881 		req[0x1b] = (uint8_t)((cmd->cmd_lportid >> 16) & 0xff);
4882 		req[0x1c] = (uint8_t)(cmd->cmd_lportid & 0xff);
4883 		req[0x1d] = (uint8_t)((cmd->cmd_lportid >> 8) & 0xff);
4884 	}
4885 	QMEM_WR32(qlt, (&req[0x18]), cmd->cmd_rp->rp_id);
4886 	QMEM_WR32(qlt, (&req[0x20]), els->els_resp_size);
4887 	QMEM_WR32(qlt, (&req[0x24]), els->els_req_size);
4888 	QMEM_WR64(qlt, (&req[0x28]), bctl->bctl_dev_addr);
4889 	QMEM_WR32(qlt, (&req[0x30]), els->els_req_size);
4890 	QMEM_WR64(qlt, (&req[0x34]), (bctl->bctl_dev_addr +
4891 	    qcmd->param.resp_offset));
4892 	QMEM_WR32(qlt, (&req[0x3C]), els->els_resp_size);
4893 	qlt_submit_req_entries(qlt, 1);
4894 	mutex_exit(&qlt->req_lock);
4895 
4896 	return (FCT_SUCCESS);
4897 }
4898 
4899 fct_status_t
4900 qlt_send_ct(qlt_state_t *qlt, fct_cmd_t *cmd)
4901 {
4902 	uint8_t *req;
4903 	fct_sol_ct_t *ct;
4904 	qlt_cmd_t *qcmd;
4905 	stmf_data_buf_t *buf;
4906 	qlt_dmem_bctl_t *bctl;
4907 	uint32_t sz, minsz;
4908 
4909 	ct = (fct_sol_ct_t *)cmd->cmd_specific;
4910 	qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4911 	qcmd->flags = QLT_CMD_TYPE_SOLICITED;
4912 	qcmd->param.resp_offset = (uint16_t)((ct->ct_req_size + 7) & ~7);
4913 	sz = minsz = qcmd->param.resp_offset + ct->ct_resp_size;
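	/*
	 * As in qlt_send_els(), one DMA buffer holds the CT request at
	 * offset 0 and the CT response at the 8-byte-aligned resp_offset.
	 */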
4914 	buf = qlt_i_dmem_alloc(qlt, sz, &minsz, 0);
4915 	if (buf == NULL) {
4916 		return (FCT_BUSY);
4917 	}
4918 	bctl = (qlt_dmem_bctl_t *)buf->db_port_private;
4919 
4920 	qcmd->dbuf = buf;
4921 	bcopy(ct->ct_req_payload, buf->db_sglist[0].seg_addr,
4922 	    ct->ct_req_size);
4923 	qlt_dmem_dma_sync(buf, DDI_DMA_SYNC_FORDEV);
4924 
4925 	mutex_enter(&qlt->req_lock);
4926 	req = (uint8_t *)qlt_get_req_entries(qlt, 1);
4927 	if (req == NULL) {
4928 		qlt_dmem_free(NULL, buf);
4929 		mutex_exit(&qlt->req_lock);
4930 		return (FCT_BUSY);
4931 	}
4932 	bzero(req, IOCB_SIZE);
4933 	req[0] = 0x29; req[1] = 1;
4934 	QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
4935 	QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
4936 	QMEM_WR16(qlt, (&req[0xC]), 1);
4937 	QMEM_WR16(qlt, (&req[0x10]), 0x20);	/* > (2 * RA_TOV) */
4938 	QMEM_WR16(qlt, (&req[0x14]), 1);
4939 
4940 	QMEM_WR32(qlt, (&req[0x20]), ct->ct_resp_size);
4941 	QMEM_WR32(qlt, (&req[0x24]), ct->ct_req_size);
4942 
4943 	QMEM_WR64(qlt, (&req[0x28]), bctl->bctl_dev_addr); /* COMMAND DSD */
4944 	QMEM_WR32(qlt, (&req[0x30]), ct->ct_req_size);
4945 	QMEM_WR64(qlt, (&req[0x34]), (bctl->bctl_dev_addr +
4946 	    qcmd->param.resp_offset));		/* RESPONSE DSD */
4947 	QMEM_WR32(qlt, (&req[0x3C]), ct->ct_resp_size);
4948 
4949 	qlt_submit_req_entries(qlt, 1);
4950 	mutex_exit(&qlt->req_lock);
4951 
4952 	return (FCT_SUCCESS);
4953 }
4954 
4955 
4956 /*
4957  * All QLT_FIRMWARE_* will mainly be handled in this function
4958  * All QLT_FIRMWARE_* will mainly be handled in this function.
4959  * It cannot be called in interrupt context.
4960  * FWDUMP's purpose is to serve ioctl, so we will use qlt_ioctl_flags
4961  * and qlt_ioctl_lock
4962  */
4963 static fct_status_t
4964 qlt_firmware_dump(fct_local_port_t *port, stmf_state_change_info_t *ssci)
4965 {
4966 	qlt_state_t	*qlt = (qlt_state_t *)port->port_fca_private;
4967 	int		i;
4968 	int		retries, n;
4969 	uint_t		size_left;
4970 	char		c = ' ';
4971 	uint32_t	addr, endaddr, words_to_read;
4972 	caddr_t		buf;
4973 	fct_status_t	ret;
4974 
4975 	mutex_enter(&qlt->qlt_ioctl_lock);
4976 	/*
4977 	 * To make sure that there's no outstanding dumping task
4978 	 */
4979 	if (qlt->qlt_ioctl_flags & QLT_FWDUMP_INPROGRESS) {
4980 		mutex_exit(&qlt->qlt_ioctl_lock);
4981 		EL(qlt, "qlt_ioctl_flags=%xh, inprogress\n",
4982 		    qlt->qlt_ioctl_flags);
4983 		EL(qlt, "outstanding\n");
4984 		return (FCT_FAILURE);
4985 	}
4986 
4987 	/*
4988 	 * To make sure not to overwrite existing dump
4989 	 */
4990 	if ((qlt->qlt_ioctl_flags & QLT_FWDUMP_ISVALID) &&
4991 	    !(qlt->qlt_ioctl_flags & QLT_FWDUMP_TRIGGERED_BY_USER) &&
4992 	    !(qlt->qlt_ioctl_flags & QLT_FWDUMP_FETCHED_BY_USER)) {
4993 		/*
4994 		 * If we already have one dump, but it's not triggered by the user
4995 		 * and the user hasn't fetched it, we shouldn't dump again.
4996 		 */
4997 		mutex_exit(&qlt->qlt_ioctl_lock);
4998 		EL(qlt, "qlt_ioctl_flags=%xh, already done\n",
4999 		    qlt->qlt_ioctl_flags);
5000 		cmn_err(CE_NOTE, "qlt(%d): Skipping firmware dump as there "
5001 		    "is one already outstanding.", qlt->instance);
5002 		return (FCT_FAILURE);
5003 	}
5004 	qlt->qlt_ioctl_flags |= QLT_FWDUMP_INPROGRESS;
5005 	if (ssci->st_rflags & STMF_RFLAG_USER_REQUEST) {
5006 		qlt->qlt_ioctl_flags |= QLT_FWDUMP_TRIGGERED_BY_USER;
5007 	} else {
5008 		qlt->qlt_ioctl_flags &= ~QLT_FWDUMP_TRIGGERED_BY_USER;
5009 	}
5010 	mutex_exit(&qlt->qlt_ioctl_lock);
5011 
5012 	size_left = QLT_FWDUMP_BUFSIZE;
5013 	if (!qlt->qlt_fwdump_buf) {
5014 		ASSERT(!(qlt->qlt_ioctl_flags & QLT_FWDUMP_ISVALID));
5015 		/*
5016 		 * It's the only place that we allocate buf for dumping. After
5017 		 * it's allocated, we will use it until the port is detached.
5018 		 */
5019 		qlt->qlt_fwdump_buf = kmem_zalloc(size_left, KM_SLEEP);
5020 	}
5021 
5022 	/*
5023 	 * Start to dump firmware
5024 	 */
5025 	buf = (caddr_t)qlt->qlt_fwdump_buf;
5026 
5027 	/*
5028 	 * Print the ISP firmware revision number and attributes information,
5029 	 * and read the RISC to Host Status register.
5030 	 */
5031 	n = (int)snprintf(buf, size_left, "ISP FW Version %d.%02d.%02d "
5032 	    "Attributes %04x\n\nR2H Status Register\n%08x",
5033 	    qlt->fw_major, qlt->fw_minor,
5034 	    qlt->fw_subminor, qlt->fw_attr, REG_RD32(qlt, REG_RISC_STATUS));
5035 	buf += n; size_left -= n;
5036 
5037 	/*
5038 	 * Before pausing the RISC, make sure no mailbox can execute
5039 	 */
5040 	mutex_enter(&qlt->mbox_lock);
5041 	if (qlt->mbox_io_state != MBOX_STATE_UNKNOWN) {
5042 		/*
5043 		 * Wait to grab the mailboxes
5044 		 */
5045 		for (retries = 0; (qlt->mbox_io_state != MBOX_STATE_READY) &&
5046 		    (qlt->mbox_io_state != MBOX_STATE_UNKNOWN); retries++) {
5047 			(void) cv_timedwait(&qlt->mbox_cv, &qlt->mbox_lock,
5048 			    ddi_get_lbolt() + drv_usectohz(1000000));
5049 			if (retries > 5) {
5050 				mutex_exit(&qlt->mbox_lock);
5051 				EL(qlt, "can't drain out mailbox commands\n");
5052 				goto dump_fail;
5053 			}
5054 		}
5055 		qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
5056 		cv_broadcast(&qlt->mbox_cv);
5057 	}
5058 	mutex_exit(&qlt->mbox_lock);
5059 
5060 	/*
5061 	 * Pause the RISC processor
5062 	 */
5063 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(SET_RISC_PAUSE));
5064 
5065 	/*
5066 	 * Wait for the RISC processor to pause
5067 	 */
5068 	for (i = 0; i < 200; i++) {
5069 		if (REG_RD32(qlt, REG_RISC_STATUS) & 0x100) {
5070 			break;
5071 		}
5072 		drv_usecwait(1000);
5073 	}
5074 	if (i == 200) {
5075 		EL(qlt, "can't pause\n");
5076 		goto dump_fail;
5077 	}
5078 
5079 	if ((!qlt->qlt_25xx_chip) && (!qlt->qlt_81xx_chip)) {
5080 		goto over_25xx_specific_dump;
5081 	}
5082 	n = (int)snprintf(buf, size_left, "\n\nHostRisc registers\n");
5083 	buf += n; size_left -= n;
5084 	REG_WR32(qlt, 0x54, 0x7000);
5085 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5086 	buf += n; size_left -= n;
5087 	REG_WR32(qlt, 0x54, 0x7010);
5088 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5089 	buf += n; size_left -= n;
5090 	REG_WR32(qlt, 0x54, 0x7C00);
5091 
5092 	n = (int)snprintf(buf, size_left, "\nPCIe registers\n");
5093 	buf += n; size_left -= n;
5094 	REG_WR32(qlt, 0xC0, 0x1);
5095 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc4, 3, size_left);
5096 	buf += n; size_left -= n;
5097 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 1, size_left);
5098 	buf += n; size_left -= n;
5099 	REG_WR32(qlt, 0xC0, 0x0);
5100 
5101 over_25xx_specific_dump:;
5102 	n = (int)snprintf(buf, size_left, "\n\nHost Interface Registers\n");
5103 	buf += n; size_left -= n;
5104 	/*
5105 	 * Capture data from 32 registers
5106 	 */
5107 	n = qlt_fwdump_dump_regs(qlt, buf, 0, 32, size_left);
5108 	buf += n; size_left -= n;
5109 
5110 	/*
5111 	 * Disable interrupts
5112 	 */
5113 	REG_WR32(qlt, 0xc, 0);
5114 
5115 	/*
5116 	 * Shadow registers
5117 	 */
5118 	n = (int)snprintf(buf, size_left, "\nShadow Registers\n");
5119 	buf += n; size_left -= n;
5120 
5121 	REG_WR32(qlt, 0x54, 0xF70);
5122 	addr = 0xb0000000;
5123 	for (i = 0; i < 0xb; i++) {
5124 		if ((!qlt->qlt_25xx_chip) &&
5125 		    (!qlt->qlt_81xx_chip) &&
5126 		    (i >= 7)) {
5127 			break;
5128 		}
5129 		if (i && ((i & 7) == 0)) {
5130 			n = (int)snprintf(buf, size_left, "\n");
5131 			buf += n; size_left -= n;
5132 		}
5133 		REG_WR32(qlt, 0xF0, addr);
5134 		n = (int)snprintf(buf, size_left, "%08x ", REG_RD32(qlt, 0xFC));
5135 		buf += n; size_left -= n;
5136 		addr += 0x100000;
5137 	}
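	/*
	 * Note (inferred): each shadow value above is read indirectly; the
	 * selector is written to offset 0xF0 and the data is read back from
	 * offset 0xFC, with the selector stepping in 0x100000 increments.
	 */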
5138 
5139 	if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip)) {
5140 		REG_WR32(qlt, 0x54, 0x10);
5141 		n = (int)snprintf(buf, size_left,
5142 		    "\n\nRISC IO Register\n%08x", REG_RD32(qlt, 0xC0));
5143 		buf += n; size_left -= n;
5144 	}
5145 
5146 	/*
5147 	 * Mailbox registers
5148 	 */
5149 	n = (int)snprintf(buf, size_left, "\n\nMailbox Registers\n");
5150 	buf += n; size_left -= n;
5151 	for (i = 0; i < 32; i += 2) {
5152 		if ((i + 2) & 15) {
5153 			c = ' ';
5154 		} else {
5155 			c = '\n';
5156 		}
5157 		n = (int)snprintf(buf, size_left, "%04x %04x%c",
5158 		    REG_RD16(qlt, 0x80 + (i << 1)),
5159 		    REG_RD16(qlt, 0x80 + ((i+1) << 1)), c);
5160 		buf += n; size_left -= n;
5161 	}
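	/*
	 * Note: the loop above dumps the 32 16-bit mailbox registers that
	 * start at offset 0x80, two values per field and a newline after
	 * every eight pairs.
	 */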
5162 
5163 	/*
5164 	 * Transfer sequence registers
5165 	 */
5166 	n = (int)snprintf(buf, size_left, "\nXSEQ GP Registers\n");
5167 	buf += n; size_left -= n;
5168 
5169 	REG_WR32(qlt, 0x54, 0xBF00);
5170 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5171 	buf += n; size_left -= n;
5172 	REG_WR32(qlt, 0x54, 0xBF10);
5173 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5174 	buf += n; size_left -= n;
5175 	REG_WR32(qlt, 0x54, 0xBF20);
5176 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5177 	buf += n; size_left -= n;
5178 	REG_WR32(qlt, 0x54, 0xBF30);
5179 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5180 	buf += n; size_left -= n;
5181 	REG_WR32(qlt, 0x54, 0xBF40);
5182 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5183 	buf += n; size_left -= n;
5184 	REG_WR32(qlt, 0x54, 0xBF50);
5185 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5186 	buf += n; size_left -= n;
5187 	REG_WR32(qlt, 0x54, 0xBF60);
5188 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5189 	buf += n; size_left -= n;
5190 	REG_WR32(qlt, 0x54, 0xBF70);
5191 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5192 	buf += n; size_left -= n;
5193 	n = (int)snprintf(buf, size_left, "\nXSEQ-0 registers\n");
5194 	buf += n; size_left -= n;
5195 	if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip)) {
5196 		REG_WR32(qlt, 0x54, 0xBFC0);
5197 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5198 		buf += n; size_left -= n;
5199 		REG_WR32(qlt, 0x54, 0xBFD0);
5200 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5201 		buf += n; size_left -= n;
5202 	}
5203 	REG_WR32(qlt, 0x54, 0xBFE0);
5204 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5205 	buf += n; size_left -= n;
5206 	n = (int)snprintf(buf, size_left, "\nXSEQ-1 registers\n");
5207 	buf += n; size_left -= n;
5208 	REG_WR32(qlt, 0x54, 0xBFF0);
5209 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5210 	buf += n; size_left -= n;
5211 
5212 	/*
5213 	 * Receive sequence registers
5214 	 */
5215 	n = (int)snprintf(buf, size_left, "\nRSEQ GP Registers\n");
5216 	buf += n; size_left -= n;
5217 	REG_WR32(qlt, 0x54, 0xFF00);
5218 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5219 	buf += n; size_left -= n;
5220 	REG_WR32(qlt, 0x54, 0xFF10);
5221 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5222 	buf += n; size_left -= n;
5223 	REG_WR32(qlt, 0x54, 0xFF20);
5224 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5225 	buf += n; size_left -= n;
5226 	REG_WR32(qlt, 0x54, 0xFF30);
5227 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5228 	buf += n; size_left -= n;
5229 	REG_WR32(qlt, 0x54, 0xFF40);
5230 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5231 	buf += n; size_left -= n;
5232 	REG_WR32(qlt, 0x54, 0xFF50);
5233 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5234 	buf += n; size_left -= n;
5235 	REG_WR32(qlt, 0x54, 0xFF60);
5236 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5237 	buf += n; size_left -= n;
5238 	REG_WR32(qlt, 0x54, 0xFF70);
5239 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5240 	buf += n; size_left -= n;
5241 	n = (int)snprintf(buf, size_left, "\nRSEQ-0 registers\n");
5242 	buf += n; size_left -= n;
5243 	if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip)) {
5244 		REG_WR32(qlt, 0x54, 0xFFC0);
5245 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5246 		buf += n; size_left -= n;
5247 	}
5248 	REG_WR32(qlt, 0x54, 0xFFD0);
5249 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5250 	buf += n; size_left -= n;
5251 	n = (int)snprintf(buf, size_left, "\nRSEQ-1 registers\n");
5252 	buf += n; size_left -= n;
5253 	REG_WR32(qlt, 0x54, 0xFFE0);
5254 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5255 	buf += n; size_left -= n;
5256 	n = (int)snprintf(buf, size_left, "\nRSEQ-2 registers\n");
5257 	buf += n; size_left -= n;
5258 	REG_WR32(qlt, 0x54, 0xFFF0);
5259 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5260 	buf += n; size_left -= n;
5261 
5262 	if ((!qlt->qlt_25xx_chip) && (!qlt->qlt_81xx_chip))
5263 		goto over_aseq_regs;
5264 
5265 	/*
5266 	 * Auxiliary sequencer registers
5267 	 */
5268 	n = (int)snprintf(buf, size_left, "\nASEQ GP Registers\n");
5269 	buf += n; size_left -= n;
5270 	REG_WR32(qlt, 0x54, 0xB000);
5271 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5272 	buf += n; size_left -= n;
5273 	REG_WR32(qlt, 0x54, 0xB010);
5274 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5275 	buf += n; size_left -= n;
5276 	REG_WR32(qlt, 0x54, 0xB020);
5277 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5278 	buf += n; size_left -= n;
5279 	REG_WR32(qlt, 0x54, 0xB030);
5280 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5281 	buf += n; size_left -= n;
5282 	REG_WR32(qlt, 0x54, 0xB040);
5283 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5284 	buf += n; size_left -= n;
5285 	REG_WR32(qlt, 0x54, 0xB050);
5286 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5287 	buf += n; size_left -= n;
5288 	REG_WR32(qlt, 0x54, 0xB060);
5289 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5290 	buf += n; size_left -= n;
5291 	REG_WR32(qlt, 0x54, 0xB070);
5292 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5293 	buf += n; size_left -= n;
5294 	n = (int)snprintf(buf, size_left, "\nASEQ-0 registers\n");
5295 	buf += n; size_left -= n;
5296 	REG_WR32(qlt, 0x54, 0xB0C0);
5297 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5298 	buf += n; size_left -= n;
5299 	REG_WR32(qlt, 0x54, 0xB0D0);
5300 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5301 	buf += n; size_left -= n;
5302 	n = (int)snprintf(buf, size_left, "\nASEQ-1 registers\n");
5303 	buf += n; size_left -= n;
5304 	REG_WR32(qlt, 0x54, 0xB0E0);
5305 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5306 	buf += n; size_left -= n;
5307 	n = (int)snprintf(buf, size_left, "\nASEQ-2 registers\n");
5308 	buf += n; size_left -= n;
5309 	REG_WR32(qlt, 0x54, 0xB0F0);
5310 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5311 	buf += n; size_left -= n;
5312 
5313 over_aseq_regs:;
5314 
5315 	/*
5316 	 * Command DMA registers
5317 	 */
5318 	n = (int)snprintf(buf, size_left, "\nCommand DMA registers\n");
5319 	buf += n; size_left -= n;
5320 	REG_WR32(qlt, 0x54, 0x7100);
5321 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5322 	buf += n; size_left -= n;
5323 
5324 	/*
5325 	 * Queues
5326 	 */
5327 	n = (int)snprintf(buf, size_left,
5328 	    "\nRequest0 Queue DMA Channel registers\n");
5329 	buf += n; size_left -= n;
5330 	REG_WR32(qlt, 0x54, 0x7200);
5331 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 8, size_left);
5332 	buf += n; size_left -= n;
5333 	n = qlt_fwdump_dump_regs(qlt, buf, 0xe4, 7, size_left);
5334 	buf += n; size_left -= n;
5335 
5336 	n = (int)snprintf(buf, size_left,
5337 	    "\n\nResponse0 Queue DMA Channel registers\n");
5338 	buf += n; size_left -= n;
5339 	REG_WR32(qlt, 0x54, 0x7300);
5340 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 8, size_left);
5341 	buf += n; size_left -= n;
5342 	n = qlt_fwdump_dump_regs(qlt, buf, 0xe4, 7, size_left);
5343 	buf += n; size_left -= n;
5344 
5345 	n = (int)snprintf(buf, size_left,
5346 	    "\n\nRequest1 Queue DMA Channel registers\n");
5347 	buf += n; size_left -= n;
5348 	REG_WR32(qlt, 0x54, 0x7400);
5349 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 8, size_left);
5350 	buf += n; size_left -= n;
5351 	n = qlt_fwdump_dump_regs(qlt, buf, 0xe4, 7, size_left);
5352 	buf += n; size_left -= n;
5353 
5354 	/*
5355 	 * Transmit DMA registers
5356 	 */
5357 	n = (int)snprintf(buf, size_left, "\n\nXMT0 Data DMA registers\n");
5358 	buf += n; size_left -= n;
5359 	REG_WR32(qlt, 0x54, 0x7600);
5360 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5361 	buf += n; size_left -= n;
5362 	REG_WR32(qlt, 0x54, 0x7610);
5363 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5364 	buf += n; size_left -= n;
5365 	n = (int)snprintf(buf, size_left, "\nXMT1 Data DMA registers\n");
5366 	buf += n; size_left -= n;
5367 	REG_WR32(qlt, 0x54, 0x7620);
5368 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5369 	buf += n; size_left -= n;
5370 	REG_WR32(qlt, 0x54, 0x7630);
5371 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5372 	buf += n; size_left -= n;
5373 	n = (int)snprintf(buf, size_left, "\nXMT2 Data DMA registers\n");
5374 	buf += n; size_left -= n;
5375 	REG_WR32(qlt, 0x54, 0x7640);
5376 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5377 	buf += n; size_left -= n;
5378 	REG_WR32(qlt, 0x54, 0x7650);
5379 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5380 	buf += n; size_left -= n;
5381 	n = (int)snprintf(buf, size_left, "\nXMT3 Data DMA registers\n");
5382 	buf += n; size_left -= n;
5383 	REG_WR32(qlt, 0x54, 0x7660);
5384 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5385 	buf += n; size_left -= n;
5386 	REG_WR32(qlt, 0x54, 0x7670);
5387 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5388 	buf += n; size_left -= n;
5389 	n = (int)snprintf(buf, size_left, "\nXMT4 Data DMA registers\n");
5390 	buf += n; size_left -= n;
5391 	REG_WR32(qlt, 0x54, 0x7680);
5392 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5393 	buf += n; size_left -= n;
5394 	REG_WR32(qlt, 0x54, 0x7690);
5395 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5396 	buf += n; size_left -= n;
5397 	n = (int)snprintf(buf, size_left, "\nXMT Data DMA Common registers\n");
5398 	buf += n; size_left -= n;
5399 	REG_WR32(qlt, 0x54, 0x76A0);
5400 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5401 	buf += n; size_left -= n;
5402 
5403 	/*
5404 	 * Receive DMA registers
5405 	 */
5406 	n = (int)snprintf(buf, size_left,
5407 	    "\nRCV Thread 0 Data DMA registers\n");
5408 	buf += n; size_left -= n;
5409 	REG_WR32(qlt, 0x54, 0x7700);
5410 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5411 	buf += n; size_left -= n;
5412 	REG_WR32(qlt, 0x54, 0x7710);
5413 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5414 	buf += n; size_left -= n;
5415 	n = (int)snprintf(buf, size_left,
5416 	    "\nRCV Thread 1 Data DMA registers\n");
5417 	buf += n; size_left -= n;
5418 	REG_WR32(qlt, 0x54, 0x7720);
5419 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5420 	buf += n; size_left -= n;
5421 	REG_WR32(qlt, 0x54, 0x7730);
5422 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5423 	buf += n; size_left -= n;
5424 
5425 	/*
5426 	 * RISC registers
5427 	 */
5428 	n = (int)snprintf(buf, size_left, "\nRISC GP registers\n");
5429 	buf += n; size_left -= n;
5430 	REG_WR32(qlt, 0x54, 0x0F00);
5431 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5432 	buf += n; size_left -= n;
5433 	REG_WR32(qlt, 0x54, 0x0F10);
5434 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5435 	buf += n; size_left -= n;
5436 	REG_WR32(qlt, 0x54, 0x0F20);
5437 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5438 	buf += n; size_left -= n;
5439 	REG_WR32(qlt, 0x54, 0x0F30);
5440 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5441 	buf += n; size_left -= n;
5442 	REG_WR32(qlt, 0x54, 0x0F40);
5443 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5444 	buf += n; size_left -= n;
5445 	REG_WR32(qlt, 0x54, 0x0F50);
5446 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5447 	buf += n; size_left -= n;
5448 	REG_WR32(qlt, 0x54, 0x0F60);
5449 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5450 	buf += n; size_left -= n;
5451 	REG_WR32(qlt, 0x54, 0x0F70);
5452 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5453 	buf += n; size_left -= n;
5454 
5455 	/*
5456 	 * Local memory controller registers
5457 	 */
5458 	n = (int)snprintf(buf, size_left, "\nLMC registers\n");
5459 	buf += n; size_left -= n;
5460 	REG_WR32(qlt, 0x54, 0x3000);
5461 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5462 	buf += n; size_left -= n;
5463 	REG_WR32(qlt, 0x54, 0x3010);
5464 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5465 	buf += n; size_left -= n;
5466 	REG_WR32(qlt, 0x54, 0x3020);
5467 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5468 	buf += n; size_left -= n;
5469 	REG_WR32(qlt, 0x54, 0x3030);
5470 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5471 	buf += n; size_left -= n;
5472 	REG_WR32(qlt, 0x54, 0x3040);
5473 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5474 	buf += n; size_left -= n;
5475 	REG_WR32(qlt, 0x54, 0x3050);
5476 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5477 	buf += n; size_left -= n;
5478 	REG_WR32(qlt, 0x54, 0x3060);
5479 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5480 	buf += n; size_left -= n;
5481 
5482 	if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip)) {
5483 		REG_WR32(qlt, 0x54, 0x3070);
5484 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5485 		buf += n; size_left -= n;
5486 	}
5487 
5488 	/*
5489 	 * Fibre protocol module registers
5490 	 */
5491 	n = (int)snprintf(buf, size_left, "\nFPM hardware registers\n");
5492 	buf += n; size_left -= n;
5493 	REG_WR32(qlt, 0x54, 0x4000);
5494 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5495 	buf += n; size_left -= n;
5496 	REG_WR32(qlt, 0x54, 0x4010);
5497 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5498 	buf += n; size_left -= n;
5499 	REG_WR32(qlt, 0x54, 0x4020);
5500 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5501 	buf += n; size_left -= n;
5502 	REG_WR32(qlt, 0x54, 0x4030);
5503 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5504 	buf += n; size_left -= n;
5505 	REG_WR32(qlt, 0x54, 0x4040);
5506 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5507 	buf += n; size_left -= n;
5508 	REG_WR32(qlt, 0x54, 0x4050);
5509 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5510 	buf += n; size_left -= n;
5511 	REG_WR32(qlt, 0x54, 0x4060);
5512 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5513 	buf += n; size_left -= n;
5514 	REG_WR32(qlt, 0x54, 0x4070);
5515 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5516 	buf += n; size_left -= n;
5517 	REG_WR32(qlt, 0x54, 0x4080);
5518 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5519 	buf += n; size_left -= n;
5520 	REG_WR32(qlt, 0x54, 0x4090);
5521 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5522 	buf += n; size_left -= n;
5523 	REG_WR32(qlt, 0x54, 0x40A0);
5524 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5525 	buf += n; size_left -= n;
5526 	REG_WR32(qlt, 0x54, 0x40B0);
5527 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5528 	buf += n; size_left -= n;
5529 	if (qlt->qlt_81xx_chip) {
5530 		REG_WR32(qlt, 0x54, 0x40C0);
5531 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5532 		buf += n; size_left -= n;
5533 		REG_WR32(qlt, 0x54, 0x40D0);
5534 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5535 		buf += n; size_left -= n;
5536 	}
5537 
5538 	/*
5539 	 * Fibre buffer registers
5540 	 */
5541 	n = (int)snprintf(buf, size_left, "\nFB hardware registers\n");
5542 	buf += n; size_left -= n;
5543 	REG_WR32(qlt, 0x54, 0x6000);
5544 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5545 	buf += n; size_left -= n;
5546 	REG_WR32(qlt, 0x54, 0x6010);
5547 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5548 	buf += n; size_left -= n;
5549 	REG_WR32(qlt, 0x54, 0x6020);
5550 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5551 	buf += n; size_left -= n;
5552 	REG_WR32(qlt, 0x54, 0x6030);
5553 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5554 	buf += n; size_left -= n;
5555 	REG_WR32(qlt, 0x54, 0x6040);
5556 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5557 	buf += n; size_left -= n;
5558 	REG_WR32(qlt, 0x54, 0x6100);
5559 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5560 	buf += n; size_left -= n;
5561 	REG_WR32(qlt, 0x54, 0x6130);
5562 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5563 	buf += n; size_left -= n;
5564 	REG_WR32(qlt, 0x54, 0x6150);
5565 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5566 	buf += n; size_left -= n;
5567 	REG_WR32(qlt, 0x54, 0x6170);
5568 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5569 	buf += n; size_left -= n;
5570 	REG_WR32(qlt, 0x54, 0x6190);
5571 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5572 	buf += n; size_left -= n;
5573 	REG_WR32(qlt, 0x54, 0x61B0);
5574 	n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5575 	buf += n; size_left -= n;
5576 	if (qlt->qlt_81xx_chip) {
5577 		REG_WR32(qlt, 0x54, 0x61C0);
5578 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5579 		buf += n; size_left -= n;
5580 	}
5581 	if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip)) {
5582 		REG_WR32(qlt, 0x54, 0x6F00);
5583 		n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5584 		buf += n; size_left -= n;
5585 	}
5586 
5587 	qlt->intr_sneak_counter = 10;
5588 	mutex_enter(&qlt->intr_lock);
5589 	(void) qlt_reset_chip(qlt);
5590 	drv_usecwait(20);
5591 	qlt->intr_sneak_counter = 0;
5592 	mutex_exit(&qlt->intr_lock);
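	/*
	 * Note (inferred): the chip is reset under intr_lock with
	 * intr_sneak_counter raised, presumably so that any stray interrupt
	 * generated by the reset is ignored by the ISR rather than treated
	 * as a real event.
	 */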
5593 
5594 	/*
5595 	 * Memory
5596 	 */
5597 	n = (int)snprintf(buf, size_left, "\nCode RAM\n");
5598 	buf += n; size_left -= n;
5599 
5600 	addr = 0x20000;
5601 	endaddr = 0x22000;
5602 	words_to_read = 0;
5603 	while (addr < endaddr) {
5604 		words_to_read = MBOX_DMA_MEM_SIZE >> 2;
5605 		if ((words_to_read + addr) > endaddr) {
5606 			words_to_read = endaddr - addr;
5607 		}
5608 		if ((ret = qlt_read_risc_ram(qlt, addr, words_to_read)) !=
5609 		    QLT_SUCCESS) {
5610 			EL(qlt, "Error reading risc ram - CODE RAM status="
5611 			    "%llxh\n", ret);
5612 			goto dump_fail;
5613 		}
5614 
5615 		n = qlt_dump_risc_ram(qlt, addr, words_to_read, buf, size_left);
5616 		buf += n; size_left -= n;
5617 
5618 		if (size_left < 100000) {
5619 			EL(qlt, "run out of space - CODE RAM size_left=%d\n",
5620 			    size_left);
5621 			goto dump_ok;
5622 		}
5623 		addr += words_to_read;
5624 	}
5625 
5626 	n = (int)snprintf(buf, size_left, "\nExternal Memory\n");
5627 	buf += n; size_left -= n;
5628 
5629 	addr = 0x100000;
5630 	endaddr = (((uint32_t)(qlt->fw_endaddrhi)) << 16) | qlt->fw_endaddrlo;
5631 	endaddr++;
5632 	if (endaddr & 7) {
5633 		endaddr = (endaddr + 7) & 0xFFFFFFF8;
5634 	}
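	/*
	 * Note: endaddr is the last external RAM address reported by the
	 * firmware (fw_endaddrhi:fw_endaddrlo) plus one, rounded up to a
	 * multiple of eight words, presumably so the dump below always
	 * formats complete eight-word rows.
	 */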
5635 
5636 	words_to_read = 0;
5637 	while (addr < endaddr) {
5638 		words_to_read = MBOX_DMA_MEM_SIZE >> 2;
5639 		if ((words_to_read + addr) > endaddr) {
5640 			words_to_read = endaddr - addr;
5641 		}
5642 		if ((ret = qlt_read_risc_ram(qlt, addr, words_to_read)) !=
5643 		    QLT_SUCCESS) {
5644 			EL(qlt, "Error reading risc ram - EXT RAM status="
5645 			    "%llxh\n", ret);
5646 			goto dump_fail;
5647 		}
5648 		n = qlt_dump_risc_ram(qlt, addr, words_to_read, buf, size_left);
5649 		buf += n; size_left -= n;
5650 		if (size_left < 100000) {
5651 			EL(qlt, "run out of space - EXT RAM\n");
5652 			goto dump_ok;
5653 		}
5654 		addr += words_to_read;
5655 	}
5656 
5657 	/*
5658 	 * Label the end tag
5659 	 */
5660 	n = (int)snprintf(buf, size_left, "[<==END] ISP Debug Dump\n");
5661 	buf += n; size_left -= n;
5662 
5663 	/*
5664 	 * Queue dumping
5665 	 */
5666 	n = (int)snprintf(buf, size_left, "\nRequest Queue\n");
5667 	buf += n; size_left -= n;
5668 	n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + REQUEST_QUEUE_OFFSET,
5669 	    REQUEST_QUEUE_ENTRIES, buf, size_left);
5670 	buf += n; size_left -= n;
5671 
5672 	n = (int)snprintf(buf, size_left, "\nPriority Queue\n");
5673 	buf += n; size_left -= n;
5674 	n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + PRIORITY_QUEUE_OFFSET,
5675 	    PRIORITY_QUEUE_ENTRIES, buf, size_left);
5676 	buf += n; size_left -= n;
5677 
5678 	n = (int)snprintf(buf, size_left, "\nResponse Queue\n");
5679 	buf += n; size_left -= n;
5680 	n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + RESPONSE_QUEUE_OFFSET,
5681 	    RESPONSE_QUEUE_ENTRIES, buf, size_left);
5682 	buf += n; size_left -= n;
5683 
5684 	n = (int)snprintf(buf, size_left, "\nATIO queue\n");
5685 	buf += n; size_left -= n;
5686 	n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + ATIO_QUEUE_OFFSET,
5687 	    ATIO_QUEUE_ENTRIES, buf, size_left);
5688 	buf += n; size_left -= n;
5689 
5690 	/*
5691 	 * Label dump reason
5692 	 */
5693 	n = (int)snprintf(buf, size_left, "\nFirmware dump reason: %s-%s\n",
5694 	    qlt->qlt_port_alias, ssci->st_additional_info);
5695 	buf += n; size_left -= n;
5696 
5697 dump_ok:
5698 	EL(qlt, "left-%d\n", size_left);
5699 
5700 	mutex_enter(&qlt->qlt_ioctl_lock);
5701 	qlt->qlt_ioctl_flags &=
5702 	    ~(QLT_FWDUMP_INPROGRESS | QLT_FWDUMP_FETCHED_BY_USER);
5703 	qlt->qlt_ioctl_flags |= QLT_FWDUMP_ISVALID;
5704 	mutex_exit(&qlt->qlt_ioctl_lock);
5705 	return (FCT_SUCCESS);
5706 
5707 dump_fail:
5708 	EL(qlt, "dump not done\n");
5709 	mutex_enter(&qlt->qlt_ioctl_lock);
5710 	qlt->qlt_ioctl_flags &= QLT_IOCTL_FLAG_MASK;
5711 	mutex_exit(&qlt->qlt_ioctl_lock);
5712 	return (FCT_FAILURE);
5713 }
5714 
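/*
 * Summary (inferred): reads 'count' 32-bit registers starting at register
 * offset 'startaddr' and formats them into 'buf', eight values per line.
 * Returns the number of characters written.
 */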
5715 static int
5716 qlt_fwdump_dump_regs(qlt_state_t *qlt, caddr_t buf, int startaddr, int count,
5717     uint_t size_left)
5718 {
5719 	int		i;
5720 	int		n;
5721 	char		c = ' ';
5722 
5723 	for (i = 0, n = 0; i < count; i++) {
5724 		if ((i + 1) & 7) {
5725 			c = ' ';
5726 		} else {
5727 			c = '\n';
5728 		}
5729 		n = (int)(n + (int)snprintf(&buf[n], (uint_t)(size_left - n),
5730 		    "%08x%c", REG_RD32(qlt, startaddr + (i << 2)), c));
5731 	}
5732 	return (n);
5733 }
5734 
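/*
 * Summary (inferred): formats 'words' 32-bit words that qlt_read_risc_ram()
 * has already staged at queue_mem_ptr + MBOX_DMA_MEM_OFFSET, prefixing
 * every eight-word row with its RISC address.
 */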
5735 static int
5736 qlt_dump_risc_ram(qlt_state_t *qlt, uint32_t addr, uint32_t words,
5737     caddr_t buf, uint_t size_left)
5738 {
5739 	int		i;
5740 	int		n;
5741 	char		c = ' ';
5742 	uint32_t	*ptr;
5743 
5744 	ptr = (uint32_t *)((caddr_t)qlt->queue_mem_ptr + MBOX_DMA_MEM_OFFSET);
5745 	for (i = 0, n = 0; i < words; i++) {
5746 		if ((i & 7) == 0) {
5747 			n = (int)(n + (int)snprintf(&buf[n],
5748 			    (uint_t)(size_left - n), "%08x: ", addr + i));
5749 		}
5750 		if ((i + 1) & 7) {
5751 			c = ' ';
5752 		} else {
5753 			c = '\n';
5754 		}
5755 		n = (int)(n + (int)snprintf(&buf[n], (uint_t)(size_left - n),
5756 		    "%08x%c", ptr[i], c));
5757 	}
5758 	return (n);
5759 }
5760 
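/*
 * Summary (inferred): formats a queue as 16-bit words (32 words per entry,
 * i.e. 64-byte entries), eight words per line, each row prefixed with its
 * word offset within the queue.
 */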
5761 static int
5762 qlt_dump_queue(qlt_state_t *qlt, caddr_t qadr, int entries, caddr_t buf,
5763     uint_t size_left)
5764 {
5765 	int		i;
5766 	int		n;
5767 	char		c = ' ';
5768 	int		words;
5769 	uint16_t	*ptr;
5770 	uint16_t	w;
5771 
5772 	words = entries * 32;
5773 	ptr = (uint16_t *)qadr;
5774 	for (i = 0, n = 0; i < words; i++) {
5775 		if ((i & 7) == 0) {
5776 			n = (int)(n + (int)snprintf(&buf[n],
5777 			    (uint_t)(size_left - n), "%05x: ", i));
5778 		}
5779 		if ((i + 1) & 7) {
5780 			c = ' ';
5781 		} else {
5782 			c = '\n';
5783 		}
5784 		w = QMEM_RD16(qlt, &ptr[i]);
5785 		n = (int)(n + (int)snprintf(&buf[n], (size_left - n), "%04x%c",
5786 		    w, c));
5787 	}
5788 	return (n);
5789 }
5790 
5791 /*
5792  * Only called by debug dump. Interrupts are disabled and the mailboxes
5793  * along with the mailbox RAM are available.
5794  * Copy data from RISC RAM to system memory
5795  */
5796 static fct_status_t
5797 qlt_read_risc_ram(qlt_state_t *qlt, uint32_t addr, uint32_t words)
5798 {
5799 	uint64_t	da;
5800 	fct_status_t	ret;
5801 
5802 	REG_WR16(qlt, REG_MBOX(0), MBC_DUMP_RAM_EXTENDED);
5803 	da = qlt->queue_mem_cookie.dmac_laddress;
5804 	da += MBOX_DMA_MEM_OFFSET;
5805 
5806 	/* System destination address */
5807 	REG_WR16(qlt, REG_MBOX(3), LSW(LSD(da)));
5808 	REG_WR16(qlt, REG_MBOX(2), MSW(LSD(da)));
5809 	REG_WR16(qlt, REG_MBOX(7), LSW(MSD(da)));
5810 	REG_WR16(qlt, REG_MBOX(6), MSW(MSD(da)));
5811 
5812 	/* Length */
5813 	REG_WR16(qlt, REG_MBOX(5), LSW(words));
5814 	REG_WR16(qlt, REG_MBOX(4), MSW(words));
5815 
5816 	/* RISC source address */
5817 	REG_WR16(qlt, REG_MBOX(1), LSW(addr));
5818 	REG_WR16(qlt, REG_MBOX(8), MSW(addr));
5819 
5820 	ret = qlt_raw_mailbox_command(qlt);
5821 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
5822 	if (ret == QLT_SUCCESS) {
5823 		(void) ddi_dma_sync(qlt->queue_mem_dma_handle,
5824 		    MBOX_DMA_MEM_OFFSET, words << 2, DDI_DMA_SYNC_FORCPU);
5825 	} else {
5826 		EL(qlt, "qlt_raw_mailbox_command=ch status=%llxh\n", ret);
5827 	}
5828 	return (ret);
5829 }
5830 
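/*
 * Note (inferred from the names used here): queues a single IOCB of entry
 * type 0x1b that asks the firmware to verify itself without updating
 * anything (the option word at offset 0x8 is set to 1, "don't update").
 * Its completion is handled by qlt_handle_verify_fw_completion() below.
 */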
5831 static void
5832 qlt_verify_fw(qlt_state_t *qlt)
5833 {
5834 	caddr_t req;
5835 	/* Just put it on the request queue */
5836 	mutex_enter(&qlt->req_lock);
5837 	req = qlt_get_req_entries(qlt, 1);
5838 	if (req == NULL) {
5839 		mutex_exit(&qlt->req_lock);
5840 		/* XXX handle this */
5841 		return;
5842 	}
5843 
5844 	bzero(req, IOCB_SIZE);
5845 
5846 	req[0] = 0x1b;
5847 	req[1] = 1;
5848 
5849 	QMEM_WR32(qlt, (&req[4]), 0xffffffff);
5850 	QMEM_WR16(qlt, (&req[0x8]), 1);    /*  options - don't update */
5851 	QMEM_WR32(qlt, (&req[0x14]), 0x80010300);
5852 
5853 	qlt_submit_req_entries(qlt, 1);
5854 	mutex_exit(&qlt->req_lock);
5855 }
5856 
5857 static void
5858 qlt_handle_verify_fw_completion(qlt_state_t *qlt, uint8_t *rsp)
5859 {
5860 	uint16_t	status;
5861 	char		info[QLT_INFO_LEN];
5862 
5863 	status = QMEM_RD16(qlt, rsp+8);
5864 	if (status != 0) {
5865 		(void) snprintf(info, sizeof (info),
5866 		    "qlt_handle_verify_fw_completion: "
5867 		    "status:%x, rsp:%p", status, (void *)rsp);
5868 		if (status == 3) {
5869 			uint16_t error_code;
5870 
5871 			error_code = QMEM_RD16(qlt, rsp+0xA);
5872 			(void) snprintf(info, sizeof (info),
5873 			    "qlt_handle_verify_fw_completion: error code:%x",
5874 			    error_code);
5875 		}
5876 	}
5877 }
5878 
5879 /*
5880  * qlt_el_trace_desc_ctor - Construct an extended logging trace descriptor.
5881  *
5882  * Input:	Pointer to the adapter state structure.
5883  * Returns:	Success or Failure.
5884  * Context:	Kernel context.
5885  */
5886 static int
5887 qlt_el_trace_desc_ctor(qlt_state_t *qlt)
5888 {
5889 	int	rval = DDI_SUCCESS;
5890 
5891 	qlt->el_trace_desc = (qlt_el_trace_desc_t *)
5892 	    kmem_zalloc(sizeof (qlt_el_trace_desc_t), KM_SLEEP);
5893 
5894 	if (qlt->el_trace_desc == NULL) {
5895 		cmn_err(CE_WARN, "qlt(%d): can't construct trace descriptor",
5896 		    qlt->instance);
5897 		rval = DDI_FAILURE;
5898 	} else {
5899 		qlt->el_trace_desc->next = 0;
5900 		qlt->el_trace_desc->trace_buffer =
5901 		    (char *)kmem_zalloc(EL_TRACE_BUF_SIZE, KM_SLEEP);
5902 
5903 		if (qlt->el_trace_desc->trace_buffer == NULL) {
5904 			cmn_err(CE_WARN, "qlt(%d): can't get trace buffer",
5905 			    qlt->instance);
5906 			kmem_free(qlt->el_trace_desc,
5907 			    sizeof (qlt_el_trace_desc_t));
5908 			qlt->el_trace_desc = NULL;
5909 			rval = DDI_FAILURE;
5910 		} else {
5911 			qlt->el_trace_desc->trace_buffer_size =
5912 			    EL_TRACE_BUF_SIZE;
5913 			mutex_init(&qlt->el_trace_desc->mutex, NULL,
5914 			    MUTEX_DRIVER, NULL);
5915 		}
5916 	}
5917 
5918 	return (rval);
5919 }
5920 
5921 /*
5922  * qlt_el_trace_desc_dtor - Destroy an extended logging trace descriptor.
5923  *
5924  * Input:	Pointer to the adapter state structure.
5925  * Returns:	Success or Failure.
5926  * Context:	Kernel context.
5927  */
5928 static int
5929 qlt_el_trace_desc_dtor(qlt_state_t *qlt)
5930 {
5931 	int	rval = DDI_SUCCESS;
5932 
5933 	if (qlt->el_trace_desc == NULL) {
5934 		cmn_err(CE_WARN, "qlt(%d): can't destroy el trace descriptor",
5935 		    qlt->instance);
5936 		rval = DDI_FAILURE;
5937 	} else {
5938 		if (qlt->el_trace_desc->trace_buffer != NULL) {
5939 			kmem_free(qlt->el_trace_desc->trace_buffer,
5940 			    qlt->el_trace_desc->trace_buffer_size);
5941 		}
5942 		mutex_destroy(&qlt->el_trace_desc->mutex);
5943 		kmem_free(qlt->el_trace_desc, sizeof (qlt_el_trace_desc_t));
5944 		qlt->el_trace_desc = NULL;
5945 	}
5946 
5947 	return (rval);
5948 }
5949 
5950 /*
5951  * qlt_el_msg
5952  *	Extended logging message
5953  *
5954  * Input:
5955  *	qlt:	adapter state pointer.
5956  *	fn:	function name.
5957  *	ce:	level
5958  *	...:	Variable argument list.
5959  *
5960  * Context:
5961  *	Kernel/Interrupt context.
5962  */
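/*
 * Illustrative usage: callers in this file go through the EL() macro,
 * e.g. EL(qlt, "can't pause\n"), which is assumed to expand to something
 * like qlt_el_msg(qlt, __func__, CE_CONT, "can't pause\n"); the exact
 * expansion lives in the driver headers and is not shown here.
 */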
5963 void
5964 qlt_el_msg(qlt_state_t *qlt, const char *fn, int ce, ...)
5965 {
5966 	char		*s, *fmt = 0, *fmt1 = 0;
5967 	char		fmt2[EL_BUFFER_RESERVE];
5968 	int		rval, tmp;
5969 	int		tracing = 0;
5970 	va_list		vl;
5971 
5972 	/* Tracing is the default but it can be disabled. */
5973 	if ((rval = qlt_validate_trace_desc(qlt)) == DDI_SUCCESS) {
5974 		tracing = 1;
5975 
5976 		mutex_enter(&qlt->el_trace_desc->mutex);
5977 
5978 		/*
5979 		 * Ensure enough space for the string. Wrap to
5980 		 * start when default message allocation size
5981 		 * would overrun the end.
5982 		 */
5983 		if ((qlt->el_trace_desc->next + EL_BUFFER_RESERVE) >=
5984 		    qlt->el_trace_desc->trace_buffer_size) {
5985 			fmt = qlt->el_trace_desc->trace_buffer;
5986 			qlt->el_trace_desc->next = 0;
5987 		} else {
5988 			fmt = qlt->el_trace_desc->trace_buffer +
5989 			    qlt->el_trace_desc->next;
5990 		}
5991 	}
5992 
5993 	/* if no buffer use the stack */
5994 	if (fmt == NULL) {
5995 		fmt = fmt2;
5996 	}
5997 
5998 	va_start(vl, ce);
5999 
6000 	s = va_arg(vl, char *);
6001 
6002 	rval = (int)snprintf(fmt, (size_t)EL_BUFFER_RESERVE,
6003 	    "QEL qlt(%d): %s, ", qlt->instance, fn);
6004 	fmt1 = fmt + rval;
6005 	tmp = (int)vsnprintf(fmt1,
6006 	    (size_t)(uint32_t)((int)EL_BUFFER_RESERVE - rval), s, vl);
6007 	rval += tmp;
6008 
6009 	/*
6010 	 * Calculate the offset where the next message will go,
6011 	 * skipping the NULL.
6012 	 */
6013 	if (tracing) {
6014 		uint16_t next = (uint16_t)(rval += 1);
6015 		qlt->el_trace_desc->next += next;
6016 		mutex_exit(&qlt->el_trace_desc->mutex);
6017 	}
6018 
6019 	if (enable_extended_logging) {
6020 		cmn_err(ce, "%s", fmt);
6021 	}
6022 
6023 	va_end(vl);
6024 }
6025 
6026 /*
6027  * qlt_dump_el_trace_buffer
6028  *	 Outputs extended logging trace buffer.
6029  *
6030  * Input:
6031  *	qlt:	adapter state pointer.
6032  */
6033 void
6034 qlt_dump_el_trace_buffer(qlt_state_t *qlt)
6035 {
6036 	char		*dump_start = NULL;
6037 	char		*dump_current = NULL;
6038 	char		*trace_start;
6039 	char		*trace_end;
6040 	int		wrapped = 0;
6041 	int		rval;
6042 
6043 	mutex_enter(&qlt->el_trace_desc->mutex);
6044 
6045 	rval = qlt_validate_trace_desc(qlt);
6046 	if (rval != DDI_SUCCESS) {
6047 		cmn_err(CE_CONT, "qlt(%d) Dump EL trace - invalid desc\n",
6048 		    qlt->instance);
6049 	} else if ((dump_start = qlt_find_trace_start(qlt)) != NULL) {
6050 		dump_current = dump_start;
6051 		trace_start = qlt->el_trace_desc->trace_buffer;
6052 		trace_end = trace_start +
6053 		    qlt->el_trace_desc->trace_buffer_size;
6054 
6055 		cmn_err(CE_CONT, "qlt(%d) Dump EL trace - start %p %p\n",
6056 		    qlt->instance,
6057 		    (void *)dump_start, (void *)trace_start);
6058 
6059 		while (((uintptr_t)dump_current - (uintptr_t)trace_start) <=
6060 		    (uintptr_t)qlt->el_trace_desc->trace_buffer_size) {
6061 			/* Show it... */
6062 			cmn_err(CE_CONT, "%p - %s", (void *)dump_current,
6063 			    dump_current);
6064 			/* Make the next the current */
6065 			dump_current += (strlen(dump_current) + 1);
6066 			/* check for wrap */
6067 			if ((dump_current + EL_BUFFER_RESERVE) >= trace_end) {
6068 				dump_current = trace_start;
6069 				wrapped = 1;
6070 			} else if (wrapped) {
6071 				/* Don't go past next. */
6072 				if ((trace_start + qlt->el_trace_desc->next) <=
6073 				    dump_current) {
6074 					break;
6075 				}
6076 			} else if (*dump_current == '\0') {
6077 				break;
6078 			}
6079 		}
6080 	}
6081 	mutex_exit(&qlt->el_trace_desc->mutex);
6082 }
6083 
6084 /*
6085  * qlt_validate_trace_desc
6086  *	 Ensures the extended logging trace descriptor is good.
6087  *
6088  * Input:
6089  *	qlt:	adapter state pointer.
6090  *
6091  * Returns:
6092  *	DDI_SUCCESS or DDI_FAILURE.
6093  */
6094 static int
6095 qlt_validate_trace_desc(qlt_state_t *qlt)
6096 {
6097 	int	rval = DDI_SUCCESS;
6098 
6099 	if (qlt->el_trace_desc == NULL) {
6100 		rval = DDI_FAILURE;
6101 	} else if (qlt->el_trace_desc->trace_buffer == NULL) {
6102 		rval = DDI_FAILURE;
6103 	}
6104 	return (rval);
6105 }
6106 
6107 /*
6108  * qlt_find_trace_start
6109  *	 Locate the oldest extended logging trace entry.
6110  *
6111  * Input:
6112  *	qlt:	adapter state pointer.
6113  *
6114  * Returns:
6115  *	Pointer to a string.
6116  *
6117  * Context:
6118  *	Kernel/Interrupt context.
6119  */
6120 static char *
6121 qlt_find_trace_start(qlt_state_t *qlt)
6122 {
6123 	char	*trace_start = 0;
6124 	char	*trace_next  = 0;
6125 
6126 	trace_next = qlt->el_trace_desc->trace_buffer +
6127 	    qlt->el_trace_desc->next;
6128 
6129 	/*
6130 	 * If the buffer has not wrapped, next will point at a null, so
6131 	 * start is the beginning of the buffer.  If next points at a char,
6132 	 * then we must traverse the buffer until a null is detected; that
6133 	 * will be the beginning of the oldest whole entry in the buffer,
6134 	 * which is the start.
6135 	 */
6136 
6137 	if ((trace_next + EL_BUFFER_RESERVE) >=
6138 	    (qlt->el_trace_desc->trace_buffer +
6139 	    qlt->el_trace_desc->trace_buffer_size)) {
6140 		trace_start = qlt->el_trace_desc->trace_buffer;
6141 	} else if (*trace_next != '\0') {
6142 		trace_start = trace_next + (strlen(trace_next) + 1);
6143 	} else {
6144 		trace_start = qlt->el_trace_desc->trace_buffer;
6145 	}
6146 	return (trace_start);
6147 }
6148 
6149 
6150 static int
6151 qlt_read_int_prop(qlt_state_t *qlt, char *prop, int defval)
6152 {
6153 	return (ddi_getprop(DDI_DEV_T_ANY, qlt->dip,
6154 	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, prop, defval));
6155 }
6156 
6157 static int
6158 qlt_read_string_prop(qlt_state_t *qlt, char *prop, char **prop_val)
6159 {
6160 	return (ddi_prop_lookup_string(DDI_DEV_T_ANY, qlt->dip,
6161 	    DDI_PROP_DONTPASS, prop, prop_val));
6162 }
6163 
6164 static int
6165 qlt_read_int_instance_prop(qlt_state_t *qlt, char *prop, int defval)
6166 {
6167 	char		inst_prop[256];
6168 	int		val;
6169 
6170 	/*
6171 	 * Get adapter instance specific parameters. If the instance
6172 	 * specific parameter isn't there, try the global parameter.
6173 	 */
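	/*
	 * For example: with instance 0 and prop "bucketcnt2k", the property
	 * "hba0-bucketcnt2k" is tried first and the plain "bucketcnt2k"
	 * value is used only when the instance-specific one is absent.
	 */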
6174 
6175 	(void) sprintf(inst_prop, "hba%d-%s", qlt->instance, prop);
6176 
6177 	if ((val = qlt_read_int_prop(qlt, inst_prop, defval)) == defval) {
6178 		val = qlt_read_int_prop(qlt, prop, defval);
6179 	}
6180 
6181 	return (val);
6182 }
6183 
6184 static int
6185 qlt_read_string_instance_prop(qlt_state_t *qlt, char *prop, char **prop_val)
6186 {
6187 	char		instance_prop[256];
6188 
6189 	/* Get adapter instance specific parameter. */
6190 	(void) sprintf(instance_prop, "hba%d-%s", qlt->instance, prop);
6191 	return (qlt_read_string_prop(qlt, instance_prop, prop_val));
6192 }
6193 
6194 static int
6195 qlt_convert_string_to_ull(char *prop, int radix,
6196     u_longlong_t *result)
6197 {
6198 	return (ddi_strtoull((const char *)prop, 0, radix, result));
6199 }
6200 
6201 static boolean_t
6202 qlt_wwn_overload_prop(qlt_state_t *qlt)
6203 {
6204 	char		*prop_val = 0;
6205 	int		rval;
6206 	int		radix;
6207 	u_longlong_t	wwnn = 0, wwpn = 0;
6208 	boolean_t	overloaded = FALSE;
6209 
6210 	radix = 16;
6211 
6212 	rval = qlt_read_string_instance_prop(qlt, "adapter-wwnn", &prop_val);
6213 	if (rval == DDI_PROP_SUCCESS) {
6214 		rval = qlt_convert_string_to_ull(prop_val, radix, &wwnn);
6215 	}
6216 	if (rval == DDI_PROP_SUCCESS) {
6217 		rval = qlt_read_string_instance_prop(qlt, "adapter-wwpn",
6218 		    &prop_val);
6219 		if (rval == DDI_PROP_SUCCESS) {
6220 			rval = qlt_convert_string_to_ull(prop_val, radix,
6221 			    &wwpn);
6222 		}
6223 	}
6224 	if (rval == DDI_PROP_SUCCESS) {
6225 		overloaded = TRUE;
6226 		/* Overload the current node/port name nvram copy */
6227 		bcopy((char *)&wwnn, qlt->nvram->node_name, 8);
6228 		BIG_ENDIAN_64(qlt->nvram->node_name);
6229 		bcopy((char *)&wwpn, qlt->nvram->port_name, 8);
6230 		BIG_ENDIAN_64(qlt->nvram->port_name);
6231 	}
6232 	return (overloaded);
6233 }
6234 
6235 /*
6236  * prop_text - Return a pointer to a string describing the status
6237  *
6238  * Input:	prop_status = the return status from a property function.
6239  * Returns:	pointer to a string.
6240  * Context:	Kernel context.
6241  */
6242 char *
6243 prop_text(int prop_status)
6244 {
6245 	string_table_t *entry = &prop_status_tbl[0];
6246 
6247 	return (value2string(entry, prop_status, 0xFFFF));
6248 }
6249 
6250 /*
6251  * value2string	Return a pointer to a string associated with the value
6252  *
6253  * Input:	entry = the value to string table
6254  *		value = the value
6255  * Returns:	pointer to a string.
6256  * Context:	Kernel context.
6257  */
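/*
 * For example, prop_text() above passes prop_status_tbl with a delimiter
 * of 0xFFFF; the loop stops at the first matching entry or at the
 * delimiter entry, so the delimiter row's string acts as the fallback.
 */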
6258 char *
6259 value2string(string_table_t *entry, int value, int delimiter)
6260 {
6261 	for (; entry->value != delimiter; entry++) {
6262 		if (entry->value == value) {
6263 			break;
6264 		}
6265 	}
6266 	return (entry->string);
6267 }
6268 
6269 /*
6270  * qlt_chg_endian Change endianness of a byte array.
6271  *
6272  * Input:	buf = array pointer.
6273  *		size = size of array in bytes.
6274  *
6275  * Context:	Interrupt or Kernel context.
6276  */
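/*
 * Illustrative example: an 8-byte buffer 21 00 00 e0 8b 01 02 03 becomes
 * 03 02 01 8b e0 00 00 21; the routine simply reverses the bytes in place.
 */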
6277 void
6278 qlt_chg_endian(uint8_t buf[], size_t size)
6279 {
6280 	uint8_t byte;
6281 	size_t  cnt1;
6282 	size_t  cnt;
6283 
6284 	cnt1 = size - 1;
6285 	for (cnt = 0; cnt < size / 2; cnt++) {
6286 		byte = buf[cnt1];
6287 		buf[cnt1] = buf[cnt];
6288 		buf[cnt] = byte;
6289 		cnt1--;
6290 	}
6291 }
6292 
6293 /*
6294  * qlt_mps_reset
6295  *	Reset MPS for FCoE functions.
6296  *
6297  * Input:
6298  *	qlt = adapter state pointer.
6299  *
6300  * Context:
6301  *	Kernel context.
6302  */
6303 static void
6304 qlt_mps_reset(qlt_state_t *qlt)
6305 {
6306 	uint32_t	data, dctl = 1000;
6307 
6308 	do {
6309 		if (dctl-- == 0 || qlt_raw_wrt_risc_ram_word(qlt, 0x7c00, 1) !=
6310 		    QLT_SUCCESS) {
6311 			return;
6312 		}
6313 		if (qlt_raw_rd_risc_ram_word(qlt, 0x7c00, &data) !=
6314 		    QLT_SUCCESS) {
6315 			(void) qlt_raw_wrt_risc_ram_word(qlt, 0x7c00, 0);
6316 			return;
6317 		}
6318 	} while (!(data & BIT_0));
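	/*
	 * Note (inferred): the loop above repeatedly writes 1 to RISC RAM
	 * word 0x7c00 and polls it (bounded by dctl) until BIT_0 reads back
	 * set, which looks like acquiring a firmware-side semaphore before
	 * the MPS word at 0x7A15 is touched; 0x7c00 is written back to 0
	 * when the routine finishes or the read fails.
	 */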
6319 
6320 	if (qlt_raw_rd_risc_ram_word(qlt, 0x7A15, &data) == QLT_SUCCESS) {
6321 		dctl = (uint16_t)PCICFG_RD16(qlt, 0x54);
6322 		if ((data & 0xe0) != (dctl & 0xe0)) {
6323 			data &= 0xff1f;
6324 			data |= dctl & 0xe0;
6325 			(void) qlt_raw_wrt_risc_ram_word(qlt, 0x7A15, data);
6326 		}
6327 	}
6328 	(void) qlt_raw_wrt_risc_ram_word(qlt, 0x7c00, 0);
6329 }
6330 
6331 /*
6332  * qlt_raw_wrt_risc_ram_word
6333  *	Write RISC RAM word.
6334  *
6335  * Input:	qlt:		adapter state pointer.
6336  *		risc_address:	risc ram word address.
6337  *		data:		data.
6338  *
6339  * Returns:	qlt local function return status code.
6340  *
6341  * Context:	Kernel context.
6342  */
6343 static fct_status_t
6344 qlt_raw_wrt_risc_ram_word(qlt_state_t *qlt, uint32_t risc_address,
6345     uint32_t data)
6346 {
6347 	fct_status_t	ret;
6348 
6349 	REG_WR16(qlt, REG_MBOX(0), MBC_WRITE_RAM_EXTENDED);
6350 	REG_WR16(qlt, REG_MBOX(1), LSW(risc_address));
6351 	REG_WR16(qlt, REG_MBOX(2), LSW(data));
6352 	REG_WR16(qlt, REG_MBOX(3), MSW(data));
6353 	REG_WR16(qlt, REG_MBOX(8), MSW(risc_address));
6354 	ret = qlt_raw_mailbox_command(qlt);
6355 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
6356 	if (ret != QLT_SUCCESS) {
6357 		EL(qlt, "qlt_raw_mailbox_command=MBC_WRITE_RAM_EXTENDED status"
6358 		    "=%llxh\n", ret);
6359 	}
6360 	return (ret);
6361 }
6362 
6363 /*
6364  * qlt_raw_rd_risc_ram_word
6365  *	Read RISC RAM word.
6366  *
6367  * Input:	qlt:		adapter state pointer.
6368  *		risc_address:	risc ram word address.
6369  *		data:		data pointer.
6370  *
6371  * Returns:	qlt local function return status code.
6372  *
6373  * Context:	Kernel context.
6374  */
6375 static fct_status_t
6376 qlt_raw_rd_risc_ram_word(qlt_state_t *qlt, uint32_t risc_address,
6377     uint32_t *data)
6378 {
6379 	fct_status_t	ret;
6380 
6381 	REG_WR16(qlt, REG_MBOX(0), MBC_READ_RAM_EXTENDED);
6382 	REG_WR16(qlt, REG_MBOX(1), LSW(risc_address));
6383 	REG_WR16(qlt, REG_MBOX(2), MSW(risc_address));
6384 	ret = qlt_raw_mailbox_command(qlt);
6385 	*data = REG_RD16(qlt, REG_MBOX(2));
6386 	*data |= (REG_RD16(qlt, REG_MBOX(3)) << 16);
6387 	REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
6388 	if (ret != QLT_SUCCESS) {
6389 		EL(qlt, "qlt_raw_mailbox_command=MBC_READ_RAM_EXTENDED status"
6390 		    "=%llxh\n", ret);
6391 	}
6392 	return (ret);
6393 }
6394 
6395 static void
6396 qlt_properties(qlt_state_t *qlt)
6397 {
6398 	int32_t		cnt = 0;
6399 	int32_t		defval = 0xffff;
6400 
6401 	if (qlt_wwn_overload_prop(qlt) == TRUE) {
6402 		EL(qlt, "wwnn overloaded.\n");
6403 	}
6404 
6405 	if ((cnt = qlt_read_int_instance_prop(qlt, "bucketcnt2k", defval)) !=
6406 	    defval) {
6407 		qlt->qlt_bucketcnt[0] = cnt;
6408 		EL(qlt, "2k bucket o/l=%d\n", cnt);
6409 	}
6410 
6411 	if ((cnt = qlt_read_int_instance_prop(qlt, "bucketcnt8k", defval)) !=
6412 	    defval) {
6413 		qlt->qlt_bucketcnt[1] = cnt;
6414 		EL(qlt, "8k bucket o/l=%d\n", cnt);
6415 	}
6416 
6417 	if ((cnt = qlt_read_int_instance_prop(qlt, "bucketcnt64k", defval)) !=
6418 	    defval) {
6419 		qlt->qlt_bucketcnt[2] = cnt;
6420 		EL(qlt, "64k bucket o/l=%d\n", cnt);
6421 	}
6422 
6423 	if ((cnt = qlt_read_int_instance_prop(qlt, "bucketcnt128k", defval)) !=
6424 	    defval) {
6425 		qlt->qlt_bucketcnt[3] = cnt;
6426 		EL(qlt, "128k bucket o/l=%d\n", cnt);
6427 	}
6428 
6429 	if ((cnt = qlt_read_int_instance_prop(qlt, "bucketcnt256", defval)) !=
6430 	    defval) {
6431 		qlt->qlt_bucketcnt[4] = cnt;
6432 		EL(qlt, "256k bucket o/l=%d\n", cnt);
6433 	}
6434 }
6435