xref: /illumos-gate/usr/src/uts/common/io/sata/adapters/nv_sata/nv_sata.c (revision bb0ade0978a02d3fe0b0165cd4725fdcb593fbfb)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  *
31  * nv_sata is a combo SATA HBA driver for ck804/mcp55 based chipsets.
32  *
33  * NCQ
34  * ---
35  *
36  * A portion of the NCQ support is in place, but it is incomplete.  NCQ is disabled
37  * and is likely to be revisited in the future.
38  *
39  *
40  * Power Management
41  * ----------------
42  *
43  * Normally power management would be responsible for ensuring the device
44  * is quiescent and then changing the power state of the device, such as
45  * powering down parts or all of the device.  mcp55/ck804 is unique in
46  * that it is only available as part of a larger southbridge chipset, so
47  * removing power to the device isn't possible.  Switches to control
48  * power management states D0/D3 in the PCI configuration space appear to
49  * be supported, but changes to these states are apparently ignored.
50  * The only further PM that the driver _could_ do is shut down the PHY,
51  * but in order to deliver the first rev of the driver sooner rather
52  * than later, that will be deferred until some future phase.
53  *
54  * Since the driver currently will not directly change any power state to
55  * the device, no power() entry point will be required.  However, it is
56  * possible that in ACPI power state S3, aka suspend to RAM, power to
57  * the device can be removed, and the driver cannot rely on the BIOS to
58  * have reset any state.  For the time being, there are no known
59  * non-default configurations that need to be programmed.  This judgement
60  * is based on the port of the legacy ata driver not having any such
61  * functionality and based on conversations with the PM team.  If such a
62  * restoration is later deemed necessary it can be incorporated into the
63  * DDI_RESUME processing.
64  *
65  */
66 
67 #include <sys/scsi/scsi.h>
68 #include <sys/pci.h>
69 #include <sys/byteorder.h>
70 #include <sys/sata/sata_hba.h>
71 #include <sys/sata/adapters/nv_sata/nv_sata.h>
72 #include <sys/disp.h>
73 #include <sys/note.h>
74 #include <sys/promif.h>
75 
76 
77 /*
78  * Function prototypes for driver entry points
79  */
80 static int nv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
81 static int nv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
82 static int nv_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd,
83     void *arg, void **result);
84 
85 /*
86  * Function prototypes for entry points from sata service module
87  * These functions are distinguished from other local functions
88  * by the prefix "nv_sata_"
89  */
90 static int nv_sata_start(dev_info_t *dip, sata_pkt_t *spkt);
91 static int nv_sata_abort(dev_info_t *dip, sata_pkt_t *spkt, int);
92 static int nv_sata_reset(dev_info_t *dip, sata_device_t *sd);
93 static int nv_sata_activate(dev_info_t *dip, sata_device_t *sd);
94 static int nv_sata_deactivate(dev_info_t *dip, sata_device_t *sd);
95 
96 /*
97  * Local function prototypes
98  */
99 static uint_t mcp55_intr(caddr_t arg1, caddr_t arg2);
100 static uint_t mcp04_intr(caddr_t arg1, caddr_t arg2);
101 static int nv_add_legacy_intrs(nv_ctl_t *nvc);
102 #ifdef NV_MSI_SUPPORTED
103 static int nv_add_msi_intrs(nv_ctl_t *nvc);
104 #endif
105 static void nv_rem_intrs(nv_ctl_t *nvc);
106 static int nv_start_common(nv_port_t *nvp, sata_pkt_t *spkt);
107 static int nv_start_nodata(nv_port_t *nvp, int slot);
108 static void nv_intr_nodata(nv_port_t *nvp, nv_slot_t *spkt);
109 static int nv_start_pio_in(nv_port_t *nvp, int slot);
110 static int nv_start_pio_out(nv_port_t *nvp, int slot);
111 static void nv_intr_pio_in(nv_port_t *nvp, nv_slot_t *spkt);
112 static void nv_intr_pio_out(nv_port_t *nvp, nv_slot_t *spkt);
113 static int nv_start_pkt_pio(nv_port_t *nvp, int slot);
114 static void nv_intr_pkt_pio(nv_port_t *nvp, nv_slot_t *nv_slotp);
115 static int nv_start_dma(nv_port_t *nvp, int slot);
116 static void nv_intr_dma(nv_port_t *nvp, struct nv_slot *spkt);
117 static void nv_log(uint_t flag, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...);
118 static void nv_uninit_ctl(nv_ctl_t *nvc);
119 static void mcp55_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
120 static void mcp04_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
121 static void nv_uninit_port(nv_port_t *nvp);
122 static int nv_init_port(nv_port_t *nvp);
123 static int nv_init_ctl(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
124 static int mcp55_packet_complete_intr(nv_ctl_t *nvc, nv_port_t *nvp);
125 #ifdef NCQ
126 static int mcp55_dma_setup_intr(nv_ctl_t *nvc, nv_port_t *nvp);
127 #endif
128 static void nv_start_dma_engine(nv_port_t *nvp, int slot);
129 static void nv_port_state_change(nv_port_t *nvp, int event, uint8_t addr_type,
130     int state);
131 static boolean_t nv_check_link(uint32_t sstatus);
132 static void nv_common_reg_init(nv_ctl_t *nvc);
133 static void mcp04_intr_process(nv_ctl_t *nvc, uint8_t intr_status);
134 static void nv_reset(nv_port_t *nvp);
135 static void nv_complete_io(nv_port_t *nvp,  sata_pkt_t *spkt, int slot);
136 static void nv_timeout(void *);
137 static int nv_poll_wait(nv_port_t *nvp, sata_pkt_t *spkt);
138 static void nv_cmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...);
139 static void nv_read_signature(nv_port_t *nvp);
140 static void mcp55_set_intr(nv_port_t *nvp, int flag);
141 static void mcp04_set_intr(nv_port_t *nvp, int flag);
142 static void nv_resume(nv_port_t *nvp);
143 static void nv_suspend(nv_port_t *nvp);
144 static int nv_start_sync(nv_port_t *nvp, sata_pkt_t *spkt);
145 static int nv_abort_active(nv_port_t *nvp, sata_pkt_t *spkt, int abort_reason);
146 static void nv_copy_registers(nv_port_t *nvp, sata_device_t *sd,
147     sata_pkt_t *spkt);
148 static void nv_report_add_remove(nv_port_t *nvp, int flags);
149 static int nv_start_async(nv_port_t *nvp, sata_pkt_t *spkt);
150 static int nv_wait3(nv_port_t *nvp, uchar_t onbits1, uchar_t offbits1,
151     uchar_t failure_onbits2, uchar_t failure_offbits2,
152     uchar_t failure_onbits3, uchar_t failure_offbits3,
153     uint_t timeout_usec, int type_wait);
154 static int nv_wait(nv_port_t *nvp, uchar_t onbits, uchar_t offbits,
155     uint_t timeout_usec, int type_wait);
156 static int nv_start_rqsense_pio(nv_port_t *nvp, nv_slot_t *nv_slotp);
157 
158 
159 /*
160  * DMA attributes for the data buffer for x86.  dma_attr_burstsizes is unused.
161  * Verify whether it is needed if this driver is ported to another ISA.
162  */
163 static ddi_dma_attr_t buffer_dma_attr = {
164 	DMA_ATTR_V0,		/* dma_attr_version */
165 	0,			/* dma_attr_addr_lo: lowest bus address */
166 	0xffffffffull,		/* dma_attr_addr_hi: */
167 	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max, i.e. for one cookie */
168 	4,			/* dma_attr_align */
169 	1,			/* dma_attr_burstsizes. */
170 	1,			/* dma_attr_minxfer */
171 	0xffffffffull,		/* dma_attr_maxxfer, including all cookies */
172 	0xffffffffull,		/* dma_attr_seg */
173 	NV_DMA_NSEGS,		/* dma_attr_sgllen */
174 	512,			/* dma_attr_granular */
175 	0,			/* dma_attr_flags */
176 };
177 
178 
179 /*
180  * DMA attributes for PRD tables
181  */
182 ddi_dma_attr_t nv_prd_dma_attr = {
183 	DMA_ATTR_V0,		/* dma_attr_version */
184 	0,			/* dma_attr_addr_lo */
185 	0xffffffffull,		/* dma_attr_addr_hi */
186 	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max */
187 	4,			/* dma_attr_align */
188 	1,			/* dma_attr_burstsizes */
189 	1,			/* dma_attr_minxfer */
190 	NV_BM_64K_BOUNDARY,	/* dma_attr_maxxfer */
191 	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_seg */
192 	1,			/* dma_attr_sgllen */
193 	1,			/* dma_attr_granular */
194 	0			/* dma_attr_flags */
195 };
196 
197 /*
198  * Device access attributes
199  */
200 static ddi_device_acc_attr_t accattr = {
201     DDI_DEVICE_ATTR_V0,
202     DDI_STRUCTURE_LE_ACC,
203     DDI_STRICTORDER_ACC
204 };
205 
206 
207 static struct dev_ops nv_dev_ops = {
208 	DEVO_REV,		/* devo_rev */
209 	0,			/* refcnt  */
210 	nv_getinfo,		/* info */
211 	nulldev,		/* identify */
212 	nulldev,		/* probe */
213 	nv_attach,		/* attach */
214 	nv_detach,		/* detach */
215 	nodev,			/* no reset */
216 	(struct cb_ops *)0,	/* driver operations */
217 	NULL,			/* bus operations */
218 	NULL			/* power */
219 };
220 
221 
222 /*
223  * Request Sense CDB for ATAPI
224  */
225 static const uint8_t nv_rqsense_cdb[16] = {
226 	SCMD_REQUEST_SENSE,
227 	0,
228 	0,
229 	0,
230 	SATA_ATAPI_MIN_RQSENSE_LEN,
231 	0,
232 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0	/* pad out to max CDB length */
233 };
234 
235 
236 static sata_tran_hotplug_ops_t nv_hotplug_ops;
237 
238 extern struct mod_ops mod_driverops;
239 
240 static  struct modldrv modldrv = {
241 	&mod_driverops,	/* driverops */
242 	"Nvidia ck804/mcp55 HBA v%I%",
243 	&nv_dev_ops,	/* driver ops */
244 };
245 
246 static  struct modlinkage modlinkage = {
247 	MODREV_1,
248 	&modldrv,
249 	NULL
250 };
251 
252 
253 /*
254  * wait between checks of reg status
255  */
256 int nv_usec_delay = NV_WAIT_REG_CHECK;
257 
258 /*
259  * The following is needed for nv_vcmn_err()
260  */
261 static kmutex_t nv_log_mutex; /* protects nv_log_buf */
262 static char nv_log_buf[NV_STRING_512];
263 int nv_debug_flags = NVDBG_ALWAYS;
264 int nv_log_to_console = B_FALSE;
265 
266 int nv_log_delay = 0;
267 int nv_prom_print = B_FALSE;
268 
269 /*
270  * for debugging
271  */
272 #ifdef DEBUG
273 int ncq_commands = 0;
274 int non_ncq_commands = 0;
275 #endif
276 
277 /*
278  * Opaque state pointer to be initialized by ddi_soft_state_init()
279  */
280 static void *nv_statep	= NULL;
281 
282 
283 static sata_tran_hotplug_ops_t nv_hotplug_ops = {
284 	SATA_TRAN_HOTPLUG_OPS_REV_1,	/* structure version */
285 	nv_sata_activate,	/* activate port. cfgadm -c connect */
286 	nv_sata_deactivate	/* deactivate port. cfgadm -c disconnect */
287 };
288 
289 
290 /*
291  *  nv module initialization
292  */
293 int
294 _init(void)
295 {
296 	int	error;
297 
298 	error = ddi_soft_state_init(&nv_statep, sizeof (nv_ctl_t), 0);
299 
300 	if (error != 0) {
301 
302 		return (error);
303 	}
304 
305 	mutex_init(&nv_log_mutex, NULL, MUTEX_DRIVER, NULL);
306 
307 	if ((error = sata_hba_init(&modlinkage)) != 0) {
308 		ddi_soft_state_fini(&nv_statep);
309 		mutex_destroy(&nv_log_mutex);
310 
311 		return (error);
312 	}
313 
314 	error = mod_install(&modlinkage);
315 	if (error != 0) {
316 		sata_hba_fini(&modlinkage);
317 		ddi_soft_state_fini(&nv_statep);
318 		mutex_destroy(&nv_log_mutex);
319 
320 		return (error);
321 	}
322 
323 	return (error);
324 }
325 
326 
327 /*
328  * nv module uninitialization
329  */
330 int
331 _fini(void)
332 {
333 	int	error;
334 
335 	error = mod_remove(&modlinkage);
336 
337 	if (error != 0) {
338 		return (error);
339 	}
340 
341 	/*
342 	 * remove the resources allocated in _init()
343 	 */
344 	mutex_destroy(&nv_log_mutex);
345 	sata_hba_fini(&modlinkage);
346 	ddi_soft_state_fini(&nv_statep);
347 
348 	return (error);
349 }
350 
351 
352 /*
353  * nv _info entry point
354  */
355 int
356 _info(struct modinfo *modinfop)
357 {
358 	return (mod_info(&modlinkage, modinfop));
359 }
360 
361 
362 /*
363  * these wrappers for ddi_{get,put}{8,16,32} are for observability
364  * with dtrace
365  */
366 #ifdef DEBUG
367 
368 static void
369 nv_put8(ddi_acc_handle_t handle, uint8_t *dev_addr, uint8_t value)
370 {
371 	ddi_put8(handle, dev_addr, value);
372 }
373 
374 static void
375 nv_put32(ddi_acc_handle_t handle, uint32_t *dev_addr, uint32_t value)
376 {
377 	ddi_put32(handle, dev_addr, value);
378 }
379 
380 static uint32_t
381 nv_get32(ddi_acc_handle_t handle, uint32_t *dev_addr)
382 {
383 	return (ddi_get32(handle, dev_addr));
384 }
385 
386 static void
387 nv_put16(ddi_acc_handle_t handle, uint16_t *dev_addr, uint16_t value)
388 {
389 	ddi_put16(handle, dev_addr, value);
390 }
391 
392 static uint16_t
393 nv_get16(ddi_acc_handle_t handle, uint16_t *dev_addr)
394 {
395 	return (ddi_get16(handle, dev_addr));
396 }
397 
398 static uint8_t
399 nv_get8(ddi_acc_handle_t handle, uint8_t *dev_addr)
400 {
401 	return (ddi_get8(handle, dev_addr));
402 }
403 
404 #else
405 
406 #define	nv_put8 ddi_put8
407 #define	nv_put32 ddi_put32
408 #define	nv_get32 ddi_get32
409 #define	nv_put16 ddi_put16
410 #define	nv_get16 ddi_get16
411 #define	nv_get8 ddi_get8
412 
413 #endif
414 
415 
416 /*
417  * Driver attach
418  */
419 static int
420 nv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
421 {
422 	int status, attach_state, intr_types, bar, i, command;
423 	int inst = ddi_get_instance(dip);
424 	ddi_acc_handle_t pci_conf_handle;
425 	nv_ctl_t *nvc;
426 	uint8_t subclass;
427 	uint32_t reg32;
428 
429 	switch (cmd) {
430 
431 	case DDI_ATTACH:
432 
433 		NVLOG((NVDBG_INIT, NULL, NULL,
434 		    "nv_attach(): DDI_ATTACH inst %d", inst));
435 
436 		attach_state = ATTACH_PROGRESS_NONE;
437 
438 		status = ddi_soft_state_zalloc(nv_statep, inst);
439 
440 		if (status != DDI_SUCCESS) {
441 			break;
442 		}
443 
444 		nvc = ddi_get_soft_state(nv_statep, inst);
445 
446 		nvc->nvc_dip = dip;
447 
448 		attach_state |= ATTACH_PROGRESS_STATEP_ALLOC;
449 
450 		if (pci_config_setup(dip, &pci_conf_handle) == DDI_SUCCESS) {
451 			nvc->nvc_revid = pci_config_get8(pci_conf_handle,
452 			    PCI_CONF_REVID);
453 			NVLOG((NVDBG_INIT, NULL, NULL,
454 			    "inst %d: silicon revid is %x nv_debug_flags=%x",
455 			    inst, nvc->nvc_revid, nv_debug_flags));
456 		} else {
457 			break;
458 		}
459 
460 		attach_state |= ATTACH_PROGRESS_CONF_HANDLE;
461 
462 		/*
463 		 * If a device is attached after a suspend/resume, sometimes
464 		 * the command register is zero, as it might not be set by
465 		 * BIOS or a parent.  Set it again here.
466 		 */
467 		command = pci_config_get16(pci_conf_handle, PCI_CONF_COMM);
468 
469 		if (command == 0) {
470 			cmn_err(CE_WARN, "nv_sata%d: restoring PCI command"
471 			    " register", inst);
472 			pci_config_put16(pci_conf_handle, PCI_CONF_COMM,
473 			    PCI_COMM_IO|PCI_COMM_MAE|PCI_COMM_ME);
474 		}
475 
476 		subclass = pci_config_get8(pci_conf_handle, PCI_CONF_SUBCLASS);
477 
478 		if (subclass & PCI_MASS_RAID) {
479 			cmn_err(CE_WARN,
480 			    "attach failed: RAID mode not supported");
481 			break;
482 		}
483 
484 		/*
485 		 * the 6 bars of the controller are:
486 		 * 0: port 0 task file
487 		 * 1: port 0 status
488 		 * 2: port 1 task file
489 		 * 3: port 1 status
490 		 * 4: bus master for both ports
491 		 * 5: extended registers for SATA features
492 		 */
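		/*
		 * Register set 0 describes PCI configuration space, so
		 * hardware BAR n corresponds to register set n + 1 below.
		 */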
493 		for (bar = 0; bar < 6; bar++) {
494 			status = ddi_regs_map_setup(dip, bar + 1,
495 			    (caddr_t *)&nvc->nvc_bar_addr[bar], 0, 0, &accattr,
496 			    &nvc->nvc_bar_hdl[bar]);
497 
498 			if (status != DDI_SUCCESS) {
499 				NVLOG((NVDBG_INIT, nvc, NULL,
500 				    "ddi_regs_map_setup failure for bar"
501 				    " %d status = %d", bar, status));
502 				break;
503 			}
504 		}
505 
506 		attach_state |= ATTACH_PROGRESS_BARS;
507 
508 		/*
509 		 * initialize controller and driver core
510 		 */
511 		status = nv_init_ctl(nvc, pci_conf_handle);
512 
513 		if (status == NV_FAILURE) {
514 			NVLOG((NVDBG_INIT, nvc, NULL, "nv_init_ctl failed"));
515 
516 			break;
517 		}
518 
519 		attach_state |= ATTACH_PROGRESS_CTL_SETUP;
520 
521 		/*
522 		 * initialize mutexes
523 		 */
524 		mutex_init(&nvc->nvc_mutex, NULL, MUTEX_DRIVER,
525 		    DDI_INTR_PRI(nvc->nvc_intr_pri));
526 
527 		attach_state |= ATTACH_PROGRESS_MUTEX_INIT;
528 
529 		/*
530 		 * get supported interrupt types
531 		 */
532 		if (ddi_intr_get_supported_types(dip, &intr_types) !=
533 		    DDI_SUCCESS) {
534 			nv_cmn_err(CE_WARN, nvc, NULL,
535 			    "!ddi_intr_get_supported_types failed");
536 			NVLOG((NVDBG_INIT, nvc, NULL,
537 			    "interrupt supported types failed"));
538 
539 			break;
540 		}
541 
542 		NVLOG((NVDBG_INIT, nvc, NULL,
543 		    "ddi_intr_get_supported_types() returned: 0x%x",
544 		    intr_types));
545 
546 #ifdef NV_MSI_SUPPORTED
547 		if (intr_types & DDI_INTR_TYPE_MSI) {
548 			NVLOG((NVDBG_INIT, nvc, NULL,
549 			    "using MSI interrupt type"));
550 
551 			/*
552 			 * Try MSI first, but fall back to legacy if MSI
553 			 * attach fails
554 			 */
555 			if (nv_add_msi_intrs(nvc) == DDI_SUCCESS) {
556 				nvc->nvc_intr_type = DDI_INTR_TYPE_MSI;
557 				attach_state |= ATTACH_PROGRESS_INTR_ADDED;
558 				NVLOG((NVDBG_INIT, nvc, NULL,
559 				    "MSI interrupt setup done"));
560 			} else {
561 				nv_cmn_err(CE_CONT, nvc, NULL,
562 				    "!MSI registration failed, "
563 				    "will try Legacy interrupts");
564 			}
565 		}
566 #endif
567 
568 		/*
569 		 * Either the MSI interrupt setup has failed or only
570 		 * the fixed interrupts are available on the system.
571 		 */
572 		if (!(attach_state & ATTACH_PROGRESS_INTR_ADDED) &&
573 		    (intr_types & DDI_INTR_TYPE_FIXED)) {
574 
575 			NVLOG((NVDBG_INIT, nvc, NULL,
576 			    "using Legacy interrupt type"));
577 
578 			if (nv_add_legacy_intrs(nvc) == DDI_SUCCESS) {
579 				nvc->nvc_intr_type = DDI_INTR_TYPE_FIXED;
580 				attach_state |= ATTACH_PROGRESS_INTR_ADDED;
581 				NVLOG((NVDBG_INIT, nvc, NULL,
582 				    "Legacy interrupt setup done"));
583 			} else {
584 				nv_cmn_err(CE_WARN, nvc, NULL,
585 				    "!legacy interrupt setup failed");
586 				NVLOG((NVDBG_INIT, nvc, NULL,
587 				    "legacy interrupt setup failed"));
588 				break;
589 			}
590 		}
591 
592 		if (!(attach_state & ATTACH_PROGRESS_INTR_ADDED)) {
593 			NVLOG((NVDBG_INIT, nvc, NULL,
594 			    "no interrupts registered"));
595 			break;
596 		}
597 
598 		/*
599 		 * attach to sata module
600 		 */
601 		if (sata_hba_attach(nvc->nvc_dip,
602 		    &nvc->nvc_sata_hba_tran,
603 		    DDI_ATTACH) != DDI_SUCCESS) {
604 			attach_state |= ATTACH_PROGRESS_SATA_MODULE;
605 
606 			break;
607 		}
608 
609 		pci_config_teardown(&pci_conf_handle);
610 
611 		NVLOG((NVDBG_INIT, nvc, NULL, "nv_attach DDI_SUCCESS"));
612 
613 		return (DDI_SUCCESS);
614 
615 	case DDI_RESUME:
616 
617 		nvc = ddi_get_soft_state(nv_statep, inst);
618 
619 		NVLOG((NVDBG_INIT, nvc, NULL,
620 		    "nv_attach(): DDI_RESUME inst %d", inst));
621 
622 		if (pci_config_setup(dip, &pci_conf_handle) != DDI_SUCCESS) {
623 			return (DDI_FAILURE);
624 		}
625 
626 		/*
627 		 * If a device is attached after a suspend/resume, sometimes
628 		 * the command register is zero, as it might not be set by
629 		 * BIOS or a parent.  Set it again here.
630 		 */
631 		command = pci_config_get16(pci_conf_handle, PCI_CONF_COMM);
632 
633 		if (command == 0) {
634 			pci_config_put16(pci_conf_handle, PCI_CONF_COMM,
635 			    PCI_COMM_IO|PCI_COMM_MAE|PCI_COMM_ME);
636 		}
637 
638 		/*
639 		 * Need to set bit 2 to 1 at config offset 0x50
640 		 * to enable access to the bar5 registers.
641 		 */
642 		reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
643 
644 		if ((reg32 & NV_BAR5_SPACE_EN) != NV_BAR5_SPACE_EN) {
645 			pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
646 			    reg32 | NV_BAR5_SPACE_EN);
647 		}
648 
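		/*
		 * clear the controller suspended flag and resume each port
		 */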
649 		nvc->nvc_state &= ~NV_CTRL_SUSPEND;
650 
651 		for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
652 			nv_resume(&(nvc->nvc_port[i]));
653 		}
654 
655 		pci_config_teardown(&pci_conf_handle);
656 
657 		return (DDI_SUCCESS);
658 
659 	default:
660 		return (DDI_FAILURE);
661 	}
662 
663 
664 	/*
665 	 * DDI_ATTACH failure path starts here
666 	 */
667 
668 	if (attach_state & ATTACH_PROGRESS_INTR_ADDED) {
669 		nv_rem_intrs(nvc);
670 	}
671 
672 	if (attach_state & ATTACH_PROGRESS_SATA_MODULE) {
673 		/*
674 		 * Remove timers
675 		 */
676 		int port = 0;
677 		nv_port_t *nvp;
678 
679 		for (; port < NV_MAX_PORTS(nvc); port++) {
680 			nvp = &(nvc->nvc_port[port]);
681 			if (nvp->nvp_timeout_id != 0) {
682 				(void) untimeout(nvp->nvp_timeout_id);
683 			}
684 		}
685 	}
686 
687 	if (attach_state & ATTACH_PROGRESS_MUTEX_INIT) {
688 		mutex_destroy(&nvc->nvc_mutex);
689 	}
690 
691 	if (attach_state & ATTACH_PROGRESS_CTL_SETUP) {
692 		nv_uninit_ctl(nvc);
693 	}
694 
695 	if (attach_state & ATTACH_PROGRESS_BARS) {
696 		while (--bar >= 0) {
697 			ddi_regs_map_free(&nvc->nvc_bar_hdl[bar]);
698 		}
699 	}
700 
701 	if (attach_state & ATTACH_PROGRESS_STATEP_ALLOC) {
702 		ddi_soft_state_free(nv_statep, inst);
703 	}
704 
705 	if (attach_state & ATTACH_PROGRESS_CONF_HANDLE) {
706 		pci_config_teardown(&pci_conf_handle);
707 	}
708 
709 	cmn_err(CE_WARN, "nv_sata%d attach failed", inst);
710 
711 	return (DDI_FAILURE);
712 }
713 
714 
715 static int
716 nv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
717 {
718 	int i, port, inst = ddi_get_instance(dip);
719 	nv_ctl_t *nvc;
720 	nv_port_t *nvp;
721 
722 	nvc = ddi_get_soft_state(nv_statep, inst);
723 
724 	switch (cmd) {
725 
726 	case DDI_DETACH:
727 
728 		NVLOG((NVDBG_INIT, nvc, NULL, "nv_detach: DDI_DETACH"));
729 
730 		/*
731 		 * Remove interrupts
732 		 */
733 		nv_rem_intrs(nvc);
734 
735 		/*
736 		 * Remove timers
737 		 */
738 		for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
739 			nvp = &(nvc->nvc_port[port]);
740 			if (nvp->nvp_timeout_id != 0) {
741 				(void) untimeout(nvp->nvp_timeout_id);
742 			}
743 		}
744 
745 		/*
746 		 * Remove maps
747 		 */
748 		for (i = 0; i < 6; i++) {
749 			ddi_regs_map_free(&nvc->nvc_bar_hdl[i]);
750 		}
751 
752 		/*
753 		 * Destroy mutexes
754 		 */
755 		mutex_destroy(&nvc->nvc_mutex);
756 
757 		/*
758 		 * Uninitialize the controller
759 		 */
760 		nv_uninit_ctl(nvc);
761 
762 		/*
763 		 * unregister from the sata module
764 		 */
765 		(void) sata_hba_detach(nvc->nvc_dip, DDI_DETACH);
766 
767 		/*
768 		 * Free soft state
769 		 */
770 		ddi_soft_state_free(nv_statep, inst);
771 
772 		return (DDI_SUCCESS);
773 
774 	case DDI_SUSPEND:
775 		/*
776 		 * The PM functions for suspend and resume are incomplete
777 		 * and need additional work.  They may or may not work in
778 		 * the current state.
779 		 */
780 		NVLOG((NVDBG_INIT, nvc, NULL, "nv_detach: DDI_SUSPEND"));
781 
782 		for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
783 			nv_suspend(&(nvc->nvc_port[i]));
784 		}
785 
786 		nvc->nvc_state |= NV_CTRL_SUSPEND;
787 
788 		return (DDI_SUCCESS);
789 
790 	default:
791 		return (DDI_FAILURE);
792 	}
793 }
794 
795 
796 /*ARGSUSED*/
797 static int
798 nv_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
799 {
800 	nv_ctl_t *nvc;
801 	int instance;
802 	dev_t dev;
803 
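	/*
	 * the soft state instance is derived from the minor number
	 */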
804 	dev = (dev_t)arg;
805 	instance = getminor(dev);
806 
807 	switch (infocmd) {
808 	case DDI_INFO_DEVT2DEVINFO:
809 		nvc = ddi_get_soft_state(nv_statep,  instance);
810 		if (nvc != NULL) {
811 			*result = nvc->nvc_dip;
812 			return (DDI_SUCCESS);
813 		} else {
814 			*result = NULL;
815 			return (DDI_FAILURE);
816 		}
817 	case DDI_INFO_DEVT2INSTANCE:
818 		*(int *)result = instance;
819 		break;
820 	default:
821 		break;
822 	}
823 	return (DDI_SUCCESS);
824 }
825 
826 
827 /*
828  * Called by sata module to probe a port.  Port and device state
829  * are not changed here... only reported back to the sata module.
830  *
831  * If probe confirms a device is present for the first time, it will
832  * initiate a device reset, then probe will be called again and the
833  * signature will be checked.  If the signature is valid, data structures
834  * will be initialized.
835  */
836 static int
837 nv_sata_probe(dev_info_t *dip, sata_device_t *sd)
838 {
839 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
840 	uint8_t cport = sd->satadev_addr.cport;
841 	uint8_t pmport = sd->satadev_addr.pmport;
842 	uint8_t qual = sd->satadev_addr.qual;
843 	clock_t nv_lbolt = ddi_get_lbolt();
844 	nv_port_t *nvp;
845 
846 	if (cport >= NV_MAX_PORTS(nvc)) {
847 		sd->satadev_type = SATA_DTYPE_NONE;
848 		sd->satadev_state = SATA_STATE_UNKNOWN;
849 
850 		return (SATA_FAILURE);
851 	}
852 
853 	ASSERT(nvc->nvc_port != NULL);
854 	nvp = &(nvc->nvc_port[cport]);
855 	ASSERT(nvp != NULL);
856 
857 	NVLOG((NVDBG_PROBE, nvc, nvp,
858 	    "nv_sata_probe: enter cport: 0x%x, pmport: 0x%x, "
859 	    "qual: 0x%x", cport, pmport, qual));
860 
861 	mutex_enter(&nvp->nvp_mutex);
862 
863 	/*
864 	 * This check seems to be done in the SATA module.
865 	 * It may not be required here
866 	 */
867 	if (nvp->nvp_state & NV_PORT_INACTIVE) {
868 		nv_cmn_err(CE_WARN, nvc, nvp,
869 		    "port inactive.  Use cfgadm to activate");
870 		sd->satadev_type = SATA_DTYPE_UNKNOWN;
871 		sd->satadev_state = SATA_PSTATE_SHUTDOWN;
872 		mutex_exit(&nvp->nvp_mutex);
873 
874 		return (SATA_FAILURE);
875 	}
876 
877 	if (qual == SATA_ADDR_PMPORT) {
878 		sd->satadev_type = SATA_DTYPE_NONE;
879 		sd->satadev_state = SATA_STATE_UNKNOWN;
880 		mutex_exit(&nvp->nvp_mutex);
881 		nv_cmn_err(CE_WARN, nvc, nvp,
882 		    "controller does not support port multiplier");
883 
884 		return (SATA_FAILURE);
885 	}
886 
887 	sd->satadev_state = SATA_PSTATE_PWRON;
888 
889 	nv_copy_registers(nvp, sd, NULL);
890 
891 	/*
892 	 * determine link status
893 	 */
894 	if (nv_check_link(sd->satadev_scr.sstatus) == B_FALSE) {
895 		uint8_t det;
896 
897 		/*
898 		 * Reset will cause the link to go down for a short period of
899 		 * time.  If the link is lost for less than 2 seconds, ignore it
900 		 * so that the reset can progress.
901 		 */
902 		if (nvp->nvp_state & NV_PORT_RESET_PROBE) {
903 
904 			if (nvp->nvp_link_lost_time == 0) {
905 				nvp->nvp_link_lost_time = nv_lbolt;
906 			}
907 
908 			if (TICK_TO_SEC(nv_lbolt -
909 			    nvp->nvp_link_lost_time) < NV_LINK_LOST_OK) {
910 				NVLOG((NVDBG_ALWAYS, nvp->nvp_ctlp, nvp,
911 				    "probe: intermittent link lost while"
912 				    " resetting"));
913 				/*
914 				 * fake status of link so that probe continues
915 				 */
916 				SSTATUS_SET_IPM(sd->satadev_scr.sstatus,
917 				    SSTATUS_IPM_ACTIVE);
918 				SSTATUS_SET_DET(sd->satadev_scr.sstatus,
919 				    SSTATUS_DET_DEVPRE_PHYCOM);
920 				sd->satadev_type = SATA_DTYPE_UNKNOWN;
921 				mutex_exit(&nvp->nvp_mutex);
922 
923 				return (SATA_SUCCESS);
924 			} else {
925 				nvp->nvp_state &=
926 				    ~(NV_PORT_RESET_PROBE|NV_PORT_RESET);
927 			}
928 		}
929 
930 		/*
931 		 * no link, so tear down port and abort all active packets
932 		 */
933 
934 		det = (sd->satadev_scr.sstatus & SSTATUS_DET) >>
935 		    SSTATUS_DET_SHIFT;
936 
937 		switch (det) {
938 		case SSTATUS_DET_NODEV:
939 		case SSTATUS_DET_PHYOFFLINE:
940 			sd->satadev_type = SATA_DTYPE_NONE;
941 			break;
942 		default:
943 			sd->satadev_type = SATA_DTYPE_UNKNOWN;
944 			break;
945 		}
946 
947 		NVLOG((NVDBG_PROBE, nvp->nvp_ctlp, nvp,
948 		    "probe: link lost invoking nv_abort_active"));
949 
950 		(void) nv_abort_active(nvp, NULL, SATA_PKT_TIMEOUT);
951 		nv_uninit_port(nvp);
952 
953 		mutex_exit(&nvp->nvp_mutex);
954 
955 		return (SATA_SUCCESS);
956 	} else {
957 		nvp->nvp_link_lost_time = 0;
958 	}
959 
960 	/*
961 	 * A device is present so clear hotremoved flag
962 	 */
963 	nvp->nvp_state &= ~NV_PORT_HOTREMOVED;
964 
965 	/*
966 	 * If the signature was acquired previously there is no need to
967 	 * do it again.
968 	 */
969 	if (nvp->nvp_signature != 0) {
970 		NVLOG((NVDBG_PROBE, nvp->nvp_ctlp, nvp,
971 		    "probe: signature acquired previously"));
972 		sd->satadev_type = nvp->nvp_type;
973 		mutex_exit(&nvp->nvp_mutex);
974 
975 		return (SATA_SUCCESS);
976 	}
977 
978 	/*
979 	 * If NV_PORT_RESET is not set, this is the first time through
980 	 * so perform reset and return.
981 	 */
982 	if ((nvp->nvp_state & NV_PORT_RESET) == 0) {
983 		NVLOG((NVDBG_PROBE, nvp->nvp_ctlp, nvp,
984 		    "probe: first reset to get sig"));
985 		nvp->nvp_state |= NV_PORT_RESET_PROBE;
986 		nv_reset(nvp);
987 		sd->satadev_type = nvp->nvp_type = SATA_DTYPE_UNKNOWN;
988 		nvp->nvp_probe_time = nv_lbolt;
989 		mutex_exit(&nvp->nvp_mutex);
990 
991 		return (SATA_SUCCESS);
992 	}
993 
994 	/*
995 	 * Reset was done previously.  See if the signature is
996 	 * available.
997 	 */
998 	nv_read_signature(nvp);
999 	sd->satadev_type = nvp->nvp_type;
1000 
1001 	/*
1002 	 * Some drives may require additional resets to get a
1003 	 * valid signature.  If a drive was not just powered up, the signature
1004 	 * should arrive within half a second of reset.  Therefore if more
1005 	 * than 5 seconds has elapsed while waiting for a signature, reset
1006 	 * again.  These extra resets do not appear to create problems when
1007 	 * the drive is spinning up for more than this reset period.
1008 	 */
1009 	if (nvp->nvp_signature == 0) {
1010 		if (TICK_TO_SEC(nv_lbolt - nvp->nvp_reset_time) > 5) {
1011 			NVLOG((NVDBG_PROBE, nvc, nvp, "additional reset"
1012 			    " during signature acquisition"));
1013 			nv_reset(nvp);
1014 		}
1015 
1016 		mutex_exit(&nvp->nvp_mutex);
1017 
1018 		return (SATA_SUCCESS);
1019 	}
1020 
1021 	NVLOG((NVDBG_PROBE, nvc, nvp, "signature acquired after %d ms",
1022 	    TICK_TO_MSEC(nv_lbolt - nvp->nvp_probe_time)));
1023 
1024 	/*
1025 	 * nv_sata only deals with ATA disks and ATAPI CD/DVDs so far.  If
1026 	 * it is not either of those, then just return.
1027 	 */
1028 	if ((nvp->nvp_type != SATA_DTYPE_ATADISK) &&
1029 	    (nvp->nvp_type != SATA_DTYPE_ATAPICD)) {
1030 		NVLOG((NVDBG_PROBE, nvc, nvp, "Driver currently handles only"
1031 		    " disks/CDs/DVDs.  Signature acquired was %X",
1032 		    nvp->nvp_signature));
1033 		mutex_exit(&nvp->nvp_mutex);
1034 
1035 		return (SATA_SUCCESS);
1036 	}
1037 
1038 	/*
1039 	 * make sure structures are initialized
1040 	 */
1041 	if (nv_init_port(nvp) == NV_SUCCESS) {
1042 		NVLOG((NVDBG_PROBE, nvc, nvp,
1043 		    "device detected and set up at port %d", cport));
1044 		mutex_exit(&nvp->nvp_mutex);
1045 
1046 		return (SATA_SUCCESS);
1047 	} else {
1048 		nv_cmn_err(CE_WARN, nvc, nvp, "failed to set up data "
1049 		    "structures for port %d", cport);
1050 		mutex_exit(&nvp->nvp_mutex);
1051 
1052 		return (SATA_FAILURE);
1053 	}
1054 	/*NOTREACHED*/
1055 }
1056 
1057 
1058 /*
1059  * Called by sata module to start a new command.
1060  */
1061 static int
1062 nv_sata_start(dev_info_t *dip, sata_pkt_t *spkt)
1063 {
1064 	int cport = spkt->satapkt_device.satadev_addr.cport;
1065 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1066 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1067 	int ret;
1068 
1069 	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_start: opmode: 0x%x cmd=%x",
1070 	    spkt->satapkt_op_mode, spkt->satapkt_cmd.satacmd_cmd_reg));
1071 
1072 	mutex_enter(&nvp->nvp_mutex);
1073 
1074 	/*
1075 	 * hotremoved is an intermediate state where the link was lost,
1076 	 * but the hotplug event has not yet been processed by the sata
1077 	 * module.  Fail the request.
1078 	 */
1079 	if (nvp->nvp_state & NV_PORT_HOTREMOVED) {
1080 		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1081 		spkt->satapkt_device.satadev_state = SATA_STATE_UNKNOWN;
1082 		NVLOG((NVDBG_ERRS, nvc, nvp,
1083 		    "nv_sata_start: NV_PORT_HOTREMOVED"));
1084 		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1085 		mutex_exit(&nvp->nvp_mutex);
1086 
1087 		return (SATA_TRAN_PORT_ERROR);
1088 	}
1089 
1090 	if (nvp->nvp_state & NV_PORT_RESET) {
1091 		NVLOG((NVDBG_ERRS, nvc, nvp,
1092 		    "still waiting for reset completion"));
1093 		spkt->satapkt_reason = SATA_PKT_BUSY;
1094 		mutex_exit(&nvp->nvp_mutex);
1095 
1096 		/*
1097 		 * If in panic, timeouts do not occur, so fake one
1098 		 * so that the signature can be acquired to complete
1099 		 * the reset handling.
1100 		 */
1101 		if (ddi_in_panic()) {
1102 			nv_timeout(nvp);
1103 		}
1104 
1105 		return (SATA_TRAN_BUSY);
1106 	}
1107 
1108 	if (nvp->nvp_type == SATA_DTYPE_NONE) {
1109 		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1110 		NVLOG((NVDBG_ERRS, nvc, nvp,
1111 		    "nv_sata_start: SATA_DTYPE_NONE"));
1112 		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1113 		mutex_exit(&nvp->nvp_mutex);
1114 
1115 		return (SATA_TRAN_PORT_ERROR);
1116 	}
1117 
1118 	if (spkt->satapkt_device.satadev_type == SATA_DTYPE_PMULT) {
1119 		ASSERT(nvp->nvp_type == SATA_DTYPE_PMULT);
1120 		nv_cmn_err(CE_WARN, nvc, nvp,
1121 		    "port multipliers not supported by controller");
1122 		spkt->satapkt_reason = SATA_PKT_CMD_UNSUPPORTED;
1123 		mutex_exit(&nvp->nvp_mutex);
1124 
1125 		return (SATA_TRAN_CMD_UNSUPPORTED);
1126 	}
1127 
1128 	if ((nvp->nvp_state & NV_PORT_INIT) == 0) {
1129 		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1130 		NVLOG((NVDBG_ERRS, nvc, nvp,
1131 		    "nv_sata_start: port not yet initialized"));
1132 		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1133 		mutex_exit(&nvp->nvp_mutex);
1134 
1135 		return (SATA_TRAN_PORT_ERROR);
1136 	}
1137 
1138 	if (nvp->nvp_state & NV_PORT_INACTIVE) {
1139 		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1140 		NVLOG((NVDBG_ERRS, nvc, nvp,
1141 		    "nv_sata_start: NV_PORT_INACTIVE"));
1142 		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1143 		mutex_exit(&nvp->nvp_mutex);
1144 
1145 		return (SATA_TRAN_PORT_ERROR);
1146 	}
1147 
1148 	if (nvp->nvp_state & NV_PORT_FAILED) {
1149 		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1150 		NVLOG((NVDBG_ERRS, nvc, nvp,
1151 		    "nv_sata_start: NV_PORT_FAILED state"));
1152 		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1153 		mutex_exit(&nvp->nvp_mutex);
1154 
1155 		return (SATA_TRAN_PORT_ERROR);
1156 	}
1157 
1158 	/*
1159 	 * After a device reset, the sata module performs restore processing
1160 	 * on the device.  When that processing is complete, the sata module
1161 	 * sets sata_clear_dev_reset to indicate that normal, non-restore
1162 	 * commands should be processed.
1163 	 */
1164 	if (spkt->satapkt_cmd.satacmd_flags.sata_clear_dev_reset) {
1165 		nvp->nvp_state &= ~NV_PORT_RESTORE;
1166 		NVLOG((NVDBG_ENTRY, nvc, nvp,
1167 		    "nv_sata_start: clearing NV_PORT_RESTORE"));
1168 	}
1169 
1170 	/*
1171 	 * if the device was recently reset as indicated by NV_PORT_RESTORE,
1172 	 * only allow commands which restore device state.  The sata module
1173 	 * marks such commands with sata_ignore_dev_reset.
1174 	 *
1175 	 * during coredump, nv_reset is called but then the restore doesn't
1176 	 * happen.  For now, work around this by ignoring the wait for
1177 	 * restore if the system is panicking.
1178 	 */
1179 	if ((nvp->nvp_state & NV_PORT_RESTORE) &&
1180 	    !(spkt->satapkt_cmd.satacmd_flags.sata_ignore_dev_reset) &&
1181 	    (ddi_in_panic() == 0)) {
1182 		spkt->satapkt_reason = SATA_PKT_BUSY;
1183 		NVLOG((NVDBG_ENTRY, nvc, nvp,
1184 		    "nv_sata_start: waiting for restore "));
1185 		mutex_exit(&nvp->nvp_mutex);
1186 
1187 		return (SATA_TRAN_BUSY);
1188 	}
1189 
1190 	if (nvp->nvp_state & NV_PORT_ABORTING) {
1191 		spkt->satapkt_reason = SATA_PKT_BUSY;
1192 		NVLOG((NVDBG_ERRS, nvc, nvp,
1193 		    "nv_sata_start: NV_PORT_ABORTING"));
1194 		mutex_exit(&nvp->nvp_mutex);
1195 
1196 		return (SATA_TRAN_BUSY);
1197 	}
1198 
1199 	if (spkt->satapkt_op_mode &
1200 	    (SATA_OPMODE_POLLING|SATA_OPMODE_SYNCH)) {
1201 
1202 		ret = nv_start_sync(nvp, spkt);
1203 
1204 		mutex_exit(&nvp->nvp_mutex);
1205 
1206 		return (ret);
1207 	}
1208 
1209 	/*
1210 	 * start the command asynchronously
1211 	 */
1212 	ret = nv_start_async(nvp, spkt);
1213 
1214 	mutex_exit(&nvp->nvp_mutex);
1215 
1216 	return (ret);
1217 }
1218 
1219 
1220 /*
1221  * SATA_OPMODE_POLLING implies the driver is in a
1222  * synchronous mode, and SATA_OPMODE_SYNCH is also set.
1223  * If only SATA_OPMODE_SYNCH is set, the driver can use
1224  * interrupts and sleep wait on a cv.
1225  *
1226  * If SATA_OPMODE_POLLING is set, the driver can't use
1227  * interrupts and must busy wait and simulate the
1228  * interrupts by waiting for BSY to be cleared.
1229  *
1230  * Synchronous mode has to return BUSY if there are
1231  * any other commands already on the drive.
1232  */
1233 static int
1234 nv_start_sync(nv_port_t *nvp, sata_pkt_t *spkt)
1235 {
1236 	nv_ctl_t *nvc = nvp->nvp_ctlp;
1237 	int ret;
1238 
1239 	NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync: entry"));
1240 
1241 	if (nvp->nvp_ncq_run != 0 || nvp->nvp_non_ncq_run != 0) {
1242 		spkt->satapkt_reason = SATA_PKT_BUSY;
1243 		NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp,
1244 		    "nv_sata_satapkt_sync: device is busy, sync cmd rejected "
1245 		    "ncq_run: %d non_ncq_run: %d  spkt: %p",
1246 		    nvp->nvp_ncq_run, nvp->nvp_non_ncq_run,
1247 		    (&(nvp->nvp_slot[0]))->nvslot_spkt));
1248 
1249 		return (SATA_TRAN_BUSY);
1250 	}
1251 
1252 	/*
1253 	 * if SYNC but not POLL, verify that this is not on an interrupt thread.
1254 	 */
1255 	if (!(spkt->satapkt_op_mode & SATA_OPMODE_POLLING) &&
1256 	    servicing_interrupt()) {
1257 		spkt->satapkt_reason = SATA_PKT_BUSY;
1258 		NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp,
1259 		    "SYNC mode not allowed during interrupt"));
1260 
1261 		return (SATA_TRAN_BUSY);
1262 
1263 	}
1264 
1265 	/*
1266 	 * disable interrupt generation if in polled mode
1267 	 */
1268 	if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1269 		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
1270 	}
1271 
1272 	if ((ret = nv_start_common(nvp, spkt)) != SATA_TRAN_ACCEPTED) {
1273 		if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1274 			(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1275 		}
1276 
1277 		return (ret);
1278 	}
1279 
1280 	if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1281 		mutex_exit(&nvp->nvp_mutex);
1282 		ret = nv_poll_wait(nvp, spkt);
1283 		mutex_enter(&nvp->nvp_mutex);
1284 
1285 		(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1286 
1287 		NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync:"
1288 		    " done ret: %d", ret));
1289 
1290 		return (ret);
1291 	}
1292 
1293 	/*
1294 	 * non-polling synchronous mode handling.  The interrupt will signal
1295 	 * when the IO is completed.
1296 	 */
1297 	cv_wait(&nvp->nvp_poll_cv, &nvp->nvp_mutex);
1298 
1299 	if (spkt->satapkt_reason != SATA_PKT_COMPLETED) {
1300 
1301 		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
1302 	}
1303 
1304 	NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync:"
1305 	    " done reason %d", spkt->satapkt_reason));
1306 
1307 	return (SATA_TRAN_ACCEPTED);
1308 }
1309 
1310 
1311 static int
1312 nv_poll_wait(nv_port_t *nvp, sata_pkt_t *spkt)
1313 {
1314 	int ret;
1315 	nv_ctl_t *nvc = nvp->nvp_ctlp;
1316 #if ! defined(__lock_lint)
1317 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[0]); /* not NCQ aware */
1318 #endif
1319 
1320 	NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: enter"));
1321 
1322 	for (;;) {
1323 
1324 		NV_DELAY_NSEC(400);
1325 
1326 		NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: before nv_wait"));
1327 		if (nv_wait(nvp, 0, SATA_STATUS_BSY,
1328 		    NV_SEC2USEC(spkt->satapkt_time), NV_NOSLEEP) == B_FALSE) {
1329 			mutex_enter(&nvp->nvp_mutex);
1330 			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
1331 			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
1332 			nv_reset(nvp);
1333 			nv_complete_io(nvp, spkt, 0);
1334 			mutex_exit(&nvp->nvp_mutex);
1335 			NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: "
1336 			    "SATA_STATUS_BSY"));
1337 
1338 			return (SATA_TRAN_ACCEPTED);
1339 		}
1340 
1341 		NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: before nvc_intr"));
1342 
1343 		/*
1344 		 * Simulate interrupt.
1345 		 */
1346 		ret = (*(nvc->nvc_interrupt))((caddr_t)nvc, NULL);
1347 		NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: after nvc_intr"));
1348 
1349 		if (ret != DDI_INTR_CLAIMED) {
1350 			NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait:"
1351 			    " unclaimed -- resetting"));
1352 			mutex_enter(&nvp->nvp_mutex);
1353 			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
1354 			nv_reset(nvp);
1355 			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
1356 			nv_complete_io(nvp, spkt, 0);
1357 			mutex_exit(&nvp->nvp_mutex);
1358 
1359 			return (SATA_TRAN_ACCEPTED);
1360 		}
1361 
1362 #if ! defined(__lock_lint)
1363 		if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
1364 			/*
1365 			 * packet is complete
1366 			 */
1367 			return (SATA_TRAN_ACCEPTED);
1368 		}
1369 #endif
1370 	}
1371 	/*NOTREACHED*/
1372 }
1373 
1374 
1375 /*
1376  * Called by sata module to abort outstanding packets.
1377  */
1378 /*ARGSUSED*/
1379 static int
1380 nv_sata_abort(dev_info_t *dip, sata_pkt_t *spkt, int flag)
1381 {
1382 	int cport = spkt->satapkt_device.satadev_addr.cport;
1383 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1384 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1385 	int c_a, ret;
1386 
1387 	ASSERT(cport < NV_MAX_PORTS(nvc));
1388 	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_abort %d %p", flag, spkt));
1389 
1390 	mutex_enter(&nvp->nvp_mutex);
1391 
1392 	if (nvp->nvp_state & NV_PORT_INACTIVE) {
1393 		mutex_exit(&nvp->nvp_mutex);
1394 		nv_cmn_err(CE_WARN, nvc, nvp,
1395 		    "abort request failed: port inactive");
1396 
1397 		return (SATA_FAILURE);
1398 	}
1399 
1400 	/*
1401 	 * if spkt == NULL, abort all commands
1402 	 */
1403 	c_a = nv_abort_active(nvp, spkt, SATA_PKT_ABORTED);
1404 
1405 	if (c_a) {
1406 		NVLOG((NVDBG_ENTRY, nvc, nvp,
1407 		    "packets aborted running=%d", c_a));
1408 		ret = SATA_SUCCESS;
1409 	} else {
1410 		if (spkt == NULL) {
1411 			NVLOG((NVDBG_ENTRY, nvc, nvp, "no spkts to abort"));
1412 		} else {
1413 			NVLOG((NVDBG_ENTRY, nvc, nvp,
1414 			    "can't find spkt to abort"));
1415 		}
1416 		ret = SATA_FAILURE;
1417 	}
1418 
1419 	mutex_exit(&nvp->nvp_mutex);
1420 
1421 	return (ret);
1422 }
1423 
1424 
1425 /*
1426  * if spkt == NULL abort all pkts running, otherwise
1427 	 * abort the requested packet.  Must be called with nvp_mutex
1428  * held and returns with it held.  Not NCQ aware.
1429  */
1430 static int
1431 nv_abort_active(nv_port_t *nvp, sata_pkt_t *spkt, int abort_reason)
1432 {
1433 	int aborted = 0, i, reset_once = B_FALSE;
1434 	struct nv_slot *nv_slotp;
1435 	sata_pkt_t *spkt_slot;
1436 
1437 	ASSERT(MUTEX_HELD(&nvp->nvp_mutex));
1438 
1439 	/*
1440 	 * return if the port is not configured
1441 	 */
1442 	if (nvp->nvp_slot == NULL) {
1443 		NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp,
1444 		    "nv_abort_active: not configured so returning"));
1445 
1446 		return (0);
1447 	}
1448 
1449 	NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp, "nv_abort_active"));
1450 
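	/*
	 * Flag the port so that nv_sata_start() rejects new commands with
	 * BUSY while the abort is in progress.
	 */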
1451 	nvp->nvp_state |= NV_PORT_ABORTING;
1452 
1453 	for (i = 0; i < nvp->nvp_queue_depth; i++) {
1454 
1455 		nv_slotp = &(nvp->nvp_slot[i]);
1456 		spkt_slot = nv_slotp->nvslot_spkt;
1457 
1458 		/*
1459 		 * skip if there is no active command in the slot
1460 		 */
1461 		if (spkt_slot == NULL) {
1462 			continue;
1463 		}
1464 
1465 		/*
1466 		 * if a specific packet was requested, skip if
1467 		 * this is not a match
1468 		 */
1469 		if ((spkt != NULL) && (spkt != spkt_slot)) {
1470 			continue;
1471 		}
1472 
1473 		/*
1474 		 * stop the hardware.  This could need reworking
1475 		 * when NCQ is enabled in the driver.
1476 		 */
1477 		if (reset_once == B_FALSE) {
1478 			ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
1479 
1480 			/*
1481 			 * stop DMA engine
1482 			 */
1483 			nv_put8(bmhdl, nvp->nvp_bmicx,  0);
1484 
1485 			nv_reset(nvp);
1486 			reset_once = B_TRUE;
1487 		}
1488 
1489 		spkt_slot->satapkt_reason = abort_reason;
1490 		nv_complete_io(nvp, spkt_slot, i);
1491 		aborted++;
1492 	}
1493 
1494 	nvp->nvp_state &= ~NV_PORT_ABORTING;
1495 
1496 	return (aborted);
1497 }
1498 
1499 
1500 /*
1501  * Called by sata module to reset a port, device, or the controller.
1502  */
1503 static int
1504 nv_sata_reset(dev_info_t *dip, sata_device_t *sd)
1505 {
1506 	int cport = sd->satadev_addr.cport;
1507 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1508 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1509 	int ret = SATA_SUCCESS;
1510 
1511 	ASSERT(cport < NV_MAX_PORTS(nvc));
1512 
1513 	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_reset"));
1514 
1515 	mutex_enter(&nvp->nvp_mutex);
1516 
1517 	switch (sd->satadev_addr.qual) {
1518 
1519 	case SATA_ADDR_CPORT:
1520 		/*FALLTHROUGH*/
1521 	case SATA_ADDR_DCPORT:
1522 		nv_reset(nvp);
1523 		(void) nv_abort_active(nvp, NULL, SATA_PKT_RESET);
1524 
1525 		break;
1526 	case SATA_ADDR_CNTRL:
1527 		NVLOG((NVDBG_ENTRY, nvc, nvp,
1528 		    "nv_sata_reset: controller reset not supported"));
1529 
1530 		break;
1531 	case SATA_ADDR_PMPORT:
1532 	case SATA_ADDR_DPMPORT:
1533 		NVLOG((NVDBG_ENTRY, nvc, nvp,
1534 		    "nv_sata_reset: port multipliers not supported"));
1535 		/*FALLTHROUGH*/
1536 	default:
1537 		/*
1538 		 * unsupported case
1539 		 */
1540 		ret = SATA_FAILURE;
1541 		break;
1542 	}
1543 
1544 	if (ret == SATA_SUCCESS) {
1545 		/*
1546 		 * If the port is inactive, do a quiet reset and don't attempt
1547 		 * to wait for reset completion or do any post reset processing
1548 		 */
1549 		if (nvp->nvp_state & NV_PORT_INACTIVE) {
1550 			nvp->nvp_state &= ~NV_PORT_RESET;
1551 			nvp->nvp_reset_time = 0;
1552 		}
1553 
1554 		/*
1555 		 * clear the port failed flag
1556 		 */
1557 		nvp->nvp_state &= ~NV_PORT_FAILED;
1558 	}
1559 
1560 	mutex_exit(&nvp->nvp_mutex);
1561 
1562 	return (ret);
1563 }
1564 
1565 
1566 /*
1567  * Sata entry point to handle port activation.  cfgadm -c connect
1568  */
1569 static int
1570 nv_sata_activate(dev_info_t *dip, sata_device_t *sd)
1571 {
1572 	int cport = sd->satadev_addr.cport;
1573 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1574 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1575 
1576 	ASSERT(cport < NV_MAX_PORTS(nvc));
1577 	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_activate"));
1578 
1579 	mutex_enter(&nvp->nvp_mutex);
1580 
1581 	sd->satadev_state = SATA_STATE_READY;
1582 
1583 	nv_copy_registers(nvp, sd, NULL);
1584 
1585 	(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1586 
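	/*
	 * clear all port state flags (reset, failed, inactive, etc.) now
	 * that the port is active again
	 */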
1587 	nvp->nvp_state = 0;
1588 
1589 	mutex_exit(&nvp->nvp_mutex);
1590 
1591 	return (SATA_SUCCESS);
1592 }
1593 
1594 
1595 /*
1596  * Sata entry point to handle port deactivation.  cfgadm -c disconnect
1597  */
1598 static int
1599 nv_sata_deactivate(dev_info_t *dip, sata_device_t *sd)
1600 {
1601 	int cport = sd->satadev_addr.cport;
1602 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1603 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1604 
1605 	ASSERT(cport < NV_MAX_PORTS(nvc));
1606 	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_deactivate"));
1607 
1608 	mutex_enter(&nvp->nvp_mutex);
1609 
1610 	(void) nv_abort_active(nvp, NULL, SATA_PKT_RESET);
1611 
1612 	/*
1613 	 * mark the device as inaccessible
1614 	 */
1615 	nvp->nvp_state |= NV_PORT_INACTIVE;
1616 
1617 	/*
1618 	 * disable the interrupts on port
1619 	 */
1620 	(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
1621 
1622 	nv_uninit_port(nvp);
1623 
1624 	sd->satadev_state = SATA_PSTATE_SHUTDOWN;
1625 	nv_copy_registers(nvp, sd, NULL);
1626 
1627 	mutex_exit(&nvp->nvp_mutex);
1628 
1629 	return (SATA_SUCCESS);
1630 }
1631 
1632 
1633 /*
1634  * find an empty slot in the driver's queue, increment counters,
1635  * and then invoke the appropriate PIO or DMA start routine.
1636  */
1637 static int
1638 nv_start_common(nv_port_t *nvp, sata_pkt_t *spkt)
1639 {
1640 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
1641 	int on_bit = 0x01, slot, sactive, ret, ncq = 0;
1642 	uint8_t cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
1643 	int direction = sata_cmdp->satacmd_flags.sata_data_direction;
1644 	nv_ctl_t *nvc = nvp->nvp_ctlp;
1645 	nv_slot_t *nv_slotp;
1646 	boolean_t dma_cmd;
1647 
1648 	NVLOG((NVDBG_DELIVER, nvc, nvp, "nv_start_common  entered: cmd: 0x%x",
1649 	    sata_cmdp->satacmd_cmd_reg));
1650 
1651 	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
1652 	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
1653 		nvp->nvp_ncq_run++;
1654 		/*
1655 		 * search for an empty NCQ slot.  By this time, it has already
1656 		 * been determined by the caller that there is room on the
1657 		 * queue.
1658 		 */
1659 		for (slot = 0; slot < nvp->nvp_queue_depth; slot++,
1660 		    on_bit <<= 1) {
1661 			if ((nvp->nvp_sactive_cache & on_bit) == 0) {
1662 				break;
1663 			}
1664 		}
1665 
1666 		/*
1667 		 * The first empty slot found should not exceed the queue
1668 		 * depth of the drive.  If it does, it's an error.
1669 		 */
1670 		ASSERT(slot != nvp->nvp_queue_depth);
1671 
1672 		sactive = nv_get32(nvc->nvc_bar_hdl[5],
1673 		    nvp->nvp_sactive);
1674 		ASSERT((sactive & on_bit) == 0);
1675 		nv_put32(nvc->nvc_bar_hdl[5], nvp->nvp_sactive, on_bit);
1676 		NVLOG((NVDBG_INIT, nvc, nvp, "setting SACTIVE onbit: %X",
1677 		    on_bit));
1678 		nvp->nvp_sactive_cache |= on_bit;
1679 
1680 		ncq = NVSLOT_NCQ;
1681 
1682 	} else {
1683 		nvp->nvp_non_ncq_run++;
1684 		slot = 0;
1685 	}
1686 
1687 	nv_slotp = (nv_slot_t *)&nvp->nvp_slot[slot];
1688 
1689 	ASSERT(nv_slotp->nvslot_spkt == NULL);
1690 
1691 	nv_slotp->nvslot_spkt = spkt;
1692 	nv_slotp->nvslot_flags = ncq;
1693 
1694 	/*
1695 	 * the sata module doesn't indicate which commands utilize the
1696 	 * DMA engine, so find out using this switch table.
1697 	 */
1698 	switch (spkt->satapkt_cmd.satacmd_cmd_reg) {
1699 	case SATAC_READ_DMA_EXT:
1700 	case SATAC_WRITE_DMA_EXT:
1701 	case SATAC_WRITE_DMA:
1702 	case SATAC_READ_DMA:
1703 	case SATAC_READ_DMA_QUEUED:
1704 	case SATAC_READ_DMA_QUEUED_EXT:
1705 	case SATAC_WRITE_DMA_QUEUED:
1706 	case SATAC_WRITE_DMA_QUEUED_EXT:
1707 	case SATAC_READ_FPDMA_QUEUED:
1708 	case SATAC_WRITE_FPDMA_QUEUED:
1709 		dma_cmd = B_TRUE;
1710 		break;
1711 	default:
1712 		dma_cmd = B_FALSE;
1713 	}
1714 
1715 	if (sata_cmdp->satacmd_num_dma_cookies != 0 && dma_cmd == B_TRUE) {
1716 		NVLOG((NVDBG_DELIVER, nvc,  nvp, "DMA command"));
1717 		nv_slotp->nvslot_start = nv_start_dma;
1718 		nv_slotp->nvslot_intr = nv_intr_dma;
1719 	} else if (spkt->satapkt_cmd.satacmd_cmd_reg == SATAC_PACKET) {
1720 		NVLOG((NVDBG_DELIVER, nvc,  nvp, "packet command"));
1721 		nv_slotp->nvslot_start = nv_start_pkt_pio;
1722 		nv_slotp->nvslot_intr = nv_intr_pkt_pio;
1723 		if ((direction == SATA_DIR_READ) ||
1724 		    (direction == SATA_DIR_WRITE)) {
1725 			nv_slotp->nvslot_byte_count =
1726 			    spkt->satapkt_cmd.satacmd_bp->b_bcount;
1727 			nv_slotp->nvslot_v_addr =
1728 			    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
1729 			/*
1730 			 * Freeing DMA resources allocated by the framework
1731 			 * now to avoid buffer overwrite (dma sync) problems
1732 			 * when the buffer is released at command completion.
1733 			 * Primarily an issue on systems with more than
1734 			 * 4GB of memory.
1735 			 */
1736 			sata_free_dma_resources(spkt);
1737 		}
1738 	} else if (direction == SATA_DIR_NODATA_XFER) {
1739 		NVLOG((NVDBG_DELIVER, nvc, nvp, "non-data command"));
1740 		nv_slotp->nvslot_start = nv_start_nodata;
1741 		nv_slotp->nvslot_intr = nv_intr_nodata;
1742 	} else if (direction == SATA_DIR_READ) {
1743 		NVLOG((NVDBG_DELIVER, nvc, nvp, "pio in command"));
1744 		nv_slotp->nvslot_start = nv_start_pio_in;
1745 		nv_slotp->nvslot_intr = nv_intr_pio_in;
1746 		nv_slotp->nvslot_byte_count =
1747 		    spkt->satapkt_cmd.satacmd_bp->b_bcount;
1748 		nv_slotp->nvslot_v_addr =
1749 		    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
1750 		/*
1751 		 * Freeing DMA resources allocated by the framework now to
1752 		 * avoid buffer overwrite (dma sync) problems when the buffer
1753 		 * is released at command completion.  This is not an issue
1754 		 * for write because write does not update the buffer.
1755 		 * Primarily an issue on systems with more than 4GB of memory.
1756 		 */
1757 		sata_free_dma_resources(spkt);
1758 	} else if (direction == SATA_DIR_WRITE) {
1759 		NVLOG((NVDBG_DELIVER, nvc, nvp, "pio out command"));
1760 		nv_slotp->nvslot_start = nv_start_pio_out;
1761 		nv_slotp->nvslot_intr = nv_intr_pio_out;
1762 		nv_slotp->nvslot_byte_count =
1763 		    spkt->satapkt_cmd.satacmd_bp->b_bcount;
1764 		nv_slotp->nvslot_v_addr =
1765 		    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
1766 	} else {
1767 		nv_cmn_err(CE_WARN, nvc, nvp, "malformed command: direction"
1768 		    " %d cookies %d cmd %x",
1769 		    sata_cmdp->satacmd_flags.sata_data_direction,
1770 		    sata_cmdp->satacmd_num_dma_cookies,  cmd);
1771 		spkt->satapkt_reason = SATA_PKT_CMD_UNSUPPORTED;
1772 		ret = SATA_TRAN_CMD_UNSUPPORTED;
1773 
1774 		goto fail;
1775 	}
1776 
1777 	if ((ret = (*nv_slotp->nvslot_start)(nvp, slot)) ==
1778 	    SATA_TRAN_ACCEPTED) {
1779 		nv_slotp->nvslot_stime = ddi_get_lbolt();
1780 
1781 		/*
1782 		 * start timer if it's not already running and this packet
1783 		 * is not requesting polled mode.
1784 		 */
1785 		if ((nvp->nvp_timeout_id == 0) &&
1786 		    ((spkt->satapkt_op_mode & SATA_OPMODE_POLLING) == 0)) {
1787 			nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
1788 			    drv_usectohz(NV_ONE_SEC));
1789 		}
1790 
1791 		return (SATA_TRAN_ACCEPTED);
1792 	}
1793 
1794 	fail:
1795 
1796 	spkt->satapkt_reason = SATA_TRAN_PORT_ERROR;
1797 
1798 	if (ncq == NVSLOT_NCQ) {
1799 		nvp->nvp_ncq_run--;
1800 		nvp->nvp_sactive_cache &= ~on_bit;
1801 	} else {
1802 		nvp->nvp_non_ncq_run--;
1803 	}
1804 	nv_slotp->nvslot_spkt = NULL;
1805 	nv_slotp->nvslot_flags = 0;
1806 
1807 	return (ret);
1808 }
1809 
1810 
1811 /*
1812  * Check if the signature is ready and if non-zero translate
1813  * it into a Solaris sata defined type.
1814  */
1815 static void
1816 nv_read_signature(nv_port_t *nvp)
1817 {
1818 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
1819 
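	/*
	 * Assemble the 32-bit device signature from the taskfile registers
	 * (sector count and LBA low/mid/high) left by the device after reset.
	 */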
1820 	nvp->nvp_signature = nv_get8(cmdhdl, nvp->nvp_count);
1821 	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_sect) << 8);
1822 	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_lcyl) << 16);
1823 	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_hcyl) << 24);
1824 
1825 	switch (nvp->nvp_signature) {
1826 
1827 	case NV_SIG_DISK:
1828 		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "drive is a disk"));
1829 		nvp->nvp_type = SATA_DTYPE_ATADISK;
1830 		break;
1831 	case NV_SIG_ATAPI:
1832 		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
1833 		    "drive is an optical device"));
1834 		nvp->nvp_type = SATA_DTYPE_ATAPICD;
1835 		break;
1836 	case NV_SIG_PM:
1837 		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
1838 		    "device is a port multiplier"));
1839 		nvp->nvp_type = SATA_DTYPE_PMULT;
1840 		break;
1841 	case NV_SIG_NOTREADY:
1842 		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
1843 		    "signature not ready"));
1844 		nvp->nvp_type = SATA_DTYPE_UNKNOWN;
1845 		break;
1846 	default:
1847 		nv_cmn_err(CE_WARN, nvp->nvp_ctlp, nvp, "signature %X not"
1848 		    " recognized", nvp->nvp_signature);
1849 		nvp->nvp_type = SATA_DTYPE_UNKNOWN;
1850 		break;
1851 	}
1852 
1853 	if (nvp->nvp_signature) {
1854 		nvp->nvp_state &= ~(NV_PORT_RESET_PROBE|NV_PORT_RESET);
1855 	}
1856 }
1857 
1858 
1859 /*
1860  * Reset the port
1861  */
1862 static void
1863 nv_reset(nv_port_t *nvp)
1864 {
1865 	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
1866 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
1867 	nv_ctl_t *nvc = nvp->nvp_ctlp;
1868 	uint32_t sctrl;
1869 
1870 	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_reset()"));
1871 
1872 	ASSERT(mutex_owned(&nvp->nvp_mutex));
1873 
1874 	/*
1875 	 * clear signature registers
1876 	 */
1877 	nv_put8(cmdhdl, nvp->nvp_sect, 0);
1878 	nv_put8(cmdhdl, nvp->nvp_lcyl, 0);
1879 	nv_put8(cmdhdl, nvp->nvp_hcyl, 0);
1880 	nv_put8(cmdhdl, nvp->nvp_count, 0);
1881 
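	/*
	 * Clear the cached signature and device type, mark the port as
	 * resetting, and record when the reset started so that probe can
	 * time signature acquisition.
	 */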
1882 	nvp->nvp_signature = 0;
1883 	nvp->nvp_type = 0;
1884 	nvp->nvp_state |= NV_PORT_RESET;
1885 	nvp->nvp_reset_time = ddi_get_lbolt();
1886 	nvp->nvp_link_lost_time = 0;
1887 
1888 	/*
1889 	 * assert reset in the PHY by setting the SControl DET field to 1 (COMRESET)
1890 	 */
1891 	sctrl = nv_get32(bar5_hdl, nvp->nvp_sctrl);
1892 
1893 	nv_put32(bar5_hdl, nvp->nvp_sctrl, sctrl | SCONTROL_DET_COMRESET);
1894 
1895 	/*
1896 	 * wait 1ms
1897 	 */
1898 	drv_usecwait(1000);
1899 
1900 	/*
1901 	 * de-assert reset in PHY
1902 	 */
1903 	nv_put32(bar5_hdl, nvp->nvp_sctrl, sctrl);
1904 
1905 	/*
1906 	 * make sure timer is running
1907 	 */
1908 	if (nvp->nvp_timeout_id == 0) {
1909 		nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
1910 		    drv_usectohz(NV_ONE_SEC));
1911 	}
1912 }
1913 
1914 
1915 /*
1916  * Initialize register handling specific to mcp55
1917  */
1918 /* ARGSUSED */
1919 static void
1920 mcp55_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
1921 {
1922 	nv_port_t *nvp;
1923 	uchar_t *bar5  = nvc->nvc_bar_addr[5];
1924 	uint8_t off, port;
#ifdef NCQ
	uint32_t flags;
#endif
1925 
1926 	nvc->nvc_mcp55_ctl = (uint32_t *)(bar5 + MCP55_CTL);
1927 	nvc->nvc_mcp55_ncq = (uint32_t *)(bar5 + MCP55_NCQ);
1928 
1929 	for (port = 0, off = 0; port < NV_MAX_PORTS(nvc); port++, off += 2) {
1930 		nvp = &(nvc->nvc_port[port]);
1931 		nvp->nvp_mcp55_int_status =
1932 		    (uint16_t *)(bar5 + MCP55_INT_STATUS + off);
1933 		nvp->nvp_mcp55_int_ctl =
1934 		    (uint16_t *)(bar5 + MCP55_INT_CTL + off);
1935 
1936 		/*
1937 		 * clear any previous interrupts asserted
1938 		 */
1939 		nv_put16(nvc->nvc_bar_hdl[5], nvp->nvp_mcp55_int_status,
1940 		    MCP55_INT_CLEAR);
1941 
1942 		/*
1943 		 * These are the interrupts to accept for now.  The spec
1944 		 * says these are enable bits, but nvidia has indicated
1945 		 * these are masking bits.  Even though they may be masked
1946 		 * out to prevent asserting the main interrupt, they can
1947 		 * still be asserted while reading the interrupt status
1948 		 * register, so that needs to be considered in the interrupt
1949 		 * handler.
1950 		 */
1951 		nv_put16(nvc->nvc_bar_hdl[5], nvp->nvp_mcp55_int_ctl,
1952 		    ~(MCP55_INT_IGNORE));
1953 	}
1954 
1955 	/*
1956 	 * Allow the driver to program the BM on the first command instead
1957 	 * of waiting for an interrupt.
1958 	 */
1959 #ifdef NCQ
1960 	flags = MCP_SATA_AE_NCQ_PDEV_FIRST_CMD | MCP_SATA_AE_NCQ_SDEV_FIRST_CMD;
1961 	nv_put32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp55_ncq, flags);
1962 	flags = MCP_SATA_AE_CTL_PRI_SWNCQ | MCP_SATA_AE_CTL_SEC_SWNCQ;
1963 	nv_put32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp55_ctl, flags);
1964 #endif
1965 
1966 
1967 #if 0
1968 	/*
1969 	 * This caused problems on some but not all mcp55 based systems:
1970 	 * DMA writes would never complete.  It happened even on systems
1971 	 * with small memory, and even when only NV_40BIT_PRD below was set
1972 	 * (without raising buffer_dma_attr.dma_attr_addr_hi), so it seems
1973 	 * to be a hardware issue that needs further investigation.
1974 	 */
1975 
1976 	/*
1977 	 * mcp55 rev A03 and above supports 40-bit physical addressing.
1978 	 * Enable DMA to take advantage of that.
1979 	 *
1980 	 */
1981 	if (nvc->nvc_revid >= 0xa3) {
1982 		uint32_t reg32;
1983 		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "rev id is %X and"
1984 		    " is capable of 40-bit addressing", nvc->nvc_revid));
1985 		buffer_dma_attr.dma_attr_addr_hi = 0xffffffffffull;
1986 		reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
1987 		pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
1988 		    reg32 | NV_40BIT_PRD);
1989 	} else {
1990 		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "rev is %X and is "
1991 		    "not capable of 40-bit addressing", nvc->nvc_revid));
1992 	}
1993 #endif
1994 
1995 }
1996 
1997 
1998 /*
1999  * Initialize register handling specific to mcp04
2000  */
2001 static void
2002 mcp04_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2003 {
2004 	uchar_t *bar5  = nvc->nvc_bar_addr[5];
2005 	uint32_t reg32;
2006 	uint16_t reg16;
2007 	nv_port_t *nvp;
2008 	int j;
2009 
2010 	/*
2011 	 * delay hotplug interrupts until PHYRDY.
2012 	 */
2013 	reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_42);
2014 	pci_config_put32(pci_conf_handle, NV_SATA_CFG_42,
2015 	    reg32 | MCP04_CFG_DELAY_HOTPLUG_INTR);
2016 
2017 	/*
2018 	 * enable hot plug interrupts for channel x and y
2019 	 */
2020 	reg16 = nv_get16(nvc->nvc_bar_hdl[5],
2021 	    (uint16_t *)(bar5 + NV_ADMACTL_X));
2022 	nv_put16(nvc->nvc_bar_hdl[5], (uint16_t *)(bar5 + NV_ADMACTL_X),
2023 	    NV_HIRQ_EN | reg16);
2024 
2025 
2026 	reg16 = nv_get16(nvc->nvc_bar_hdl[5],
2027 	    (uint16_t *)(bar5 + NV_ADMACTL_Y));
2028 	nv_put16(nvc->nvc_bar_hdl[5], (uint16_t *)(bar5 + NV_ADMACTL_Y),
2029 	    NV_HIRQ_EN | reg16);
2030 
2031 	nvc->nvc_mcp04_int_status = (uint8_t *)(bar5 + MCP04_SATA_INT_STATUS);
2032 
2033 	/*
2034 	 * clear any existing interrupt pending then enable
2035 	 */
2036 	for (j = 0; j < NV_MAX_PORTS(nvc); j++) {
2037 		nvp = &(nvc->nvc_port[j]);
2038 		mutex_enter(&nvp->nvp_mutex);
2039 		(*(nvp->nvp_ctlp->nvc_set_intr))(nvp,
2040 		    NV_INTR_CLEAR_ALL|NV_INTR_ENABLE);
2041 		mutex_exit(&nvp->nvp_mutex);
2042 	}
2043 }
2044 
2045 
2046 /*
2047  * Initialize the controller and set up driver data structures.
2048  * determine if ck804 or mcp55 class.
2049  */
2050 static int
2051 nv_init_ctl(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2052 {
2053 	struct sata_hba_tran stran;
2054 	nv_port_t *nvp;
2055 	int j, ck804 = B_TRUE;
2056 	uchar_t *cmd_addr, *ctl_addr, *bm_addr;
2057 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2058 	uchar_t *bar5  = nvc->nvc_bar_addr[5];
2059 	uint32_t reg32;
2060 	uint8_t reg8, reg8_save;
2061 
2062 	NVLOG((NVDBG_INIT, nvc, NULL, "nv_init_ctl entered"));
2063 
2064 	/*
2065 	 * Need to set bit 2 to 1 at config offset 0x50
2066 	 * to enable access to the bar5 registers.
2067 	 */
2068 	reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
2069 	pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
2070 	    reg32 | NV_BAR5_SPACE_EN);
2071 
2072 	/*
2073 	 * Determine if this is ck804 or mcp55.  ck804 maps the task file
2074 	 * registers into bar5 while mcp55 does not; the corresponding offset
2075 	 * in mcp55's space is unused and will not read back what was
2076 	 * written.  So write to one of the task file registers and check
2077 	 * whether it reads back the value written.  ck804 will return the
2078 	 * value written, whereas mcp55 will not.
2079 	 */
2080 	reg8_save = nv_get8(bar5_hdl,
2081 	    (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X));
2082 
2083 
2084 	for (j = 1; j < 3; j++) {
2085 
2086 		nv_put8(bar5_hdl, (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X), j);
2087 		reg8 = nv_get8(bar5_hdl,
2088 		    (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X));
2089 
2090 		if (reg8 != j) {
2091 			ck804 = B_FALSE;
2092 			break;
2093 		}
2094 	}
2095 
2096 	nv_put8(bar5_hdl, (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X), reg8_save);
2097 
2098 	if (ck804 == B_TRUE) {
2099 		NVLOG((NVDBG_INIT, nvc, NULL, "controller is CK804"));
2100 		nvc->nvc_interrupt = mcp04_intr;
2101 		nvc->nvc_reg_init = mcp04_reg_init;
2102 		nvc->nvc_set_intr = mcp04_set_intr;
2103 	} else {
2104 		NVLOG((NVDBG_INIT, nvc, NULL, "controller is MCP55"));
2105 		nvc->nvc_interrupt = mcp55_intr;
2106 		nvc->nvc_reg_init = mcp55_reg_init;
2107 		nvc->nvc_set_intr = mcp55_set_intr;
2108 	}
2109 
2110 
2111 	stran.sata_tran_hba_rev = SATA_TRAN_HBA_REV_2;
2112 	stran.sata_tran_hba_dip = nvc->nvc_dip;
2113 	stran.sata_tran_hba_dma_attr = &buffer_dma_attr;
2114 	stran.sata_tran_hba_num_cports = NV_NUM_CPORTS;
2115 	stran.sata_tran_hba_features_support =
2116 	    SATA_CTLF_HOTPLUG | SATA_CTLF_ASN | SATA_CTLF_ATAPI;
2117 	stran.sata_tran_hba_qdepth = NV_QUEUE_SLOTS;
2118 	stran.sata_tran_probe_port = nv_sata_probe;
2119 	stran.sata_tran_start = nv_sata_start;
2120 	stran.sata_tran_abort = nv_sata_abort;
2121 	stran.sata_tran_reset_dport = nv_sata_reset;
2122 	stran.sata_tran_selftest = NULL;
2123 	stran.sata_tran_hotplug_ops = &nv_hotplug_ops;
2124 	stran.sata_tran_pwrmgt_ops = NULL;
2125 	stran.sata_tran_ioctl = NULL;
2126 	nvc->nvc_sata_hba_tran = stran;
2127 
2128 	nvc->nvc_port = kmem_zalloc(sizeof (nv_port_t) * NV_MAX_PORTS(nvc),
2129 	    KM_SLEEP);
2130 
2131 	/*
2132 	 * initialize registers common to all chipsets
2133 	 */
2134 	nv_common_reg_init(nvc);
2135 
2136 	for (j = 0; j < NV_MAX_PORTS(nvc); j++) {
2137 		nvp = &(nvc->nvc_port[j]);
2138 
2139 		cmd_addr = nvp->nvp_cmd_addr;
2140 		ctl_addr = nvp->nvp_ctl_addr;
2141 		bm_addr = nvp->nvp_bm_addr;
2142 
2143 		mutex_init(&nvp->nvp_mutex, NULL, MUTEX_DRIVER,
2144 		    DDI_INTR_PRI(nvc->nvc_intr_pri));
2145 
2146 		cv_init(&nvp->nvp_poll_cv, NULL, CV_DRIVER, NULL);
2147 
2148 		nvp->nvp_data	= cmd_addr + NV_DATA;
2149 		nvp->nvp_error	= cmd_addr + NV_ERROR;
2150 		nvp->nvp_feature = cmd_addr + NV_FEATURE;
2151 		nvp->nvp_count	= cmd_addr + NV_COUNT;
2152 		nvp->nvp_sect	= cmd_addr + NV_SECT;
2153 		nvp->nvp_lcyl	= cmd_addr + NV_LCYL;
2154 		nvp->nvp_hcyl	= cmd_addr + NV_HCYL;
2155 		nvp->nvp_drvhd	= cmd_addr + NV_DRVHD;
2156 		nvp->nvp_status	= cmd_addr + NV_STATUS;
2157 		nvp->nvp_cmd	= cmd_addr + NV_CMD;
2158 		nvp->nvp_altstatus = ctl_addr + NV_ALTSTATUS;
2159 		nvp->nvp_devctl	= ctl_addr + NV_DEVCTL;
2160 
2161 		nvp->nvp_bmicx	= bm_addr + BMICX_REG;
2162 		nvp->nvp_bmisx	= bm_addr + BMISX_REG;
2163 		nvp->nvp_bmidtpx = (uint32_t *)(bm_addr + BMIDTPX_REG);
2164 
2165 		nvp->nvp_state = 0;
2166 	}
2167 
2168 	/*
2169 	 * initialize register by calling chip specific reg initialization
2170 	 */
2171 	(*(nvc->nvc_reg_init))(nvc, pci_conf_handle);
2172 
2173 	return (NV_SUCCESS);
2174 }
2175 
2176 
2177 /*
2178  * Initialize data structures with enough slots to handle queuing, if
2179  * enabled.  NV_QUEUE_SLOTS will be set to 1 or 32, depending on whether
2180  * NCQ support is built into the driver and enabled.  It might have been
2181  * better to derive the true size from the drive itself, but the sata
2182  * module only sends down that information on the first NCQ command,
2183  * which means possibly re-sizing the structures on an interrupt stack,
2184  * making error handling more messy.  The easy way is to just allocate
2185  * all 32 slots, which is what most drives support anyway.
2186  */
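/*
 * Illustrative sketch of how NV_QUEUE_SLOTS is assumed to be defined in
 * nv_sata.h (not shown in this file):
 *
 *	#ifdef NCQ
 *	#define	NV_QUEUE_SLOTS	32
 *	#else
 *	#define	NV_QUEUE_SLOTS	1
 *	#endif
 *
 * so the per-port arrays allocated below are sized once at attach time.
 */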
2187 static int
2188 nv_init_port(nv_port_t *nvp)
2189 {
2190 	nv_ctl_t *nvc = nvp->nvp_ctlp;
2191 	size_t	prd_size = sizeof (prde_t) * NV_DMA_NSEGS;
2192 	dev_info_t *dip = nvc->nvc_dip;
2193 	ddi_device_acc_attr_t dev_attr;
2194 	size_t buf_size;
2195 	ddi_dma_cookie_t cookie;
2196 	uint_t count;
2197 	int rc, i;
2198 
2199 	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
2200 	dev_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
2201 	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
2202 
2203 	if (nvp->nvp_state & NV_PORT_INIT) {
2204 		NVLOG((NVDBG_INIT, nvc, nvp,
2205 		    "nv_init_port previously initialized"));
2206 
2207 		return (NV_SUCCESS);
2208 	} else {
2209 		NVLOG((NVDBG_INIT, nvc, nvp, "nv_init_port initializing"));
2210 	}
2211 
2212 	nvp->nvp_sg_dma_hdl = kmem_zalloc(sizeof (ddi_dma_handle_t) *
2213 	    NV_QUEUE_SLOTS, KM_SLEEP);
2214 
2215 	nvp->nvp_sg_acc_hdl = kmem_zalloc(sizeof (ddi_acc_handle_t) *
2216 	    NV_QUEUE_SLOTS, KM_SLEEP);
2217 
2218 	nvp->nvp_sg_addr = kmem_zalloc(sizeof (caddr_t) *
2219 	    NV_QUEUE_SLOTS, KM_SLEEP);
2220 
2221 	nvp->nvp_sg_paddr = kmem_zalloc(sizeof (uint32_t) *
2222 	    NV_QUEUE_SLOTS, KM_SLEEP);
2223 
2224 	nvp->nvp_slot = kmem_zalloc(sizeof (nv_slot_t) * NV_QUEUE_SLOTS,
2225 	    KM_SLEEP);
2226 
2227 	for (i = 0; i < NV_QUEUE_SLOTS; i++) {
2228 
2229 		rc = ddi_dma_alloc_handle(dip, &nv_prd_dma_attr,
2230 		    DDI_DMA_SLEEP, NULL, &(nvp->nvp_sg_dma_hdl[i]));
2231 
2232 		if (rc != DDI_SUCCESS) {
2233 			nv_uninit_port(nvp);
2234 
2235 			return (NV_FAILURE);
2236 		}
2237 
2238 		rc = ddi_dma_mem_alloc(nvp->nvp_sg_dma_hdl[i], prd_size,
2239 		    &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
2240 		    NULL, &(nvp->nvp_sg_addr[i]), &buf_size,
2241 		    &(nvp->nvp_sg_acc_hdl[i]));
2242 
2243 		if (rc != DDI_SUCCESS) {
2244 			nv_uninit_port(nvp);
2245 
2246 			return (NV_FAILURE);
2247 		}
2248 
2249 		rc = ddi_dma_addr_bind_handle(nvp->nvp_sg_dma_hdl[i], NULL,
2250 		    nvp->nvp_sg_addr[i], buf_size,
2251 		    DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
2252 		    DDI_DMA_SLEEP, NULL, &cookie, &count);
2253 
2254 		if (rc != DDI_DMA_MAPPED) {
2255 			nv_uninit_port(nvp);
2256 
2257 			return (NV_FAILURE);
2258 		}
2259 
2260 		ASSERT(count == 1);
2261 		ASSERT((cookie.dmac_address & (sizeof (int) - 1)) == 0);
2262 
2263 		ASSERT(cookie.dmac_laddress <= UINT32_MAX);
2264 
2265 		nvp->nvp_sg_paddr[i] = cookie.dmac_address;
2266 	}
2267 
2268 	/*
2269 	 * nvp_queue_depth represents the actual drive queue depth, not the
2270 	 * number of slots allocated in the structures (which may be more).
2271 	 * Actual queue depth is only learned after the first NCQ command, so
2272 	 * initialize it to 1 for now.
2273 	 */
2274 	nvp->nvp_queue_depth = 1;
2275 
2276 	nvp->nvp_state |= NV_PORT_INIT;
2277 
2278 	return (NV_SUCCESS);
2279 }
2280 
2281 
2282 /*
2283  * Free dynamically allocated structures for port.
2284  */
2285 static void
2286 nv_uninit_port(nv_port_t *nvp)
2287 {
2288 	int i;
2289 
2290 	/*
2291 	 * It is possible to reach here before a port has been initialized or
2292 	 * after it has already been uninitialized.  Just return in that case.
2293 	 */
2294 	if (nvp->nvp_slot == NULL) {
2295 
2296 		return;
2297 	}
2298 
2299 	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2300 	    "nv_uninit_port uninitializing"));
2301 
2302 	nvp->nvp_type = SATA_DTYPE_NONE;
2303 
2304 	for (i = 0; i < NV_QUEUE_SLOTS; i++) {
2305 		if (nvp->nvp_sg_paddr[i]) {
2306 			(void) ddi_dma_unbind_handle(nvp->nvp_sg_dma_hdl[i]);
2307 		}
2308 
2309 		if (nvp->nvp_sg_acc_hdl[i] != NULL) {
2310 			ddi_dma_mem_free(&(nvp->nvp_sg_acc_hdl[i]));
2311 		}
2312 
2313 		if (nvp->nvp_sg_dma_hdl[i] != NULL) {
2314 			ddi_dma_free_handle(&(nvp->nvp_sg_dma_hdl[i]));
2315 		}
2316 	}
2317 
2318 	kmem_free(nvp->nvp_slot, sizeof (nv_slot_t) * NV_QUEUE_SLOTS);
2319 	nvp->nvp_slot = NULL;
2320 
2321 	kmem_free(nvp->nvp_sg_dma_hdl,
2322 	    sizeof (ddi_dma_handle_t) * NV_QUEUE_SLOTS);
2323 	nvp->nvp_sg_dma_hdl = NULL;
2324 
2325 	kmem_free(nvp->nvp_sg_acc_hdl,
2326 	    sizeof (ddi_acc_handle_t) * NV_QUEUE_SLOTS);
2327 	nvp->nvp_sg_acc_hdl = NULL;
2328 
2329 	kmem_free(nvp->nvp_sg_addr, sizeof (caddr_t) * NV_QUEUE_SLOTS);
2330 	nvp->nvp_sg_addr = NULL;
2331 
2332 	kmem_free(nvp->nvp_sg_paddr, sizeof (uint32_t) * NV_QUEUE_SLOTS);
2333 	nvp->nvp_sg_paddr = NULL;
2334 
2335 	nvp->nvp_state &= ~NV_PORT_INIT;
2336 	nvp->nvp_signature = 0;
2337 }
2338 
2339 
2340 /*
2341  * Cache register offsets and access handles to frequently accessed registers
2342  * which are common to either chipset.
2343  */
2344 static void
2345 nv_common_reg_init(nv_ctl_t *nvc)
2346 {
2347 	uchar_t *bar5_addr = nvc->nvc_bar_addr[5];
2348 	uchar_t *bm_addr_offset, *sreg_offset;
2349 	uint8_t bar, port;
2350 	nv_port_t *nvp;
2351 
2352 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2353 		if (port == 0) {
2354 			bar = NV_BAR_0;
2355 			bm_addr_offset = 0;
2356 			sreg_offset = (uchar_t *)(CH0_SREG_OFFSET + bar5_addr);
2357 		} else {
2358 			bar = NV_BAR_2;
2359 			bm_addr_offset = (uchar_t *)8;
2360 			sreg_offset = (uchar_t *)(CH1_SREG_OFFSET + bar5_addr);
2361 		}
2362 
2363 		nvp = &(nvc->nvc_port[port]);
2364 		nvp->nvp_ctlp = nvc;
2365 		nvp->nvp_port_num = port;
2366 		NVLOG((NVDBG_INIT, nvc, nvp, "setting up port mappings"));
2367 
2368 		nvp->nvp_cmd_hdl = nvc->nvc_bar_hdl[bar];
2369 		nvp->nvp_cmd_addr = nvc->nvc_bar_addr[bar];
2370 		nvp->nvp_ctl_hdl = nvc->nvc_bar_hdl[bar + 1];
2371 		nvp->nvp_ctl_addr = nvc->nvc_bar_addr[bar + 1];
2372 		nvp->nvp_bm_hdl = nvc->nvc_bar_hdl[NV_BAR_4];
2373 		nvp->nvp_bm_addr = nvc->nvc_bar_addr[NV_BAR_4] +
2374 		    (long)bm_addr_offset;
2375 
2376 		nvp->nvp_sstatus = (uint32_t *)(sreg_offset + NV_SSTATUS);
2377 		nvp->nvp_serror = (uint32_t *)(sreg_offset + NV_SERROR);
2378 		nvp->nvp_sactive = (uint32_t *)(sreg_offset + NV_SACTIVE);
2379 		nvp->nvp_sctrl = (uint32_t *)(sreg_offset + NV_SCTRL);
2380 	}
2381 }
2382 
2383 
2384 static void
2385 nv_uninit_ctl(nv_ctl_t *nvc)
2386 {
2387 	int port;
2388 	nv_port_t *nvp;
2389 
2390 	NVLOG((NVDBG_INIT, nvc, NULL, "nv_uninit_ctl entered"));
2391 
2392 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2393 		nvp = &(nvc->nvc_port[port]);
2394 		mutex_enter(&nvp->nvp_mutex);
2395 		NVLOG((NVDBG_INIT, nvc, nvp, "uninitializing port"));
2396 		nv_uninit_port(nvp);
2397 		mutex_exit(&nvp->nvp_mutex);
2398 		mutex_destroy(&nvp->nvp_mutex);
2399 		cv_destroy(&nvp->nvp_poll_cv);
2400 	}
2401 
2402 	kmem_free(nvc->nvc_port, NV_MAX_PORTS(nvc) * sizeof (nv_port_t));
2403 	nvc->nvc_port = NULL;
2404 }
2405 
2406 
2407 /*
2408  * mcp04 interrupt.  This is a wrapper around mcp04_intr_process so
2409  * that interrupts from other devices can be disregarded while dtracing.
2410  */
2411 /* ARGSUSED */
2412 static uint_t
2413 mcp04_intr(caddr_t arg1, caddr_t arg2)
2414 {
2415 	nv_ctl_t *nvc = (nv_ctl_t *)arg1;
2416 	uint8_t intr_status;
2417 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2418 
2419 	intr_status = ddi_get8(bar5_hdl, nvc->nvc_mcp04_int_status);
2420 
2421 	if (intr_status == 0) {
2422 
2423 		return (DDI_INTR_UNCLAIMED);
2424 	}
2425 
2426 	mcp04_intr_process(nvc, intr_status);
2427 
2428 	return (DDI_INTR_CLAIMED);
2429 }
2430 
2431 
2432 /*
2433  * Main interrupt handler for ck804.  handles normal device
2434  * interrupts as well as port hot plug and remove interrupts.
2435  *
2436  */
2437 static void
2438 mcp04_intr_process(nv_ctl_t *nvc, uint8_t intr_status)
2439 {
2440 
2441 	int port, i;
2442 	nv_port_t *nvp;
2443 	nv_slot_t *nv_slotp;
2444 	uchar_t	status;
2445 	sata_pkt_t *spkt;
2446 	uint8_t bmstatus, clear_bits;
2447 	ddi_acc_handle_t bmhdl;
2448 	int nvcleared = 0;
2449 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2450 	uint32_t sstatus;
2451 	int port_mask_hot[] = {
2452 		MCP04_INT_PDEV_HOT, MCP04_INT_SDEV_HOT,
2453 	};
2454 	int port_mask_pm[] = {
2455 		MCP04_INT_PDEV_PM, MCP04_INT_SDEV_PM,
2456 	};
2457 
2458 	NVLOG((NVDBG_INTR, nvc, NULL,
2459 	    "mcp04_intr_process entered intr_status=%x", intr_status));
2460 
2461 	/*
2462 	 * For the command completion interrupt, an explicit clear is not
2463 	 * required; however, for the error cases an explicit clear is performed.
2464 	 */
2465 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2466 
2467 		int port_mask[] = {MCP04_INT_PDEV_INT, MCP04_INT_SDEV_INT};
2468 
2469 		if ((port_mask[port] & intr_status) == 0) {
2470 			continue;
2471 		}
2472 
2473 		NVLOG((NVDBG_INTR, nvc, NULL,
2474 		    "mcp04_intr_process interrupt on port %d", port));
2475 
2476 		nvp = &(nvc->nvc_port[port]);
2477 
2478 		mutex_enter(&nvp->nvp_mutex);
2479 
2480 		/*
2481 		 * there was a corner case found where an interrupt
2482 		 * arrived before nvp_slot was set.  Should probably
2483 		 * track down why that happens and try to eliminate
2484 		 * that source, and then get rid of this
2485 		 * check.
2486 		 */
2487 		if (nvp->nvp_slot == NULL) {
2488 			status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_status);
2489 			NVLOG((NVDBG_ALWAYS, nvc, nvp, "spurious interrupt "
2490 			    "received before initialization "
2491 			    "completed status=%x", status));
2492 			mutex_exit(&nvp->nvp_mutex);
2493 
2494 			/*
2495 			 * clear interrupt bits
2496 			 */
2497 			nv_put8(bar5_hdl, nvc->nvc_mcp04_int_status,
2498 			    port_mask[port]);
2499 
2500 			continue;
2501 		}
2502 
2503 		if ((&(nvp->nvp_slot[0]))->nvslot_spkt == NULL)  {
2504 			status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_status);
2505 			NVLOG((NVDBG_ALWAYS, nvc, nvp, "spurious interrupt "
2506 			    " no command in progress status=%x", status));
2507 			mutex_exit(&nvp->nvp_mutex);
2508 
2509 			/*
2510 			 * clear interrupt bits
2511 			 */
2512 			nv_put8(bar5_hdl, nvc->nvc_mcp04_int_status,
2513 			    port_mask[port]);
2514 
2515 			continue;
2516 		}
2517 
2518 		bmhdl = nvp->nvp_bm_hdl;
2519 		bmstatus = nv_get8(bmhdl, nvp->nvp_bmisx);
2520 
2521 		if (!(bmstatus & BMISX_IDEINTS)) {
2522 			mutex_exit(&nvp->nvp_mutex);
2523 
2524 			continue;
2525 		}
2526 
2527 		status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
2528 
2529 		if (status & SATA_STATUS_BSY) {
2530 			mutex_exit(&nvp->nvp_mutex);
2531 
2532 			continue;
2533 		}
2534 
2535 		nv_slotp = &(nvp->nvp_slot[0]);
2536 
2537 		ASSERT(nv_slotp);
2538 
2539 		spkt = nv_slotp->nvslot_spkt;
2540 
2541 		if (spkt == NULL) {
2542 			mutex_exit(&nvp->nvp_mutex);
2543 
2544 			continue;
2545 		}
2546 
2547 		(*nv_slotp->nvslot_intr)(nvp, nv_slotp);
2548 
2549 		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
2550 
2551 		/*
2552 		 * If there is no link, we cannot be certain about the completion
2553 		 * of the packet, so abort it.
2554 		 */
2555 		if (nv_check_link((&spkt->satapkt_device)->
2556 		    satadev_scr.sstatus) == B_FALSE) {
2557 
2558 			(void) nv_abort_active(nvp, NULL, SATA_PKT_PORT_ERROR);
2559 
2560 		} else if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
2561 
2562 			nv_complete_io(nvp, spkt, 0);
2563 		}
2564 
2565 		mutex_exit(&nvp->nvp_mutex);
2566 	}
2567 
2568 	/*
2569 	 * mcp04 often doesn't correctly distinguish hot add/remove
2570 	 * interrupts.  Frequently both the ADD and the REMOVE bits
2571 	 * are asserted, whether it was a remove or add.  Use sstatus
2572 	 * to distinguish hot add from hot remove.
2573 	 */
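	/*
	 * Restating the check in the loop below rather than adding behavior:
	 * if the SStatus DET field reports "device present and PHY
	 * communication established", the event is treated as a hot add;
	 * otherwise it is treated as a hot remove.
	 */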
2574 
2575 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2576 		clear_bits = 0;
2577 
2578 		nvp = &(nvc->nvc_port[port]);
2579 		mutex_enter(&nvp->nvp_mutex);
2580 
2581 		if ((port_mask_pm[port] & intr_status) != 0) {
2582 			clear_bits = port_mask_pm[port];
2583 			NVLOG((NVDBG_HOT, nvc, nvp,
2584 			    "clearing PM interrupt bit: %x",
2585 			    intr_status & port_mask_pm[port]));
2586 		}
2587 
2588 		if ((port_mask_hot[port] & intr_status) == 0) {
2589 			if (clear_bits != 0) {
2590 				goto clear;
2591 			} else {
2592 				mutex_exit(&nvp->nvp_mutex);
2593 				continue;
2594 			}
2595 		}
2596 
2597 		/*
2598 		 * reaching here means there was a hot add or remove.
2599 		 */
2600 		clear_bits |= port_mask_hot[port];
2601 
2602 		ASSERT(nvc->nvc_port[port].nvp_sstatus);
2603 
2604 		sstatus = nv_get32(bar5_hdl,
2605 		    nvc->nvc_port[port].nvp_sstatus);
2606 
2607 		if ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) ==
2608 		    SSTATUS_DET_DEVPRE_PHYCOM) {
2609 			nv_report_add_remove(nvp, 0);
2610 		} else {
2611 			nv_report_add_remove(nvp, NV_PORT_HOTREMOVED);
2612 		}
2613 	clear:
2614 		/*
2615 		 * clear interrupt bits.  explicit interrupt clear is
2616 		 * required for hotplug interrupts.
2617 		 */
2618 		nv_put8(bar5_hdl, nvc->nvc_mcp04_int_status, clear_bits);
2619 
2620 		/*
2621 		 * make sure it's flushed and cleared.  If not, try
2622 		 * again.  Sometimes it has been observed to not clear
2623 		 * on the first try.
2624 		 */
2625 		intr_status = nv_get8(bar5_hdl, nvc->nvc_mcp04_int_status);
2626 
2627 		/*
2628 		 * make 10 additional attempts to clear the interrupt
2629 		 */
2630 		for (i = 0; (intr_status & clear_bits) && (i < 10); i++) {
2631 			NVLOG((NVDBG_ALWAYS, nvc, nvp, "inst_status=%x "
2632 			    "still not clear try=%d", intr_status,
2633 			    ++nvcleared));
2634 			nv_put8(bar5_hdl, nvc->nvc_mcp04_int_status,
2635 			    clear_bits);
2636 			intr_status = nv_get8(bar5_hdl,
2637 			    nvc->nvc_mcp04_int_status);
2638 		}
2639 
2640 		/*
2641 		 * if still not clear, log a message and disable the
2642 		 * port. highly unlikely that this path is taken, but it
2643 		 * gives protection against a wedged interrupt.
2644 		 */
2645 		if (intr_status & clear_bits) {
2646 			(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
2647 			nv_port_state_change(nvp, SATA_EVNT_PORT_FAILED,
2648 			    SATA_ADDR_CPORT, SATA_PSTATE_FAILED);
2649 			nvp->nvp_state |= NV_PORT_FAILED;
2650 			(void) nv_abort_active(nvp, NULL, SATA_PKT_DEV_ERROR);
2651 			nv_cmn_err(CE_WARN, nvc, nvp, "unable to clear "
2652 			    "interrupt.  disabling port intr_status=%X",
2653 			    intr_status);
2654 		}
2655 
2656 		mutex_exit(&nvp->nvp_mutex);
2657 	}
2658 }
2659 
2660 
2661 /*
2662  * Interrupt handler for mcp55.  It is invoked by the wrapper for each port
2663  * on the controller, to handle completion and hot plug and remove events.
2664  *
2665  */
2666 static uint_t
2667 mcp55_intr_port(nv_port_t *nvp)
2668 {
2669 	nv_ctl_t *nvc = nvp->nvp_ctlp;
2670 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2671 	uint8_t clear = 0, intr_cycles = 0;
2672 	int ret = DDI_INTR_UNCLAIMED;
2673 	uint16_t int_status;
2674 
2675 	NVLOG((NVDBG_INTR, nvc, nvp, "mcp55_intr_port entered"));
2676 
2677 	for (;;) {
2678 		/*
2679 		 * read current interrupt status
2680 		 */
2681 		int_status = nv_get16(bar5_hdl, nvp->nvp_mcp55_int_status);
2682 
2683 		NVLOG((NVDBG_INTR, nvc, nvp, "int_status = %x", int_status));
2684 
2685 		/*
2686 		 * MCP55_INT_IGNORE interrupts will show up in the status,
2687 		 * but are masked out from causing an interrupt to be generated
2688 		 * to the processor.  Ignore them here by masking them out.
2689 		 */
2690 		int_status &= ~(MCP55_INT_IGNORE);
2691 
2692 		/*
2693 		 * exit the loop when no more interrupts to process
2694 		 */
2695 		if (int_status == 0) {
2696 
2697 			break;
2698 		}
2699 
2700 		if (int_status & MCP55_INT_COMPLETE) {
2701 			NVLOG((NVDBG_INTR, nvc, nvp,
2702 			    "mcp55_packet_complete_intr"));
2703 			/*
2704 			 * since int_status was set, return DDI_INTR_CLAIMED
2705 			 * from the DDI's perspective even though the packet
2706 			 * completion may not have succeeded.  If it fails,
2707 			 * need to manually clear the interrupt, otherwise
2708 			 * clearing is implicit.
2709 			 */
2710 			ret = DDI_INTR_CLAIMED;
2711 			if (mcp55_packet_complete_intr(nvc, nvp) ==
2712 			    NV_FAILURE) {
2713 				clear = MCP55_INT_COMPLETE;
2714 			} else {
2715 				intr_cycles = 0;
2716 			}
2717 		}
2718 
2719 		if (int_status & MCP55_INT_DMA_SETUP) {
2720 			NVLOG((NVDBG_INTR, nvc, nvp, "mcp55_dma_setup_intr"));
2721 
2722 			/*
2723 			 * Needs to be cleared before starting the BM, so do it
2724 			 * now.  make sure this is still working.
2725 			 */
2726 			nv_put16(bar5_hdl, nvp->nvp_mcp55_int_status,
2727 			    MCP55_INT_DMA_SETUP);
2728 #ifdef NCQ
2729 			ret = mcp55_dma_setup_intr(nvc, nvp);
2730 #endif
2731 		}
2732 
2733 		if (int_status & MCP55_INT_REM) {
2734 			NVLOG((NVDBG_INTR, nvc, nvp, "mcp55 device removed"));
2735 			clear = MCP55_INT_REM;
2736 			ret = DDI_INTR_CLAIMED;
2737 
2738 			mutex_enter(&nvp->nvp_mutex);
2739 			nv_report_add_remove(nvp, NV_PORT_HOTREMOVED);
2740 			mutex_exit(&nvp->nvp_mutex);
2741 
2742 		} else if (int_status & MCP55_INT_ADD) {
2743 			NVLOG((NVDBG_HOT, nvc, nvp, "mcp55 device added"));
2744 			clear = MCP55_INT_ADD;
2745 			ret = DDI_INTR_CLAIMED;
2746 
2747 			mutex_enter(&nvp->nvp_mutex);
2748 			nv_report_add_remove(nvp, 0);
2749 			mutex_exit(&nvp->nvp_mutex);
2750 		}
2751 
2752 		if (clear) {
2753 			nv_put16(bar5_hdl, nvp->nvp_mcp55_int_status, clear);
2754 			clear = 0;
2755 		}
2756 
2757 		if (intr_cycles++ == NV_MAX_INTR_LOOP) {
2758 			nv_cmn_err(CE_WARN, nvc, nvp, "excessive interrupt "
2759 			    "processing.  Disabling port int_status=%X"
2760 			    " clear=%X", int_status, clear);
2761 			mutex_enter(&nvp->nvp_mutex);
2762 			(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
2763 			nv_port_state_change(nvp, SATA_EVNT_PORT_FAILED,
2764 			    SATA_ADDR_CPORT, SATA_PSTATE_FAILED);
2765 			nvp->nvp_state |= NV_PORT_FAILED;
2766 			(void) nv_abort_active(nvp, NULL, SATA_PKT_DEV_ERROR);
2767 			mutex_exit(&nvp->nvp_mutex);
2768 		}
2769 	}
2770 
2771 	NVLOG((NVDBG_INTR, nvc, nvp, "mcp55_intr_port: finished ret=%d", ret));
2772 
2773 	return (ret);
2774 }
2775 
2776 
2777 /* ARGSUSED */
2778 static uint_t
2779 mcp55_intr(caddr_t arg1, caddr_t arg2)
2780 {
2781 	nv_ctl_t *nvc = (nv_ctl_t *)arg1;
2782 	int ret;
2783 
2784 	ret = mcp55_intr_port(&(nvc->nvc_port[0]));
2785 	ret |= mcp55_intr_port(&(nvc->nvc_port[1]));
2786 
2787 	return (ret);
2788 }
2789 
2790 
2791 #ifdef NCQ
2792 /*
2793  * with software driven NCQ on mcp55, an interrupt occurs right
2794  * before the drive is ready to do a DMA transfer.  At this point,
2795  * the PRD table needs to be programmed and the DMA engine enabled
2796  * and ready to go.
2797  *
2798  * -- MCP_SATA_AE_INT_STATUS_SDEV_DMA_SETUP indicates the interrupt
2799  * -- MCP_SATA_AE_NCQ_PDEV_DMA_SETUP_TAG shows which command is ready
2800  * -- clear bit 0 of master command reg
2801  * -- program PRD
2802  * -- clear the interrupt status bit for the DMA Setup FIS
2803  * -- set bit 0 of the bus master command register
2804  */
2805 static int
2806 mcp55_dma_setup_intr(nv_ctl_t *nvc, nv_port_t *nvp)
2807 {
2808 	int slot;
2809 	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
2810 	uint8_t bmicx;
2811 	int port = nvp->nvp_port_num;
2812 	uint8_t tag_shift[] = {MCP_SATA_AE_NCQ_PDEV_DMA_SETUP_TAG_SHIFT,
2813 	    MCP_SATA_AE_NCQ_SDEV_DMA_SETUP_TAG_SHIFT};
2814 
2815 	nv_cmn_err(CE_PANIC, nvc, nvp,
2816 	    "this is should not be executed at all until NCQ");
2817 
2818 	mutex_enter(&nvp->nvp_mutex);
2819 
2820 	slot = nv_get32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp55_ncq);
2821 
2822 	slot = (slot >> tag_shift[port]) & MCP_SATA_AE_NCQ_DMA_SETUP_TAG_MASK;
2823 
2824 	NVLOG((NVDBG_INTR, nvc, nvp, "mcp55_dma_setup_intr slot %d"
2825 	    " nvp_slot_sactive %X", slot, nvp->nvp_sactive_cache));
2826 
2827 	/*
2828 	 * halt the DMA engine.  This step is necessary according to
2829 	 * the mcp55 spec, probably since there may have been a "first" packet
2830 	 * that already programmed the DMA engine, but may not turn out to
2831 	 * be the first one processed.
2832 	 */
2833 	bmicx = nv_get8(bmhdl, nvp->nvp_bmicx);
2834 
2835 #if 0
2836 	if (bmicx & BMICX_SSBM) {
2837 		NVLOG((NVDBG_INTR, nvc, nvp, "BM was already enabled for "
2838 		    "another packet.  Cancelling and reprogramming"));
2839 		nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
2840 	}
2841 #endif
2842 	nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
2843 
2844 	nv_start_dma_engine(nvp, slot);
2845 
2846 	mutex_exit(&nvp->nvp_mutex);
2847 
2848 	return (DDI_INTR_CLAIMED);
2849 }
2850 #endif /* NCQ */
2851 
2852 
2853 /*
2854  * packet completion interrupt.  If the packet is complete, invoke
2855  * the packet completion callback.
2856  */
2857 static int
2858 mcp55_packet_complete_intr(nv_ctl_t *nvc, nv_port_t *nvp)
2859 {
2860 	uint8_t status, bmstatus;
2861 	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
2862 	int sactive;
2863 	int active_pkt_bit = 0, active_pkt = 0, ncq_command = B_FALSE;
2864 	sata_pkt_t *spkt;
2865 	nv_slot_t *nv_slotp;
2866 
2867 	mutex_enter(&nvp->nvp_mutex);
2868 
2869 	bmstatus = nv_get8(bmhdl, nvp->nvp_bmisx);
2870 
2871 	if (!(bmstatus & BMISX_IDEINTS)) {
2872 		NVLOG((NVDBG_INTR, nvc, nvp, "BMISX_IDEINTS not set"));
2873 		mutex_exit(&nvp->nvp_mutex);
2874 
2875 		return (NV_FAILURE);
2876 	}
2877 
2878 	/*
2879 	 * If the just completed item is a non-ncq command, the busy
2880 	 * bit should not be set
2881 	 */
2882 	if (nvp->nvp_non_ncq_run) {
2883 		status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
2884 		if (status & SATA_STATUS_BSY) {
2885 			nv_cmn_err(CE_WARN, nvc, nvp,
2886 			    "unexpected SATA_STATUS_BSY set");
2887 			mutex_exit(&nvp->nvp_mutex);
2888 			/*
2889 			 * calling function will clear interrupt.  then
2890 			 * the real interrupt will either arrive or the
2891 			 * packet timeout handling will take over and
2892 			 * reset.
2893 			 */
2894 			return (NV_FAILURE);
2895 		}
2896 
2897 	} else {
2898 		/*
2899 		 * NCQ: check for BSY here and wait if still busy before
2900 		 * continuing.  Rather than waiting for it to be cleared
2901 		 * when starting a packet and wasting CPU time, the starting
2902 		 * thread can exit immediately, but might have to spin here
2903 		 * for a while.  Needs more work and experimentation.
2904 		 */
2905 		ASSERT(nvp->nvp_ncq_run);
2906 	}
2907 
2908 
2909 	if (nvp->nvp_ncq_run) {
2910 		ncq_command = B_TRUE;
2911 		ASSERT(nvp->nvp_non_ncq_run == 0);
2912 	} else {
2913 		ASSERT(nvp->nvp_non_ncq_run != 0);
2914 	}
2915 
2916 	/*
2917 	 * active_pkt_bit will represent the bitmap of the single completed
2918 	 * packet.  Because of the nature of sw assisted NCQ, only one
2919 	 * command will complete per interrupt.
2920 	 */
2921 
2922 	if (ncq_command == B_FALSE) {
2923 		active_pkt = 0;
2924 	} else {
2925 		/*
2926 		 * NCQ: determine which command just completed, by examining
2927 		 * which bit cleared in the register since last written.
2928 		 */
2929 		sactive = nv_get32(nvc->nvc_bar_hdl[5], nvp->nvp_sactive);
2930 
2931 		active_pkt_bit = ~sactive & nvp->nvp_sactive_cache;
2932 
2933 		ASSERT(active_pkt_bit);
2934 
2935 
2936 		/*
2937 		 * this failure path needs more work to handle the
2938 		 * error condition and recovery.
2939 		 */
2940 		if (active_pkt_bit == 0) {
2941 			ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
2942 
2943 			nv_cmn_err(CE_CONT, nvc, nvp, "ERROR sactive = %X  "
2944 			    "nvp->nvp_sactive %X", sactive,
2945 			    nvp->nvp_sactive_cache);
2946 
2947 			(void) nv_get8(cmdhdl, nvp->nvp_status);
2948 
2949 			mutex_exit(&nvp->nvp_mutex);
2950 
2951 			return (NV_FAILURE);
2952 		}
2953 
2954 		for (active_pkt = 0; (active_pkt_bit & 0x1) != 0x1;
2955 		    active_pkt++, active_pkt_bit >>= 1) {
2956 		}
2957 
2958 		/*
2959 		 * make sure only one bit is ever turned on
2960 		 */
2961 		ASSERT(active_pkt_bit == 1);
2962 
2963 		nvp->nvp_sactive_cache &= ~(0x01 << active_pkt);
2964 	}
2965 
2966 	nv_slotp = &(nvp->nvp_slot[active_pkt]);
2967 
2968 	spkt = nv_slotp->nvslot_spkt;
2969 
2970 	ASSERT(spkt != NULL);
2971 
2972 	(*nv_slotp->nvslot_intr)(nvp, nv_slotp);
2973 
2974 	nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
2975 
2976 	/*
2977 	 * If there is no link, we cannot be certain about the completion
2978 	 * of the packet, so abort it.
2979 	 */
2980 	if (nv_check_link((&spkt->satapkt_device)->
2981 	    satadev_scr.sstatus) == B_FALSE) {
2982 		(void) nv_abort_active(nvp, NULL, SATA_PKT_PORT_ERROR);
2983 
2984 	} else if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
2985 
2986 		nv_complete_io(nvp, spkt, active_pkt);
2987 	}
2988 
2989 	mutex_exit(&nvp->nvp_mutex);
2990 
2991 	return (NV_SUCCESS);
2992 }
2993 
2994 
2995 static void
2996 nv_complete_io(nv_port_t *nvp, sata_pkt_t *spkt, int slot)
2997 {
2998 
2999 	ASSERT(MUTEX_HELD(&nvp->nvp_mutex));
3000 
3001 	if ((&(nvp->nvp_slot[slot]))->nvslot_flags & NVSLOT_NCQ) {
3002 		nvp->nvp_ncq_run--;
3003 	} else {
3004 		nvp->nvp_non_ncq_run--;
3005 	}
3006 
3007 	/*
3008 	 * mark the packet slot idle before calling satapkt_comp so that
3009 	 * the slot can be reused.
3010 	 */
3011 	(&(nvp->nvp_slot[slot]))->nvslot_spkt = NULL;
3012 
3013 	if (spkt->satapkt_op_mode & SATA_OPMODE_SYNCH) {
3014 		/*
3015 		 * If this is not a polled mode cmd, which has an
3016 		 * active thread monitoring for completion, then the
3017 		 * sleeping thread needs to be signaled that the cmd is complete.
3018 		 */
3019 		if ((spkt->satapkt_op_mode & SATA_OPMODE_POLLING) == 0) {
3020 			cv_signal(&nvp->nvp_poll_cv);
3021 		}
3022 
3023 		return;
3024 	}
3025 
3026 	if (spkt->satapkt_comp != NULL) {
3027 		mutex_exit(&nvp->nvp_mutex);
3028 		(*spkt->satapkt_comp)(spkt);
3029 		mutex_enter(&nvp->nvp_mutex);
3030 	}
3031 }
3032 
3033 
3034 /*
3035  * check whether the packet is an ncq command or not.  for an ncq command,
3036  * start it if there is still room on the queue.  for a non-ncq command,
3037  * only start it if no other command is running.
3038  */
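/*
 * Summarizing the admission rules implemented below (a restatement of the
 * code, not additional behavior):
 *
 *	non-NCQ packet:	accepted only when nothing else is running;
 *	NCQ packet:	rejected while a non-NCQ command is running, otherwise
 *			accepted until nvp_ncq_run reaches nvp_queue_depth;
 *	rejected packets have satapkt_reason set to SATA_PKT_QUEUE_FULL and
 *	return SATA_TRAN_QUEUE_FULL so the framework can retry them later.
 */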
3039 static int
3040 nv_start_async(nv_port_t *nvp, sata_pkt_t *spkt)
3041 {
3042 	uint8_t cmd, ncq;
3043 
3044 	NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp, "nv_start_async: entry"));
3045 
3046 	cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
3047 
3048 	ncq = ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
3049 	    (cmd == SATAC_READ_FPDMA_QUEUED));
3050 
3051 	if (ncq == B_FALSE) {
3052 
3053 		if ((nvp->nvp_non_ncq_run == 1) ||
3054 		    (nvp->nvp_ncq_run > 0)) {
3055 			/*
3056 			 * next command is non-ncq which can't run
3057 			 * concurrently.  exit and return queue full.
3058 			 */
3059 			spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3060 
3061 			return (SATA_TRAN_QUEUE_FULL);
3062 		}
3063 
3064 		return (nv_start_common(nvp, spkt));
3065 	}
3066 
3067 	/*
3068 	 * ncq == B_TRUE
3069 	 */
3070 	if (nvp->nvp_non_ncq_run == 1) {
3071 		/*
3072 		 * cannot start any NCQ commands when there
3073 		 * is a non-NCQ command running.
3074 		 */
3075 		spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3076 
3077 		return (SATA_TRAN_QUEUE_FULL);
3078 	}
3079 
3080 #ifdef NCQ
3081 	/*
3082 	 * this is not compiled for now as satapkt_device.satadev_qdepth
3083 	 * is being pulled out until NCQ support is later addressed
3084 	 *
3085 	 * nvp_queue_depth is initialized by the first NCQ command
3086 	 * received.
3087 	 */
3088 	if (nvp->nvp_queue_depth == 1) {
3089 		nvp->nvp_queue_depth =
3090 		    spkt->satapkt_device.satadev_qdepth;
3091 
3092 		ASSERT(nvp->nvp_queue_depth > 1);
3093 
3094 		NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp,
3095 		    "nv_process_queue: nvp_queue_depth set to %d",
3096 		    nvp->nvp_queue_depth));
3097 	}
3098 #endif
3099 
3100 	if (nvp->nvp_ncq_run >= nvp->nvp_queue_depth) {
3101 		/*
3102 		 * max number of NCQ commands already active
3103 		 */
3104 		spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3105 
3106 		return (SATA_TRAN_QUEUE_FULL);
3107 	}
3108 
3109 	return (nv_start_common(nvp, spkt));
3110 }
3111 
3112 
3113 /*
3114  * configure legacy (INTx) interrupts
3115  */
3116 static int
3117 nv_add_legacy_intrs(nv_ctl_t *nvc)
3118 {
3119 	dev_info_t	*devinfo = nvc->nvc_dip;
3120 	int		actual, count = 0;
3121 	int		x, y, rc, inum = 0;
3122 
3123 	NVLOG((NVDBG_ENTRY, nvc, NULL, "nv_add_legacy_intrs"));
3124 
3125 	/*
3126 	 * get number of interrupts
3127 	 */
3128 	rc = ddi_intr_get_nintrs(devinfo, DDI_INTR_TYPE_FIXED, &count);
3129 	if ((rc != DDI_SUCCESS) || (count == 0)) {
3130 		NVLOG((NVDBG_INTR, nvc, NULL,
3131 		    "ddi_intr_get_nintrs() failed, "
3132 		    "rc %d count %d", rc, count));
3133 
3134 		return (DDI_FAILURE);
3135 	}
3136 
3137 	/*
3138 	 * allocate an array of interrupt handles
3139 	 */
3140 	nvc->nvc_intr_size = count * sizeof (ddi_intr_handle_t);
3141 	nvc->nvc_htable = kmem_zalloc(nvc->nvc_intr_size, KM_SLEEP);
3142 
3143 	/*
3144 	 * call ddi_intr_alloc()
3145 	 */
3146 	rc = ddi_intr_alloc(devinfo, nvc->nvc_htable, DDI_INTR_TYPE_FIXED,
3147 	    inum, count, &actual, DDI_INTR_ALLOC_STRICT);
3148 
3149 	if ((rc != DDI_SUCCESS) || (actual == 0)) {
3150 		nv_cmn_err(CE_WARN, nvc, NULL,
3151 		    "ddi_intr_alloc() failed, rc %d", rc);
3152 		kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3153 
3154 		return (DDI_FAILURE);
3155 	}
3156 
3157 	if (actual < count) {
3158 		nv_cmn_err(CE_WARN, nvc, NULL,
3159 		    "ddi_intr_alloc: requested: %d, received: %d",
3160 		    count, actual);
3161 
3162 		goto failure;
3163 	}
3164 
3165 	nvc->nvc_intr_cnt = actual;
3166 
3167 	/*
3168 	 * get intr priority
3169 	 */
3170 	if (ddi_intr_get_pri(nvc->nvc_htable[0], &nvc->nvc_intr_pri) !=
3171 	    DDI_SUCCESS) {
3172 		nv_cmn_err(CE_WARN, nvc, NULL, "ddi_intr_get_pri() failed");
3173 
3174 		goto failure;
3175 	}
3176 
3177 	/*
3178 	 * Test for high level mutex
3179 	 */
3180 	if (nvc->nvc_intr_pri >= ddi_intr_get_hilevel_pri()) {
3181 		nv_cmn_err(CE_WARN, nvc, NULL,
3182 		    "nv_add_legacy_intrs: high level intr not supported");
3183 
3184 		goto failure;
3185 	}
3186 
3187 	for (x = 0; x < actual; x++) {
3188 		if (ddi_intr_add_handler(nvc->nvc_htable[x],
3189 		    nvc->nvc_interrupt, (caddr_t)nvc, NULL) != DDI_SUCCESS) {
3190 			nv_cmn_err(CE_WARN, nvc, NULL,
3191 			    "ddi_intr_add_handler() failed");
3192 
3193 			goto failure;
3194 		}
3195 	}
3196 
3197 	/*
3198 	 * call ddi_intr_enable() for legacy interrupts
3199 	 */
3200 	for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3201 		(void) ddi_intr_enable(nvc->nvc_htable[x]);
3202 	}
3203 
3204 	return (DDI_SUCCESS);
3205 
3206 	failure:
3207 	/*
3208 	 * free allocated intr and nvc_htable
3209 	 */
3210 	for (y = 0; y < actual; y++) {
3211 		(void) ddi_intr_free(nvc->nvc_htable[y]);
3212 	}
3213 
3214 	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3215 
3216 	return (DDI_FAILURE);
3217 }
3218 
3219 #ifdef	NV_MSI_SUPPORTED
3220 /*
3221  * configure MSI interrupts
3222  */
3223 static int
3224 nv_add_msi_intrs(nv_ctl_t *nvc)
3225 {
3226 	dev_info_t	*devinfo = nvc->nvc_dip;
3227 	int		count, avail, actual;
3228 	int		x, y, rc, inum = 0;
3229 
3230 	NVLOG((NVDBG_ENTRY, nvc, NULL, "nv_add_msi_intrs"));
3231 
3232 	/*
3233 	 * get number of interrupts
3234 	 */
3235 	rc = ddi_intr_get_nintrs(devinfo, DDI_INTR_TYPE_MSI, &count);
3236 	if ((rc != DDI_SUCCESS) || (count == 0)) {
3237 		nv_cmn_err(CE_WARN, nvc, NULL,
3238 		    "ddi_intr_get_nintrs() failed, "
3239 		    "rc %d count %d", rc, count);
3240 
3241 		return (DDI_FAILURE);
3242 	}
3243 
3244 	/*
3245 	 * get number of available interrupts
3246 	 */
3247 	rc = ddi_intr_get_navail(devinfo, DDI_INTR_TYPE_MSI, &avail);
3248 	if ((rc != DDI_SUCCESS) || (avail == 0)) {
3249 		nv_cmn_err(CE_WARN, nvc, NULL,
3250 		    "ddi_intr_get_navail() failed, "
3251 		    "rc %d avail %d", rc, avail);
3252 
3253 		return (DDI_FAILURE);
3254 	}
3255 
3256 	if (avail < count) {
3257 		nv_cmn_err(CE_WARN, nvc, NULL,
3258 		    "ddi_intr_get_nvail returned %d ddi_intr_get_nintrs: %d",
3259 		    avail, count);
3260 	}
3261 
3262 	/*
3263 	 * allocate an array of interrupt handles
3264 	 */
3265 	nvc->nvc_intr_size = count * sizeof (ddi_intr_handle_t);
3266 	nvc->nvc_htable = kmem_alloc(nvc->nvc_intr_size, KM_SLEEP);
3267 
3268 	rc = ddi_intr_alloc(devinfo, nvc->nvc_htable, DDI_INTR_TYPE_MSI,
3269 	    inum, count, &actual, DDI_INTR_ALLOC_NORMAL);
3270 
3271 	if ((rc != DDI_SUCCESS) || (actual == 0)) {
3272 		nv_cmn_err(CE_WARN, nvc, NULL,
3273 		    "ddi_intr_alloc() failed, rc %d", rc);
3274 		kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3275 
3276 		return (DDI_FAILURE);
3277 	}
3278 
3279 	/*
3280 	 * Use interrupt count returned or abort?
3281 	 */
3282 	if (actual < count) {
3283 		NVLOG((NVDBG_INIT, nvc, NULL,
3284 		    "Requested: %d, Received: %d", count, actual));
3285 	}
3286 
3287 	nvc->nvc_intr_cnt = actual;
3288 
3289 	/*
3290 	 * get priority for first msi, assume remaining are all the same
3291 	 */
3292 	if (ddi_intr_get_pri(nvc->nvc_htable[0], &nvc->nvc_intr_pri) !=
3293 	    DDI_SUCCESS) {
3294 		nv_cmn_err(CE_WARN, nvc, NULL, "ddi_intr_get_pri() failed");
3295 
3296 		goto failure;
3297 	}
3298 
3299 	/*
3300 	 * test for high level mutex
3301 	 */
3302 	if (nvc->nvc_intr_pri >= ddi_intr_get_hilevel_pri()) {
3303 		nv_cmn_err(CE_WARN, nvc, NULL,
3304 		    "nv_add_msi_intrs: high level intr not supported");
3305 
3306 		goto failure;
3307 	}
3308 
3309 	/*
3310 	 * Call ddi_intr_add_handler()
3311 	 */
3312 	for (x = 0; x < actual; x++) {
3313 		if (ddi_intr_add_handler(nvc->nvc_htable[x],
3314 		    nvc->nvc_interrupt, (caddr_t)nvc, NULL) != DDI_SUCCESS) {
3315 			nv_cmn_err(CE_WARN, nvc, NULL,
3316 			    "ddi_intr_add_handler() failed");
3317 
3318 			goto failure;
3319 		}
3320 	}
3321 
3322 	(void) ddi_intr_get_cap(nvc->nvc_htable[0], &nvc->nvc_intr_cap);
3323 
3324 	if (nvc->nvc_intr_cap & DDI_INTR_FLAG_BLOCK) {
3325 		(void) ddi_intr_block_enable(nvc->nvc_htable,
3326 		    nvc->nvc_intr_cnt);
3327 	} else {
3328 		/*
3329 		 * Call ddi_intr_enable() for MSI non block enable
3330 		 */
3331 		for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3332 			(void) ddi_intr_enable(nvc->nvc_htable[x]);
3333 		}
3334 	}
3335 
3336 	return (DDI_SUCCESS);
3337 
3338 	failure:
3339 	/*
3340 	 * free allocated intr and nvc_htable
3341 	 */
3342 	for (y = 0; y < actual; y++) {
3343 		(void) ddi_intr_free(nvc->nvc_htable[y]);
3344 	}
3345 
3346 	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3347 
3348 	return (DDI_FAILURE);
3349 }
3350 #endif
3351 
3352 
3353 static void
3354 nv_rem_intrs(nv_ctl_t *nvc)
3355 {
3356 	int x, i;
3357 	nv_port_t *nvp;
3358 
3359 	NVLOG((NVDBG_ENTRY, nvc, NULL, "nv_rem_intrs"));
3360 
3361 	/*
3362 	 * prevent controller from generating interrupts by
3363 	 * masking them out.  This is an extra precaution.
3364 	 */
3365 	for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
3366 		nvp = (&nvc->nvc_port[i]);
3367 		mutex_enter(&nvp->nvp_mutex);
3368 		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
3369 		mutex_exit(&nvp->nvp_mutex);
3370 	}
3371 
3372 	/*
3373 	 * disable all interrupts
3374 	 */
3375 	if ((nvc->nvc_intr_type == DDI_INTR_TYPE_MSI) &&
3376 	    (nvc->nvc_intr_cap & DDI_INTR_FLAG_BLOCK)) {
3377 		(void) ddi_intr_block_disable(nvc->nvc_htable,
3378 		    nvc->nvc_intr_cnt);
3379 	} else {
3380 		for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3381 			(void) ddi_intr_disable(nvc->nvc_htable[x]);
3382 		}
3383 	}
3384 
3385 	for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3386 		(void) ddi_intr_remove_handler(nvc->nvc_htable[x]);
3387 		(void) ddi_intr_free(nvc->nvc_htable[x]);
3388 	}
3389 
3390 	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3391 }
3392 
3393 
3394 /*
3395  * variable argument wrapper for cmn_err.  prefixes the instance and port
3396  * number if possible
3397  */
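/*
 * For example (illustrative, derived from the format strings below), a
 * message logged with a controller at instance 0 and port 1 comes out as
 * "nv_sata inst 0 port 1: <message>", while one with no nvc or nvp
 * context comes out as "nv_sata <message>".
 */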
3398 static void
3399 nv_vcmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, va_list ap)
3400 {
3401 	char port[NV_STRING_10];
3402 	char inst[NV_STRING_10];
3403 
3404 	mutex_enter(&nv_log_mutex);
3405 
3406 	if (nvc) {
3407 		(void) snprintf(inst, NV_STRING_10, "inst %d",
3408 		    ddi_get_instance(nvc->nvc_dip));
3409 	} else {
3410 		inst[0] = '\0';
3411 	}
3412 
3413 	if (nvp) {
3414 		(void) sprintf(port, " port %d", nvp->nvp_port_num);
3415 	} else {
3416 		port[0] = '\0';
3417 	}
3418 
3419 	(void) sprintf(nv_log_buf, "nv_sata %s%s%s", inst, port,
3420 	    (inst[0]|port[0] ? ": " :""));
3421 
3422 	(void) vsnprintf(&nv_log_buf[strlen(nv_log_buf)],
3423 	    NV_STRING_512 - strlen(nv_log_buf), fmt, ap);
3424 
3425 	/*
3426 	 * normally set to log to console but in some debug situations it
3427 	 * may be useful to log only to a file.
3428 	 */
3429 	if (nv_log_to_console) {
3430 		if (nv_prom_print) {
3431 			prom_printf("%s\n", nv_log_buf);
3432 		} else {
3433 			cmn_err(ce, "%s", nv_log_buf);
3434 		}
3435 
3436 
3437 	} else {
3438 		cmn_err(ce, "!%s", nv_log_buf);
3439 	}
3440 
3441 	mutex_exit(&nv_log_mutex);
3442 }
3443 
3444 
3445 /*
3446  * wrapper for cmn_err
3447  */
3448 static void
3449 nv_cmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...)
3450 {
3451 	va_list ap;
3452 
3453 	va_start(ap, fmt);
3454 	nv_vcmn_err(ce, nvc, nvp, fmt, ap);
3455 	va_end(ap);
3456 }
3457 
3458 
3459 #if defined(DEBUG)
3460 /*
3461  * prefixes the debug message with the instance and port number if possible
3462  */
3463 static void
3464 nv_log(uint_t flag, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...)
3465 {
3466 	va_list ap;
3467 
3468 	if ((nv_debug_flags & flag) == 0) {
3469 		return;
3470 	}
3471 
3472 	va_start(ap, fmt);
3473 	nv_vcmn_err(CE_NOTE, nvc, nvp, fmt, ap);
3474 	va_end(ap);
3475 
3476 	/*
3477 	 * useful for some debugging situations
3478 	 */
3479 	if (nv_log_delay) {
3480 		drv_usecwait(nv_log_delay);
3481 	}
3482 
3483 }
3484 #endif /* DEBUG */
3485 
3486 
3487 /*
3488  * program registers which are common to all commands
3489  */
3490 static void
3491 nv_program_taskfile_regs(nv_port_t *nvp, int slot)
3492 {
3493 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3494 	sata_pkt_t *spkt;
3495 	sata_cmd_t *satacmd;
3496 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3497 	uint8_t cmd, ncq = B_FALSE;
3498 
3499 	spkt = nv_slotp->nvslot_spkt;
3500 	satacmd = &spkt->satapkt_cmd;
3501 	cmd = satacmd->satacmd_cmd_reg;
3502 
3503 	ASSERT(nvp->nvp_slot);
3504 
3505 	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
3506 	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
3507 		ncq = B_TRUE;
3508 	}
3509 
3510 	/*
3511 	 * select the drive
3512 	 */
3513 	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);
3514 
3515 	/*
3516 	 * make certain the drive is selected
3517 	 */
3518 	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
3519 	    NV_SEC2USEC(5), 0) == B_FALSE) {
3520 
3521 		return;
3522 	}
3523 
3524 	switch (spkt->satapkt_cmd.satacmd_addr_type) {
3525 
3526 	case ATA_ADDR_LBA:
3527 		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp, "ATA_ADDR_LBA mode"));
3528 
3529 		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
3530 		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
3531 		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
3532 		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
3533 
3534 		break;
3535 
3536 	case ATA_ADDR_LBA28:
3537 		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
3538 		    "ATA_ADDR_LBA28 mode"));
3539 		/*
3540 		 * NCQ only uses 48-bit addressing
3541 		 */
3542 		ASSERT(ncq != B_TRUE);
3543 
3544 		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
3545 		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
3546 		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
3547 		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
3548 
3549 		break;
3550 
3551 	case ATA_ADDR_LBA48:
3552 		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
3553 		    "ATA_ADDR_LBA48 mode"));
3554 
3555 		/*
3556 		 * for NCQ, tag goes into count register and real sector count
3557 		 * into features register.  The sata module does the translation
3558 		 * in the satacmd.
3559 		 */
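		/*
		 * Illustrative note, an assumption based on the ATA FPDMA
		 * queued command definition rather than something stated in
		 * this file: the tag occupies bits 7:3 of the count
		 * register, which is why the slot number is shifted left by
		 * 3 below.
		 */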
3560 		if (ncq == B_TRUE) {
3561 			nv_put8(cmdhdl, nvp->nvp_count, slot << 3);
3562 			nv_put8(cmdhdl, nvp->nvp_feature,
3563 			    satacmd->satacmd_features_reg_ext);
3564 			nv_put8(cmdhdl, nvp->nvp_feature,
3565 			    satacmd->satacmd_features_reg);
3566 		} else {
3567 			nv_put8(cmdhdl, nvp->nvp_count,
3568 			    satacmd->satacmd_sec_count_msb);
3569 			nv_put8(cmdhdl, nvp->nvp_count,
3570 			    satacmd->satacmd_sec_count_lsb);
3571 		}
3572 
3573 		/*
3574 		 * send the high-order half first
3575 		 */
3576 		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_msb);
3577 		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_msb);
3578 		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_msb);
3579 		/*
3580 		 * Send the low-order half
3581 		 */
3582 		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
3583 		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
3584 		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
3585 
3586 		break;
3587 
3588 	case 0:
3589 		/*
3590 		 * non-media access commands such as identify and features
3591 		 * take this path.
3592 		 */
3593 		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
3594 		nv_put8(cmdhdl, nvp->nvp_feature,
3595 		    satacmd->satacmd_features_reg);
3596 		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
3597 		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
3598 		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
3599 
3600 		break;
3601 
3602 	default:
3603 		break;
3604 	}
3605 
3606 	ASSERT(nvp->nvp_slot);
3607 }
3608 
3609 
3610 /*
3611  * start a command that involves no media access
3612  */
3613 static int
3614 nv_start_nodata(nv_port_t *nvp, int slot)
3615 {
3616 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3617 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
3618 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
3619 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3620 
3621 	nv_program_taskfile_regs(nvp, slot);
3622 
3623 	/*
3624 	 * This next one sets the controller in motion
3625 	 */
3626 	nv_put8(cmdhdl, nvp->nvp_cmd, sata_cmdp->satacmd_cmd_reg);
3627 
3628 	return (SATA_TRAN_ACCEPTED);
3629 }
3630 
3631 
3632 int
3633 nv_bm_status_clear(nv_port_t *nvp)
3634 {
3635 	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3636 	uchar_t	status, ret;
3637 
3638 	/*
3639 	 * Get the current BM status
3640 	 */
3641 	ret = status = nv_get8(bmhdl, nvp->nvp_bmisx);
3642 
3643 	status = (status & BMISX_MASK) | BMISX_IDERR | BMISX_IDEINTS;
3644 
3645 	/*
3646 	 * Clear the latches (and preserve the other bits)
3647 	 */
3648 	nv_put8(bmhdl, nvp->nvp_bmisx, status);
3649 
3650 	return (ret);
3651 }
3652 
3653 
3654 /*
3655  * program the bus master DMA engine with the PRD address for
3656  * the active slot command, and start the DMA engine.
3657  */
3658 static void
3659 nv_start_dma_engine(nv_port_t *nvp, int slot)
3660 {
3661 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3662 	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3663 	uchar_t direction;
3664 
3665 	ASSERT(nv_slotp->nvslot_spkt != NULL);
3666 
3667 	if (nv_slotp->nvslot_spkt->satapkt_cmd.satacmd_flags.sata_data_direction
3668 	    == SATA_DIR_READ) {
3669 		direction = BMICX_RWCON_WRITE_TO_MEMORY;
3670 	} else {
3671 		direction = BMICX_RWCON_READ_FROM_MEMORY;
3672 	}
3673 
3674 	NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
3675 	    "nv_start_dma_engine entered"));
3676 
3677 	/*
3678 	 * reset the controller's interrupt and error status bits
3679 	 */
3680 	(void) nv_bm_status_clear(nvp);
3681 
3682 	/*
3683 	 * program the PRD table physical start address
3684 	 */
3685 	nv_put32(bmhdl, nvp->nvp_bmidtpx, nvp->nvp_sg_paddr[slot]);
3686 
3687 	/*
3688 	 * set the direction control and start the DMA controller
3689 	 */
3690 	nv_put8(bmhdl, nvp->nvp_bmicx, direction | BMICX_SSBM);
3691 }
3692 
3693 /*
3694  * start dma command, either in or out
3695  */
3696 static int
3697 nv_start_dma(nv_port_t *nvp, int slot)
3698 {
3699 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3700 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3701 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
3702 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
3703 	uint8_t cmd = sata_cmdp->satacmd_cmd_reg;
3704 #ifdef NCQ
3705 	uint8_t ncq = B_FALSE;
3706 #endif
3707 	ddi_acc_handle_t sghdl = nvp->nvp_sg_acc_hdl[slot];
3708 	uint_t *dstp = (uint_t *)nvp->nvp_sg_addr[slot];
3709 	int sg_count = sata_cmdp->satacmd_num_dma_cookies, idx;
3710 	ddi_dma_cookie_t  *srcp = sata_cmdp->satacmd_dma_cookie_list;
3711 
3712 	ASSERT(sg_count != 0);
3713 
3714 	if (sata_cmdp->satacmd_num_dma_cookies > NV_DMA_NSEGS) {
3715 		nv_cmn_err(CE_WARN, nvp->nvp_ctlp, nvp, "NV_DMA_NSEGS=%d <"
3716 		    " satacmd_num_dma_cookies=%d", NV_DMA_NSEGS,
3717 		    sata_cmdp->satacmd_num_dma_cookies);
3718 
3719 		return (NV_FAILURE);
3720 	}
3721 
3722 	nv_program_taskfile_regs(nvp, slot);
3723 
3724 	/*
3725 	 * start the drive in motion
3726 	 */
3727 	nv_put8(cmdhdl, nvp->nvp_cmd, cmd);
3728 
3729 	/*
3730 	 * the drive starts processing the transaction when the cmd register
3731 	 * is written.  This is done here before programming the DMA engine to
3732 	 * parallelize and save some time.  In the event that the drive is ready
3733 	 * before DMA, it will wait.
3734 	 */
3735 #ifdef NCQ
3736 	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
3737 	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
3738 		ncq = B_TRUE;
3739 	}
3740 #endif
3741 
3742 	/*
3743 	 * copy the PRD list to PRD table in DMA accessible memory
3744 	 * so that the controller can access it.
3745 	 */
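	/*
	 * Sketch of the PRD entry layout as written by the loop below (a
	 * reading of this code, not an authoritative hardware description):
	 * each entry is two 32-bit words,
	 *
	 *	word 0:	physical base address, bits 31:0
	 *	word 1:	bits 15:0 byte count, bits 23:16 physical address
	 *		bits 39:32 when 40-bit PRDs are in use, and the
	 *		PRDE_EOT flag set on the last entry of the table
	 */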
3746 	for (idx = 0; idx < sg_count; idx++, srcp++) {
3747 		uint32_t size;
3748 
3749 		ASSERT(srcp->dmac_size <= UINT16_MAX);
3750 
3751 		nv_put32(sghdl, dstp++, srcp->dmac_address);
3752 
3753 		size = srcp->dmac_size;
3754 
3755 		/*
3756 		 * If this is a 40-bit address, copy bits 39:32 of the
3757 		 * physical address into bits 23:16 of the PRD count.
3758 		 */
3759 		if (srcp->dmac_laddress > UINT32_MAX) {
3760 			size |= ((srcp->dmac_laddress & 0xff00000000) >> 16);
3761 		}
3762 
3763 		/*
3764 		 * set the end of table flag for the last entry
3765 		 */
3766 		if (idx == (sg_count - 1)) {
3767 			size |= PRDE_EOT;
3768 		}
3769 
3770 		nv_put32(sghdl, dstp++, size);
3771 	}
3772 
3773 	(void) ddi_dma_sync(nvp->nvp_sg_dma_hdl[slot], 0,
3774 	    sizeof (prde_t) * NV_DMA_NSEGS, DDI_DMA_SYNC_FORDEV);
3775 
3776 	nv_start_dma_engine(nvp, slot);
3777 
3778 #ifdef NCQ
3779 	/*
3780 	 * optimization:  for SWNCQ, start DMA engine if this is the only
3781 	 * command running.  Preliminary NCQ efforts indicated this needs
3782 	 * more debugging.
3783 	 *
3784 	 * if (nvp->nvp_ncq_run <= 1)
3785 	 */
3786 
3787 	if (ncq == B_FALSE) {
3788 		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
3789 		    "NOT NCQ so starting DMA NOW non_ncq_commands=%d"
3790 		    " cmd = %X", non_ncq_commands++, cmd));
3791 		nv_start_dma_engine(nvp, slot);
3792 	} else {
3793 		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp, "?NCQ, so program "
3794 		    "DMA later ncq_commands=%d cmd = %X", ncq_commands++, cmd));
3795 	}
3796 #endif /* NCQ */
3797 
3798 	return (SATA_TRAN_ACCEPTED);
3799 }
3800 
3801 
3802 /*
3803  * start a PIO data-in ATA command
3804  */
3805 static int
3806 nv_start_pio_in(nv_port_t *nvp, int slot)
3807 {
3808 
3809 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3810 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
3811 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3812 
3813 	nv_program_taskfile_regs(nvp, slot);
3814 
3815 	/*
3816 	 * This next one sets the drive in motion
3817 	 */
3818 	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
3819 
3820 	return (SATA_TRAN_ACCEPTED);
3821 }
3822 
3823 
3824 /*
3825  * start a PIO data-out ATA command
3826  */
3827 static int
3828 nv_start_pio_out(nv_port_t *nvp, int slot)
3829 {
3830 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3831 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3832 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
3833 
3834 	nv_program_taskfile_regs(nvp, slot);
3835 
3836 	/*
3837 	 * this next one sets the drive in motion
3838 	 */
3839 	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
3840 
3841 	/*
3842 	 * wait for the busy bit to settle
3843 	 */
3844 	NV_DELAY_NSEC(400);
3845 
3846 	/*
	 * Wait for the drive to assert DRQ to send the first chunk of
	 * data.  This requires a busy wait because there is no interrupt
	 * for the first chunk, which burns a lot of cycles if the drive
	 * responds too slowly or if the wait loop granularity is too
	 * large.  It is even worse if the drive is defective and the
	 * loop times out.
3853 	 */
3854 	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
3855 	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
3856 	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
3857 	    4000000, 0) == B_FALSE) {
3858 		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
3859 
3860 		goto error;
3861 	}
3862 
3863 	/*
3864 	 * send the first block.
3865 	 */
3866 	nv_intr_pio_out(nvp, nv_slotp);
3867 
3868 	/*
3869 	 * If nvslot_flags is not set to COMPLETE yet, then processing
3870 	 * is OK so far, so return.  Otherwise, fall into error handling
3871 	 * below.
3872 	 */
3873 	if (nv_slotp->nvslot_flags != NVSLOT_COMPLETE) {
3874 
3875 		return (SATA_TRAN_ACCEPTED);
3876 	}
3877 
3878 	error:
3879 	/*
3880 	 * there was an error so reset the device and complete the packet.
3881 	 */
3882 	nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
3883 	nv_complete_io(nvp, spkt, 0);
3884 	nv_reset(nvp);
3885 
3886 	return (SATA_TRAN_PORT_ERROR);
3887 }
3888 
3889 
3890 /*
 * start an ATAPI PACKET command (PIO data in or out)
3892  */
3893 static int
3894 nv_start_pkt_pio(nv_port_t *nvp, int slot)
3895 {
3896 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3897 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
3898 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3899 	sata_cmd_t *satacmd = &spkt->satapkt_cmd;
3900 
3901 	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
3902 	    "nv_start_pkt_pio: start"));
3903 
3904 	/*
3905 	 * Write the PACKET command to the command register.  Normally
3906 	 * this would be done through nv_program_taskfile_regs().  It
3907 	 * is done here because some values need to be overridden.
3908 	 */
3909 
3910 	/* select the drive */
3911 	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);
3912 
	/* make certain the drive is selected */
3914 	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
3915 	    NV_SEC2USEC(5), 0) == B_FALSE) {
3916 		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
3917 		    "nv_start_pkt_pio: drive select failed"));
3918 		return (SATA_TRAN_PORT_ERROR);
3919 	}
3920 
3921 	/*
	 * The command is always sent via PIO, regardless of what the SATA
3923 	 * framework sets in the command.  Overwrite the DMA bit to do this.
3924 	 * Also, overwrite the overlay bit to be safe (it shouldn't be set).
3925 	 */
3926 	nv_put8(cmdhdl, nvp->nvp_feature, 0);	/* deassert DMA and OVL */
3927 
3928 	/* set appropriately by the sata framework */
3929 	nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
3930 	nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
3931 	nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
3932 	nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
3933 
3934 	/* initiate the command by writing the command register last */
3935 	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
3936 
3937 	/* Give the host controller time to do its thing */
3938 	NV_DELAY_NSEC(400);
3939 
3940 	/*
3941 	 * Wait for the device to indicate that it is ready for the command
3942 	 * ATAPI protocol state - HP0: Check_Status_A
3943 	 */
3944 
3945 	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
3946 	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
3947 	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
3948 	    4000000, 0) == B_FALSE) {
3949 		/*
3950 		 * Either an error or device fault occurred or the wait
3951 		 * timed out.  According to the ATAPI protocol, command
3952 		 * completion is also possible.  Other implementations of
3953 		 * this protocol don't handle this last case, so neither
3954 		 * does this code.
3955 		 */
3956 
3957 		if (nv_get8(cmdhdl, nvp->nvp_status) &
3958 		    (SATA_STATUS_ERR | SATA_STATUS_DF)) {
3959 			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
3960 
3961 			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
3962 			    "nv_start_pkt_pio: device error (HP0)"));
3963 		} else {
3964 			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
3965 
3966 			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
3967 			    "nv_start_pkt_pio: timeout (HP0)"));
3968 		}
3969 
3970 		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
3971 		nv_complete_io(nvp, spkt, 0);
3972 		nv_reset(nvp);
3973 
3974 		return (SATA_TRAN_PORT_ERROR);
3975 	}
3976 
3977 	/*
3978 	 * Put the ATAPI command in the data register
3979 	 * ATAPI protocol state - HP1: Send_Packet
3980 	 */
3981 
3982 	ddi_rep_put16(cmdhdl, (ushort_t *)spkt->satapkt_cmd.satacmd_acdb,
3983 	    (ushort_t *)nvp->nvp_data,
3984 	    (spkt->satapkt_cmd.satacmd_acdb_len >> 1), DDI_DEV_NO_AUTOINCR);
3985 
3986 	/*
3987 	 * See you in nv_intr_pkt_pio.
3988 	 * ATAPI protocol state - HP3: INTRQ_wait
3989 	 */
3990 
3991 	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
3992 	    "nv_start_pkt_pio: exiting into HP3"));
3993 
3994 	return (SATA_TRAN_ACCEPTED);
3995 }
3996 
3997 
3998 /*
3999  * Interrupt processing for a non-data ATA command.
4000  */
4001 static void
4002 nv_intr_nodata(nv_port_t *nvp, nv_slot_t *nv_slotp)
4003 {
4004 	uchar_t status;
4005 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4006 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4007 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4008 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4009 
4010 	NVLOG((NVDBG_INTR, nvp->nvp_ctlp, nvp, "nv_intr_nodata entered"));
4011 
4012 	status = nv_get8(cmdhdl, nvp->nvp_status);
4013 
4014 	/*
4015 	 * check for errors
4016 	 */
4017 	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
4018 		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4019 		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4020 		    nvp->nvp_altstatus);
4021 		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4022 	} else {
4023 		spkt->satapkt_reason = SATA_PKT_COMPLETED;
4024 	}
4025 
4026 	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4027 }
4028 
4029 
4030 /*
4031  * ATA command, PIO data in
4032  */
4033 static void
4034 nv_intr_pio_in(nv_port_t *nvp, nv_slot_t *nv_slotp)
4035 {
4036 	uchar_t	status;
4037 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4038 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4039 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4040 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4041 	int count;
4042 
4043 	status = nv_get8(cmdhdl, nvp->nvp_status);
4044 
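	/*
	 * The device should have cleared BSY before raising the interrupt.
	 * If BSY is still set, treat the command as timed out and reset
	 * the port.
	 */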
4045 	if (status & SATA_STATUS_BSY) {
4046 		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4047 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4048 		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4049 		    nvp->nvp_altstatus);
4050 		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4051 		nv_reset(nvp);
4052 
4053 		return;
4054 	}
4055 
4056 	/*
4057 	 * check for errors
4058 	 */
4059 	if ((status & (SATA_STATUS_DRQ | SATA_STATUS_DF |
4060 	    SATA_STATUS_ERR)) != SATA_STATUS_DRQ) {
4061 		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4062 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4063 		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4064 
4065 		return;
4066 	}
4067 
4068 	/*
4069 	 * read the next chunk of data (if any)
4070 	 */
4071 	count = min(nv_slotp->nvslot_byte_count, NV_BYTES_PER_SEC);
4072 
4073 	/*
4074 	 * read count bytes
4075 	 */
4076 	ASSERT(count != 0);
4077 
4078 	ddi_rep_get16(cmdhdl, (ushort_t *)nv_slotp->nvslot_v_addr,
4079 	    (ushort_t *)nvp->nvp_data, (count >> 1), DDI_DEV_NO_AUTOINCR);
4080 
4081 	nv_slotp->nvslot_v_addr += count;
4082 	nv_slotp->nvslot_byte_count -= count;
4083 
4084 
4085 	if (nv_slotp->nvslot_byte_count != 0) {
4086 		/*
4087 		 * more to transfer.  Wait for next interrupt.
4088 		 */
4089 		return;
4090 	}
4091 
4092 	/*
4093 	 * transfer is complete. wait for the busy bit to settle.
4094 	 */
4095 	NV_DELAY_NSEC(400);
4096 
4097 	spkt->satapkt_reason = SATA_PKT_COMPLETED;
4098 	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4099 }
4100 
4101 
4102 /*
 * ATA command, PIO data out
4104  */
4105 static void
4106 nv_intr_pio_out(nv_port_t *nvp, nv_slot_t *nv_slotp)
4107 {
4108 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4109 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4110 	uchar_t status;
4111 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4112 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4113 	int count;
4114 
4115 	/*
4116 	 * clear the IRQ
4117 	 */
4118 	status = nv_get8(cmdhdl, nvp->nvp_status);
4119 
4120 	if (status & SATA_STATUS_BSY) {
4121 		/*
4122 		 * this should not happen
4123 		 */
4124 		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4125 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4126 		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4127 		    nvp->nvp_altstatus);
4128 		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4129 
4130 		return;
4131 	}
4132 
4133 	/*
4134 	 * check for errors
4135 	 */
4136 	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
4137 		nv_copy_registers(nvp,  &spkt->satapkt_device, spkt);
4138 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4139 		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4140 
4141 		return;
4142 	}
4143 
4144 	/*
	 * DRQ deasserted signals that the drive is no longer ready to
	 * transfer.  The transfer most likely completed successfully, but
	 * verify by checking that byte_count has reached zero.
4149 	 */
4150 	if ((status & SATA_STATUS_DRQ) == 0) {
4151 
4152 		if (nv_slotp->nvslot_byte_count == 0) {
4153 			/*
4154 			 * complete; successful transfer
4155 			 */
4156 			spkt->satapkt_reason = SATA_PKT_COMPLETED;
4157 		} else {
4158 			/*
4159 			 * error condition, incomplete transfer
4160 			 */
4161 			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4162 			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4163 		}
4164 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4165 
4166 		return;
4167 	}
4168 
4169 	/*
4170 	 * write the next chunk of data
4171 	 */
4172 	count = min(nv_slotp->nvslot_byte_count, NV_BYTES_PER_SEC);
4173 
4174 	/*
4175 	 * read or write count bytes
4176 	 */
4177 
4178 	ASSERT(count != 0);
4179 
4180 	ddi_rep_put16(cmdhdl, (ushort_t *)nv_slotp->nvslot_v_addr,
4181 	    (ushort_t *)nvp->nvp_data, (count >> 1), DDI_DEV_NO_AUTOINCR);
4182 
4183 	nv_slotp->nvslot_v_addr += count;
4184 	nv_slotp->nvslot_byte_count -= count;
4185 }
4186 
4187 
4188 /*
4189  * ATAPI PACKET command, PIO in/out interrupt
4190  *
4191  * Under normal circumstances, one of four different interrupt scenarios
4192  * will result in this function being called:
4193  *
4194  * 1. Packet command data transfer
4195  * 2. Packet command completion
4196  * 3. Request sense data transfer
4197  * 4. Request sense command completion
4198  */
4199 static void
4200 nv_intr_pkt_pio(nv_port_t *nvp, nv_slot_t *nv_slotp)
4201 {
4202 	uchar_t	status;
4203 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4204 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4205 	int direction = sata_cmdp->satacmd_flags.sata_data_direction;
4206 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4207 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4208 	uint16_t ctlr_count;
4209 	int count;
4210 
4211 	/* ATAPI protocol state - HP2: Check_Status_B */
4212 
4213 	status = nv_get8(cmdhdl, nvp->nvp_status);
4214 	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4215 	    "nv_intr_pkt_pio: status 0x%x", status));
4216 
4217 	if (status & SATA_STATUS_BSY) {
4218 		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) != 0) {
4219 			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4220 			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4221 		} else {
4222 			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4223 			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4224 
4225 			nv_reset(nvp);
4226 		}
4227 
4228 		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4229 		    "nv_intr_pkt_pio: busy - status 0x%x", status));
4230 
4231 		return;
4232 	}
4233 
4234 	if ((status & SATA_STATUS_DF) != 0) {
4235 		/*
4236 		 * On device fault, just clean up and bail.  Request sense
4237 		 * will just default to its NO SENSE initialized value.
4238 		 */
4239 
4240 		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) == 0) {
4241 			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4242 		}
4243 
4244 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4245 		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4246 
4247 		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4248 		    nvp->nvp_altstatus);
4249 		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl,
4250 		    nvp->nvp_error);
4251 
4252 		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4253 		    "nv_intr_pkt_pio: device fault"));
4254 
4255 		return;
4256 	}
4257 
4258 	if ((status & SATA_STATUS_ERR) != 0) {
4259 		/*
4260 		 * On command error, figure out whether we are processing a
4261 		 * request sense.  If so, clean up and bail.  Otherwise,
4262 		 * do a REQUEST SENSE.
4263 		 */
4264 
4265 		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) == 0) {
4266 			nv_slotp->nvslot_flags |= NVSLOT_RQSENSE;
4267 			if (nv_start_rqsense_pio(nvp, nv_slotp) ==
4268 			    NV_FAILURE) {
4269 				nv_copy_registers(nvp, &spkt->satapkt_device,
4270 				    spkt);
4271 				nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4272 				spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4273 			}
4274 
4275 			sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4276 			    nvp->nvp_altstatus);
4277 			sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl,
4278 			    nvp->nvp_error);
4279 		} else {
4280 			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4281 			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4282 
4283 			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4284 		}
4285 
4286 		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4287 		    "nv_intr_pkt_pio: error (status 0x%x)", status));
4288 
4289 		return;
4290 	}
4291 
4292 	if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) != 0) {
4293 		/*
4294 		 * REQUEST SENSE command processing
4295 		 */
4296 
4297 		if ((status & (SATA_STATUS_DRQ)) != 0) {
4298 			/* ATAPI state - HP4: Transfer_Data */
4299 
4300 			/* read the byte count from the controller */
4301 			ctlr_count =
4302 			    (uint16_t)nv_get8(cmdhdl, nvp->nvp_hcyl) << 8;
4303 			ctlr_count |= nv_get8(cmdhdl, nvp->nvp_lcyl);
4304 
4305 			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4306 			    "nv_intr_pkt_pio: ctlr byte count - %d",
4307 			    ctlr_count));
4308 
4309 			if (ctlr_count == 0) {
4310 				/* no data to transfer - some devices do this */
4311 
4312 				spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4313 				nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4314 
4315 				NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4316 				    "nv_intr_pkt_pio: done (no data)"));
4317 
4318 				return;
4319 			}
4320 
4321 			count = min(ctlr_count, SATA_ATAPI_RQSENSE_LEN);
4322 
4323 			/* transfer the data */
4324 			ddi_rep_get16(cmdhdl,
4325 			    (ushort_t *)nv_slotp->nvslot_rqsense_buff,
4326 			    (ushort_t *)nvp->nvp_data, (count >> 1),
4327 			    DDI_DEV_NO_AUTOINCR);
4328 
4329 			/* consume residual bytes */
4330 			ctlr_count -= count;
4331 
4332 			if (ctlr_count > 0) {
4333 				for (; ctlr_count > 0; ctlr_count -= 2)
4334 					(void) ddi_get16(cmdhdl,
4335 					    (ushort_t *)nvp->nvp_data);
4336 			}
4337 
4338 			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4339 			    "nv_intr_pkt_pio: transition to HP2"));
4340 		} else {
4341 			/* still in ATAPI state - HP2 */
4342 
4343 			/*
4344 			 * In order to avoid clobbering the rqsense data
4345 			 * set by the SATA framework, the sense data read
4346 			 * from the device is put in a separate buffer and
4347 			 * copied into the packet after the request sense
4348 			 * command successfully completes.
4349 			 */
4350 			bcopy(nv_slotp->nvslot_rqsense_buff,
4351 			    spkt->satapkt_cmd.satacmd_rqsense,
4352 			    SATA_ATAPI_RQSENSE_LEN);
4353 
4354 			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4355 			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4356 
4357 			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4358 			    "nv_intr_pkt_pio: request sense done"));
4359 		}
4360 
4361 		return;
4362 	}
4363 
4364 	/*
4365 	 * Normal command processing
4366 	 */
4367 
4368 	if ((status & (SATA_STATUS_DRQ)) != 0) {
4369 		/* ATAPI protocol state - HP4: Transfer_Data */
4370 
4371 		/* read the byte count from the controller */
4372 		ctlr_count = (uint16_t)nv_get8(cmdhdl, nvp->nvp_hcyl) << 8;
4373 		ctlr_count |= nv_get8(cmdhdl, nvp->nvp_lcyl);
4374 
4375 		if (ctlr_count == 0) {
4376 			/* no data to transfer - some devices do this */
4377 
4378 			spkt->satapkt_reason = SATA_PKT_COMPLETED;
4379 			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4380 
4381 			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4382 			    "nv_intr_pkt_pio: done (no data)"));
4383 
4384 			return;
4385 		}
4386 
4387 		count = min(ctlr_count, nv_slotp->nvslot_byte_count);
4388 
4389 		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4390 		    "nv_intr_pkt_pio: drive_bytes 0x%x", ctlr_count));
4391 
4392 		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4393 		    "nv_intr_pkt_pio: byte_count 0x%x",
4394 		    nv_slotp->nvslot_byte_count));
4395 
4396 		/* transfer the data */
4397 
4398 		if (direction == SATA_DIR_READ) {
4399 			ddi_rep_get16(cmdhdl,
4400 			    (ushort_t *)nv_slotp->nvslot_v_addr,
4401 			    (ushort_t *)nvp->nvp_data, (count >> 1),
4402 			    DDI_DEV_NO_AUTOINCR);
4403 
4404 			ctlr_count -= count;
4405 
4406 			if (ctlr_count > 0) {
				/* consume remaining bytes */
4408 
4409 				for (; ctlr_count > 0;
4410 				    ctlr_count -= 2)
4411 					(void) ddi_get16(cmdhdl,
4412 					    (ushort_t *)nvp->nvp_data);
4413 
4414 				NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4415 				    "nv_intr_pkt_pio: bytes remained"));
4416 			}
4417 		} else {
4418 			ddi_rep_put16(cmdhdl,
4419 			    (ushort_t *)nv_slotp->nvslot_v_addr,
4420 			    (ushort_t *)nvp->nvp_data, (count >> 1),
4421 			    DDI_DEV_NO_AUTOINCR);
4422 		}
4423 
4424 		nv_slotp->nvslot_v_addr += count;
4425 		nv_slotp->nvslot_byte_count -= count;
4426 
4427 		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4428 		    "nv_intr_pkt_pio: transition to HP2"));
4429 	} else {
4430 		/* still in ATAPI state - HP2 */
4431 
4432 		spkt->satapkt_reason = SATA_PKT_COMPLETED;
4433 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4434 
4435 		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4436 		    "nv_intr_pkt_pio: done"));
4437 	}
4438 }
4439 
4440 
4441 /*
4442  * ATA command, DMA data in/out
4443  */
4444 static void
4445 nv_intr_dma(nv_port_t *nvp, struct nv_slot *nv_slotp)
4446 {
4447 	uchar_t status;
4448 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4449 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4450 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4451 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4452 	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
4453 	uchar_t	bmicx;
4454 	uchar_t bm_status;
4455 
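	/*
	 * DMA transfers complete within a single interrupt, so the slot
	 * can be marked complete right away.
	 */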
4456 	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4457 
4458 	/*
4459 	 * stop DMA engine.
4460 	 */
4461 	bmicx = nv_get8(bmhdl, nvp->nvp_bmicx);
4462 	nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
4463 
4464 	/*
4465 	 * get the status and clear the IRQ, and check for DMA error
4466 	 */
4467 	status = nv_get8(cmdhdl, nvp->nvp_status);
4468 
4469 	/*
4470 	 * check for drive errors
4471 	 */
4472 	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
4473 		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4474 		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4475 		(void) nv_bm_status_clear(nvp);
4476 
4477 		return;
4478 	}
4479 
4480 	bm_status = nv_bm_status_clear(nvp);
4481 
4482 	/*
4483 	 * check for bus master errors
4484 	 */
4485 	if (bm_status & BMISX_IDERR) {
4486 		spkt->satapkt_reason = SATA_PKT_RESET;
4487 		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4488 		    nvp->nvp_altstatus);
4489 		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4490 		nv_reset(nvp);
4491 
4492 		return;
4493 	}
4494 
4495 	spkt->satapkt_reason = SATA_PKT_COMPLETED;
4496 }
4497 
4498 
4499 /*
4500  * Wait for a register of a controller to achieve a specific state.
4501  * To return normally, all the bits in the first sub-mask must be ON,
4502  * all the bits in the second sub-mask must be OFF.
 * If timeout_usec microseconds pass without the controller achieving
 * the desired bit configuration, return B_FALSE, otherwise return B_TRUE.
4505  *
4506  * hybrid waiting algorithm: if not in interrupt context, busy looping will
4507  * occur for the first 250 us, then switch over to a sleeping wait.
4508  *
4509  */
4510 int
4511 nv_wait(nv_port_t *nvp, uchar_t onbits, uchar_t offbits, uint_t timeout_usec,
4512     int type_wait)
4513 {
4514 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4515 	hrtime_t end, cur, start_sleep, start;
4516 	int first_time = B_TRUE;
4517 	ushort_t val;
4518 
4519 	for (;;) {
4520 		val = nv_get8(ctlhdl, nvp->nvp_altstatus);
4521 
4522 		if ((val & onbits) == onbits && (val & offbits) == 0) {
4523 
4524 			return (B_TRUE);
4525 		}
4526 
4527 		cur = gethrtime();
4528 
4529 		/*
4530 		 * store the start time and calculate the end
4531 		 * time.  also calculate "start_sleep" which is
4532 		 * the point after which the driver will stop busy
4533 		 * waiting and change to sleep waiting.
4534 		 */
4535 		if (first_time) {
4536 			first_time = B_FALSE;
4537 			/*
4538 			 * start and end are in nanoseconds
4539 			 */
4540 			start = cur;
4541 			end = start + timeout_usec * 1000;
4542 			/*
			 * add 250 us to start
4544 			 */
4545 			start_sleep =  start + 250000;
4546 
4547 			if (servicing_interrupt()) {
4548 				type_wait = NV_NOSLEEP;
4549 			}
4550 		}
4551 
4552 		if (cur > end) {
4553 
4554 			break;
4555 		}
4556 
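		/*
		 * busy wait for the first 250 us (or always when called in
		 * interrupt context), then back off to sleeping one tick
		 * at a time.
		 */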
4557 		if ((type_wait != NV_NOSLEEP) && (cur > start_sleep)) {
4558 #if ! defined(__lock_lint)
4559 			delay(1);
4560 #endif
4561 		} else {
4562 			drv_usecwait(nv_usec_delay);
4563 		}
4564 	}
4565 
4566 	return (B_FALSE);
4567 }
4568 
4569 
4570 /*
4571  * This is a slightly more complicated version that checks
 * for error conditions and bails out rather than looping
4573  * until the timeout is exceeded.
4574  *
4575  * hybrid waiting algorithm: if not in interrupt context, busy looping will
4576  * occur for the first 250 us, then switch over to a sleeping wait.
4577  */
4578 int
4579 nv_wait3(
4580 	nv_port_t	*nvp,
4581 	uchar_t		onbits1,
4582 	uchar_t		offbits1,
4583 	uchar_t		failure_onbits2,
4584 	uchar_t		failure_offbits2,
4585 	uchar_t		failure_onbits3,
4586 	uchar_t		failure_offbits3,
4587 	uint_t		timeout_usec,
4588 	int		type_wait)
4589 {
4590 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4591 	hrtime_t end, cur, start_sleep, start;
4592 	int first_time = B_TRUE;
4593 	ushort_t val;
4594 
4595 	for (;;) {
4596 		val = nv_get8(ctlhdl, nvp->nvp_altstatus);
4597 
4598 		/*
4599 		 * check for expected condition
4600 		 */
4601 		if ((val & onbits1) == onbits1 && (val & offbits1) == 0) {
4602 
4603 			return (B_TRUE);
4604 		}
4605 
4606 		/*
4607 		 * check for error conditions
4608 		 */
4609 		if ((val & failure_onbits2) == failure_onbits2 &&
4610 		    (val & failure_offbits2) == 0) {
4611 
4612 			return (B_FALSE);
4613 		}
4614 
4615 		if ((val & failure_onbits3) == failure_onbits3 &&
4616 		    (val & failure_offbits3) == 0) {
4617 
4618 			return (B_FALSE);
4619 		}
4620 
4621 		/*
4622 		 * store the start time and calculate the end
4623 		 * time.  also calculate "start_sleep" which is
4624 		 * the point after which the driver will stop busy
4625 		 * waiting and change to sleep waiting.
4626 		 */
4627 		if (first_time) {
4628 			first_time = B_FALSE;
4629 			/*
4630 			 * start and end are in nanoseconds
4631 			 */
4632 			cur = start = gethrtime();
4633 			end = start + timeout_usec * 1000;
4634 			/*
			 * add 250 us to start
4636 			 */
4637 			start_sleep =  start + 250000;
4638 
4639 			if (servicing_interrupt()) {
4640 				type_wait = NV_NOSLEEP;
4641 			}
4642 		} else {
4643 			cur = gethrtime();
4644 		}
4645 
4646 		if (cur > end) {
4647 
4648 			break;
4649 		}
4650 
4651 		if ((type_wait != NV_NOSLEEP) && (cur > start_sleep)) {
4652 #if ! defined(__lock_lint)
4653 			delay(1);
4654 #endif
4655 		} else {
4656 			drv_usecwait(nv_usec_delay);
4657 		}
4658 	}
4659 
4660 	return (B_FALSE);
4661 }
4662 
4663 
4664 /*
 * nv_check_link() checks whether the specified link is active, i.e. a
 * device is present and communicating.
4667  */
4668 static boolean_t
4669 nv_check_link(uint32_t sstatus)
4670 {
4671 	uint8_t det;
4672 
4673 	det = (sstatus & SSTATUS_DET) >> SSTATUS_DET_SHIFT;
4674 
4675 	return (det == SSTATUS_DET_DEVPRE_PHYCOM);
4676 }
4677 
4678 
4679 /*
4680  * nv_port_state_change() reports the state of the port to the
4681  * sata module by calling sata_hba_event_notify().  This
4682  * function is called any time the state of the port is changed
4683  */
4684 static void
4685 nv_port_state_change(nv_port_t *nvp, int event, uint8_t addr_type, int state)
4686 {
4687 	sata_device_t sd;
4688 
4689 	bzero((void *)&sd, sizeof (sata_device_t));
4690 	sd.satadev_rev = SATA_DEVICE_REV;
4691 	nv_copy_registers(nvp, &sd, NULL);
4692 
4693 	/*
	 * When NCQ is implemented, the sactive and snotific fields need to
	 * be updated.
4696 	 */
4697 	sd.satadev_addr.cport = nvp->nvp_port_num;
4698 	sd.satadev_addr.qual = addr_type;
4699 	sd.satadev_state = state;
4700 
4701 	sata_hba_event_notify(nvp->nvp_ctlp->nvc_dip, &sd, event);
4702 }
4703 
4704 
4705 /*
4706  * timeout processing:
4707  *
4708  * Check if any packets have crossed a timeout threshold.  If so, then
4709  * abort the packet.  This function is not NCQ aware.
4710  *
 * If a reset was invoked anywhere other than nv_sata_probe(), then
 * monitor for reset completion here.
4713  *
4714  */
4715 static void
4716 nv_timeout(void *arg)
4717 {
4718 	nv_port_t *nvp = arg;
4719 	nv_slot_t *nv_slotp;
4720 	int restart_timeout = B_FALSE;
4721 
4722 	mutex_enter(&nvp->nvp_mutex);
4723 
4724 	/*
4725 	 * If the probe entry point is driving the reset and signature
4726 	 * acquisition, just return.
4727 	 */
4728 	if (nvp->nvp_state & NV_PORT_RESET_PROBE) {
4729 		goto finished;
4730 	}
4731 
4732 	/*
4733 	 * If the port is not in the init state, it likely
4734 	 * means the link was lost while a timeout was active.
4735 	 */
4736 	if ((nvp->nvp_state & NV_PORT_INIT) == 0) {
4737 		NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
4738 		    "nv_timeout: port uninitialized"));
4739 
4740 		goto finished;
4741 	}
4742 
4743 	if (nvp->nvp_state & NV_PORT_RESET) {
4744 		ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
4745 		uint32_t sstatus;
4746 
4747 		NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
4748 		    "nv_timeout(): port waiting for signature"));
4749 
4750 		sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
4751 
4752 		/*
4753 		 * check for link presence.  If the link remains
4754 		 * missing for more than 2 seconds, send a remove
4755 		 * event and abort signature acquisition.
4756 		 */
4757 		if (nv_check_link(sstatus) == B_FALSE) {
4758 			clock_t e_link_lost = ddi_get_lbolt();
4759 
4760 			if (nvp->nvp_link_lost_time == 0) {
4761 				nvp->nvp_link_lost_time = e_link_lost;
4762 			}
4763 			if (TICK_TO_SEC(e_link_lost -
4764 			    nvp->nvp_link_lost_time) < NV_LINK_LOST_OK) {
4765 				NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
4766 				    "probe: intermittent link lost while"
4767 				    " resetting"));
4768 				restart_timeout = B_TRUE;
4769 			} else {
4770 				NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
4771 				    "link lost during signature acquisition."
4772 				    "  Giving up"));
4773 				nv_port_state_change(nvp,
4774 				    SATA_EVNT_DEVICE_DETACHED|
4775 				    SATA_EVNT_LINK_LOST,
4776 				    SATA_ADDR_CPORT, 0);
4777 				nvp->nvp_state |= NV_PORT_HOTREMOVED;
4778 				nvp->nvp_state &= ~NV_PORT_RESET;
4779 			}
4780 
4781 			goto finished;
4782 		} else {
4783 
4784 			nvp->nvp_link_lost_time = 0;
4785 		}
4786 
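		/*
		 * The link is present, so try to read the device signature
		 * to determine whether the reset has completed.
		 */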
4787 		nv_read_signature(nvp);
4788 
4789 		if (nvp->nvp_signature != 0) {
4790 			if ((nvp->nvp_type == SATA_DTYPE_ATADISK) ||
4791 			    (nvp->nvp_type == SATA_DTYPE_ATAPICD)) {
4792 				nvp->nvp_state |= NV_PORT_RESTORE;
4793 				nv_port_state_change(nvp,
4794 				    SATA_EVNT_DEVICE_RESET,
4795 				    SATA_ADDR_DCPORT,
4796 				    SATA_DSTATE_RESET|SATA_DSTATE_PWR_ACTIVE);
4797 			}
4798 
4799 			goto finished;
4800 		}
4801 
4802 		/*
		 * Reset if more than 5 seconds have passed without
4804 		 * acquiring a signature.
4805 		 */
4806 		if (TICK_TO_SEC(ddi_get_lbolt() - nvp->nvp_reset_time) > 5) {
4807 			nv_reset(nvp);
4808 		}
4809 
4810 		restart_timeout = B_TRUE;
4811 		goto finished;
4812 	}
4813 
4814 
4815 	/*
4816 	 * not yet NCQ aware
4817 	 */
4818 	nv_slotp = &(nvp->nvp_slot[0]);
4819 
4820 	/*
	 * This happens early on, before nv_slotp is set up, or when a
	 * device was unexpectedly removed while there was an active packet.
4824 	 */
4825 	if (nv_slotp == NULL) {
4826 		NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
4827 		    "nv_timeout: nv_slotp == NULL"));
4828 
4829 		goto finished;
4830 	}
4831 
4832 	/*
4833 	 * perform timeout checking and processing only if there is an
4834 	 * active packet on the port
4835 	 */
4836 	if (nv_slotp->nvslot_spkt != NULL)  {
4837 		sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4838 		sata_cmd_t *satacmd = &spkt->satapkt_cmd;
4839 		uint8_t cmd = satacmd->satacmd_cmd_reg;
4840 		uint64_t lba;
4841 
4842 #if ! defined(__lock_lint) && defined(DEBUG)
4843 
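		/*
		 * reconstruct the 48-bit LBA from the individual taskfile
		 * register values for the debug log below
		 */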
4844 		lba = (uint64_t)satacmd->satacmd_lba_low_lsb |
4845 		    ((uint64_t)satacmd->satacmd_lba_mid_lsb << 8) |
4846 		    ((uint64_t)satacmd->satacmd_lba_high_lsb << 16) |
4847 		    ((uint64_t)satacmd->satacmd_lba_low_msb << 24) |
4848 		    ((uint64_t)satacmd->satacmd_lba_mid_msb << 32) |
4849 		    ((uint64_t)satacmd->satacmd_lba_high_msb << 40);
4850 #endif
4851 
4852 		/*
4853 		 * timeout not needed if there is a polling thread
4854 		 */
4855 		if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
4856 
4857 			goto finished;
4858 		}
4859 
4860 		if (TICK_TO_SEC(ddi_get_lbolt() - nv_slotp->nvslot_stime) >
4861 		    spkt->satapkt_time) {
4862 			NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
4863 			    "abort timeout: "
4864 			    "nvslot_stime: %ld max ticks till timeout: "
4865 			    "%ld cur_time: %ld cmd=%x lba=%d",
4866 			    nv_slotp->nvslot_stime, drv_usectohz(MICROSEC *
4867 			    spkt->satapkt_time), ddi_get_lbolt(), cmd, lba));
4868 
4869 			(void) nv_abort_active(nvp, spkt, SATA_PKT_TIMEOUT);
4870 
4871 		} else {
4872 			NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp, "nv_timeout:"
4873 			    " still in use so restarting timeout"));
4874 		}
4875 		restart_timeout = B_TRUE;
4876 
4877 	} else {
4878 		/*
4879 		 * there was no active packet, so do not re-enable timeout
4880 		 */
4881 		NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
4882 		    "nv_timeout: no active packet so not re-arming timeout"));
4883 	}
4884 
4885 	finished:
4886 
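	/*
	 * Re-arm the one second timeout only if this pass found work that
	 * still needs to be monitored; otherwise let it lapse.
	 */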
4887 	if (restart_timeout == B_TRUE) {
4888 		nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
4889 		    drv_usectohz(NV_ONE_SEC));
4890 	} else {
4891 		nvp->nvp_timeout_id = 0;
4892 	}
4893 	mutex_exit(&nvp->nvp_mutex);
4894 }
4895 
4896 
4897 /*
4898  * enable or disable the 3 interrupt types the driver is
4899  * interested in: completion, add and remove.
4900  */
4901 static void
4902 mcp04_set_intr(nv_port_t *nvp, int flag)
4903 {
4904 	nv_ctl_t *nvc = nvp->nvp_ctlp;
4905 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
4906 	uchar_t *bar5  = nvc->nvc_bar_addr[5];
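	/*
	 * per-port interrupt enable and clear-all masks, indexed below by
	 * the port number
	 */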
4907 	uint8_t intr_bits[] = { MCP04_INT_PDEV_HOT|MCP04_INT_PDEV_INT,
4908 	    MCP04_INT_SDEV_HOT|MCP04_INT_SDEV_INT };
4909 	uint8_t clear_all_bits[] = { MCP04_INT_PDEV_ALL, MCP04_INT_SDEV_ALL };
4910 	uint8_t int_en, port = nvp->nvp_port_num, intr_status;
4911 
4912 	ASSERT(mutex_owned(&nvp->nvp_mutex));
4913 
4914 	/*
	 * The controller-level lock is also required since access to the
	 * 8-bit interrupt register is shared between both channels.
4917 	 */
4918 	mutex_enter(&nvc->nvc_mutex);
4919 
4920 	if (flag & NV_INTR_CLEAR_ALL) {
4921 		NVLOG((NVDBG_INTR, nvc, nvp,
4922 		    "mcp04_set_intr: NV_INTR_CLEAR_ALL"));
4923 
4924 		intr_status = nv_get8(nvc->nvc_bar_hdl[5],
4925 		    (uint8_t *)(nvc->nvc_mcp04_int_status));
4926 
4927 		if (intr_status & clear_all_bits[port]) {
4928 
4929 			nv_put8(nvc->nvc_bar_hdl[5],
4930 			    (uint8_t *)(nvc->nvc_mcp04_int_status),
4931 			    clear_all_bits[port]);
4932 
4933 			NVLOG((NVDBG_INTR, nvc, nvp,
4934 			    "interrupt bits cleared %x",
4935 			    intr_status & clear_all_bits[port]));
4936 		}
4937 	}
4938 
4939 	if (flag & NV_INTR_DISABLE) {
4940 		NVLOG((NVDBG_INTR, nvc, nvp,
4941 		    "mcp04_set_intr: NV_INTR_DISABLE"));
4942 		int_en = nv_get8(bar5_hdl,
4943 		    (uint8_t *)(bar5 + MCP04_SATA_INT_EN));
4944 		int_en &= ~intr_bits[port];
4945 		nv_put8(bar5_hdl, (uint8_t *)(bar5 + MCP04_SATA_INT_EN),
4946 		    int_en);
4947 	}
4948 
4949 	if (flag & NV_INTR_ENABLE) {
4950 		NVLOG((NVDBG_INTR, nvc, nvp, "mcp04_set_intr: NV_INTR_ENABLE"));
4951 		int_en = nv_get8(bar5_hdl,
4952 		    (uint8_t *)(bar5 + MCP04_SATA_INT_EN));
4953 		int_en |= intr_bits[port];
4954 		nv_put8(bar5_hdl, (uint8_t *)(bar5 + MCP04_SATA_INT_EN),
4955 		    int_en);
4956 	}
4957 
4958 	mutex_exit(&nvc->nvc_mutex);
4959 }
4960 
4961 
4962 /*
4963  * enable or disable the 3 interrupts the driver is interested in:
4964  * completion interrupt, hot add, and hot remove interrupt.
4965  */
4966 static void
4967 mcp55_set_intr(nv_port_t *nvp, int flag)
4968 {
4969 	nv_ctl_t *nvc = nvp->nvp_ctlp;
4970 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
4971 	uint16_t intr_bits =
4972 	    MCP55_INT_ADD|MCP55_INT_REM|MCP55_INT_COMPLETE;
4973 	uint16_t int_en;
4974 
4975 	ASSERT(mutex_owned(&nvp->nvp_mutex));
4976 
4977 	NVLOG((NVDBG_HOT, nvc, nvp, "mcp055_set_intr: enter flag: %d", flag));
4978 
4979 	if (flag & NV_INTR_CLEAR_ALL) {
4980 		NVLOG((NVDBG_INTR, nvc, nvp,
4981 		    "mcp55_set_intr: NV_INTR_CLEAR_ALL"));
4982 		nv_put16(bar5_hdl, nvp->nvp_mcp55_int_status, MCP55_INT_CLEAR);
4983 	}
4984 
4985 	if (flag & NV_INTR_ENABLE) {
4986 		NVLOG((NVDBG_INTR, nvc, nvp, "mcp55_set_intr: NV_INTR_ENABLE"));
4987 		int_en = nv_get16(bar5_hdl, nvp->nvp_mcp55_int_ctl);
4988 		int_en |= intr_bits;
4989 		nv_put16(bar5_hdl, nvp->nvp_mcp55_int_ctl, int_en);
4990 	}
4991 
4992 	if (flag & NV_INTR_DISABLE) {
4993 		NVLOG((NVDBG_INTR, nvc, nvp,
4994 		    "mcp55_set_intr: NV_INTR_DISABLE"));
4995 		int_en = nv_get16(bar5_hdl, nvp->nvp_mcp55_int_ctl);
4996 		int_en &= ~intr_bits;
4997 		nv_put16(bar5_hdl, nvp->nvp_mcp55_int_ctl, int_en);
4998 	}
4999 }
5000 
5001 
5002 /*
5003  * The PM functions for suspend and resume are incomplete and need additional
5004  * work.  It may or may not work in the current state.
5005  */
5006 static void
5007 nv_resume(nv_port_t *nvp)
5008 {
5009 	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_resume()"));
5010 
5011 	mutex_enter(&nvp->nvp_mutex);
5012 
5013 	if (nvp->nvp_state & NV_PORT_INACTIVE) {
5014 		mutex_exit(&nvp->nvp_mutex);
5015 
5016 		return;
5017 	}
5018 
5019 	(*(nvp->nvp_ctlp->nvc_set_intr))(nvp, NV_INTR_CLEAR_ALL|NV_INTR_ENABLE);
5020 
5021 	/*
	 * power may have been removed from the port and the
5023 	 * drive, and/or a drive may have been added or removed.
5024 	 * Force a reset which will cause a probe and re-establish
5025 	 * any state needed on the drive.
5026 	 * nv_reset(nvp);
5027 	 */
5028 
5029 	nv_reset(nvp);
5030 
5031 	mutex_exit(&nvp->nvp_mutex);
5032 }
5033 
5034 /*
5035  * The PM functions for suspend and resume are incomplete and need additional
5036  * work.  It may or may not work in the current state.
5037  */
5038 static void
5039 nv_suspend(nv_port_t *nvp)
5040 {
5041 	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_suspend()"));
5042 
5043 	mutex_enter(&nvp->nvp_mutex);
5044 
5045 	if (nvp->nvp_state & NV_PORT_INACTIVE) {
5046 		mutex_exit(&nvp->nvp_mutex);
5047 
5048 		return;
5049 	}
5050 
5051 	(*(nvp->nvp_ctlp->nvc_set_intr))(nvp, NV_INTR_DISABLE);
5052 
5053 	/*
	 * power may have been removed from the port and the
5055 	 * drive, and/or a drive may have been added or removed.
5056 	 * Force a reset which will cause a probe and re-establish
5057 	 * any state needed on the drive.
5058 	 * nv_reset(nvp);
5059 	 */
5060 
5061 	mutex_exit(&nvp->nvp_mutex);
5062 }
5063 
5064 
5065 static void
5066 nv_copy_registers(nv_port_t *nvp, sata_device_t *sd, sata_pkt_t *spkt)
5067 {
5068 	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
5069 	sata_cmd_t *scmd = &spkt->satapkt_cmd;
5070 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
5071 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
5072 	uchar_t status;
5073 	struct sata_cmd_flags flags;
5074 
5075 	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_copy_registers()"));
5076 
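	/*
	 * Always snapshot the SStatus, SError and SControl registers for
	 * the sata module.
	 */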
5077 	sd->satadev_scr.sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
5078 	sd->satadev_scr.serror = nv_get32(bar5_hdl, nvp->nvp_serror);
5079 	sd->satadev_scr.scontrol = nv_get32(bar5_hdl, nvp->nvp_sctrl);
5080 
5081 	if (spkt == NULL) {
5082 
5083 		return;
5084 	}
5085 
5086 	/*
	 * In the error case, implicitly request copy-out of the registers
	 * needed for error handling.
5089 	 */
5090 	status = scmd->satacmd_status_reg = nv_get8(ctlhdl,
5091 	    nvp->nvp_altstatus);
5092 
5093 	flags = scmd->satacmd_flags;
5094 
5095 	if (status & SATA_STATUS_ERR) {
5096 		flags.sata_copy_out_lba_low_msb = B_TRUE;
5097 		flags.sata_copy_out_lba_mid_msb = B_TRUE;
5098 		flags.sata_copy_out_lba_high_msb = B_TRUE;
5099 		flags.sata_copy_out_lba_low_lsb = B_TRUE;
5100 		flags.sata_copy_out_lba_mid_lsb = B_TRUE;
5101 		flags.sata_copy_out_lba_high_lsb = B_TRUE;
5102 		flags.sata_copy_out_error_reg = B_TRUE;
5103 		flags.sata_copy_out_sec_count_msb = B_TRUE;
5104 		flags.sata_copy_out_sec_count_lsb = B_TRUE;
5105 		scmd->satacmd_status_reg = status;
5106 	}
5107 
5108 	if (scmd->satacmd_addr_type & ATA_ADDR_LBA48) {
5109 
5110 		/*
5111 		 * set HOB so that high byte will be read
5112 		 */
5113 		nv_put8(ctlhdl, nvp->nvp_devctl, ATDC_HOB|ATDC_D3);
5114 
5115 		/*
5116 		 * get the requested high bytes
5117 		 */
5118 		if (flags.sata_copy_out_sec_count_msb) {
5119 			scmd->satacmd_sec_count_msb =
5120 			    nv_get8(cmdhdl, nvp->nvp_count);
5121 		}
5122 
5123 		if (flags.sata_copy_out_lba_low_msb) {
5124 			scmd->satacmd_lba_low_msb =
5125 			    nv_get8(cmdhdl, nvp->nvp_sect);
5126 		}
5127 
5128 		if (flags.sata_copy_out_lba_mid_msb) {
5129 			scmd->satacmd_lba_mid_msb =
5130 			    nv_get8(cmdhdl, nvp->nvp_lcyl);
5131 		}
5132 
5133 		if (flags.sata_copy_out_lba_high_msb) {
5134 			scmd->satacmd_lba_high_msb =
5135 			    nv_get8(cmdhdl, nvp->nvp_hcyl);
5136 		}
5137 	}
5138 
5139 	/*
5140 	 * disable HOB so that low byte is read
5141 	 */
5142 	nv_put8(ctlhdl, nvp->nvp_devctl, ATDC_D3);
5143 
5144 	/*
5145 	 * get the requested low bytes
5146 	 */
5147 	if (flags.sata_copy_out_sec_count_lsb) {
5148 		scmd->satacmd_sec_count_lsb = nv_get8(cmdhdl, nvp->nvp_count);
5149 	}
5150 
5151 	if (flags.sata_copy_out_lba_low_lsb) {
5152 		scmd->satacmd_lba_low_lsb = nv_get8(cmdhdl, nvp->nvp_sect);
5153 	}
5154 
5155 	if (flags.sata_copy_out_lba_mid_lsb) {
5156 		scmd->satacmd_lba_mid_lsb = nv_get8(cmdhdl, nvp->nvp_lcyl);
5157 	}
5158 
5159 	if (flags.sata_copy_out_lba_high_lsb) {
5160 		scmd->satacmd_lba_high_lsb = nv_get8(cmdhdl, nvp->nvp_hcyl);
5161 	}
5162 
5163 	/*
5164 	 * get the device register if requested
5165 	 */
5166 	if (flags.sata_copy_out_device_reg) {
5167 		scmd->satacmd_device_reg =  nv_get8(cmdhdl, nvp->nvp_drvhd);
5168 	}
5169 
5170 	/*
5171 	 * get the error register if requested
5172 	 */
5173 	if (flags.sata_copy_out_error_reg) {
5174 		scmd->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
5175 	}
5176 }
5177 
5178 
5179 /*
5180  * Hot plug and remove interrupts can occur when the device is reset.  Just
5181  * masking the interrupt doesn't always work well because if a
5182  * different interrupt arrives on the other port, the driver can still
5183  * end up checking the state of the other port and discover the hot
 * interrupt flag is set even though it was masked.  Checking for recent
 * reset activity and then ignoring the interrupt turns out to be the
 * easiest way to handle this.
5186  */
5187 static void
5188 nv_report_add_remove(nv_port_t *nvp, int flags)
5189 {
5190 	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
5191 	clock_t time_diff = ddi_get_lbolt() - nvp->nvp_reset_time;
5192 	uint32_t sstatus;
5193 	int i;
5194 
5195 	/*
	 * If the port was reset within the last second, ignore the
	 * interrupt.  This should be reworked and improved instead of
	 * relying on this somewhat heavy-handed clamping.
5199 	 */
5200 	if (time_diff < drv_usectohz(NV_ONE_SEC)) {
5201 		NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp, "nv_report_add_remove()"
5202 		    "ignoring plug interrupt was %dms ago",
5203 		    TICK_TO_MSEC(time_diff)));
5204 
5205 		return;
5206 	}
5207 
5208 	/*
5209 	 * wait up to 1ms for sstatus to settle and reflect the true
5210 	 * status of the port.  Failure to do so can create confusion
5211 	 * in probe, where the incorrect sstatus value can still
5212 	 * persist.
5213 	 */
5214 	for (i = 0; i < 1000; i++) {
5215 		sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
5216 
5217 		if ((flags == NV_PORT_HOTREMOVED) &&
5218 		    ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) !=
5219 		    SSTATUS_DET_DEVPRE_PHYCOM)) {
5220 			break;
5221 		}
5222 
5223 		if ((flags != NV_PORT_HOTREMOVED) &&
5224 		    ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) ==
5225 		    SSTATUS_DET_DEVPRE_PHYCOM)) {
5226 			break;
5227 		}
5228 		drv_usecwait(1);
5229 	}
5230 
5231 	NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp,
5232 	    "sstatus took %i us for DEVPRE_PHYCOM to settle", i));
5233 
5234 	if (flags == NV_PORT_HOTREMOVED) {
5235 		NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp,
5236 		    "nv_report_add_remove() hot removed"));
5237 		nv_port_state_change(nvp,
5238 		    SATA_EVNT_DEVICE_DETACHED,
5239 		    SATA_ADDR_CPORT, 0);
5240 
5241 		nvp->nvp_state |= NV_PORT_HOTREMOVED;
5242 	} else {
5243 		NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp,
5244 		    "nv_report_add_remove() hot plugged"));
5245 		nv_port_state_change(nvp, SATA_EVNT_DEVICE_ATTACHED,
5246 		    SATA_ADDR_CPORT, 0);
5247 	}
5248 }
5249 
5250 
5251 /*
 * Get request sense data and stuff it into the command's sense buffer.
5253  * Start a request sense command in order to get sense data to insert
5254  * in the sata packet's rqsense buffer.  The command completion
5255  * processing is in nv_intr_pkt_pio.
5256  *
 * The sata framework provides a function to allocate and set up a
 * request sense packet command.  The reasons it is not being used here are:
5259  * a) it cannot be called in an interrupt context and this function is
5260  *    called in an interrupt context.
5261  * b) it allocates DMA resources that are not used here because this is
5262  *    implemented using PIO.
5263  *
5264  * If, in the future, this is changed to use DMA, the sata framework should
5265  * be used to allocate and set-up the error retrieval (request sense)
5266  * command.
5267  */
5268 static int
5269 nv_start_rqsense_pio(nv_port_t *nvp, nv_slot_t *nv_slotp)
5270 {
5271 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
5272 	sata_cmd_t *satacmd = &spkt->satapkt_cmd;
5273 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
5274 	int cdb_len = spkt->satapkt_cmd.satacmd_acdb_len;
5275 
5276 	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5277 	    "nv_start_rqsense_pio: start"));
5278 
5279 	/* clear the local request sense buffer before starting the command */
5280 	bzero(nv_slotp->nvslot_rqsense_buff, SATA_ATAPI_RQSENSE_LEN);
5281 
5282 	/* Write the request sense PACKET command */
5283 
5284 	/* select the drive */
5285 	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);
5286 
	/* make certain the drive is selected */
5288 	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
5289 	    NV_SEC2USEC(5), 0) == B_FALSE) {
5290 		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5291 		    "nv_start_rqsense_pio: drive select failed"));
5292 		return (NV_FAILURE);
5293 	}
5294 
5295 	/* set up the command */
5296 	nv_put8(cmdhdl, nvp->nvp_feature, 0);	/* deassert DMA and OVL */
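	/*
	 * The cylinder registers carry the maximum number of bytes the
	 * device may return in a single DRQ data transfer.
	 */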
5297 	nv_put8(cmdhdl, nvp->nvp_hcyl, SATA_ATAPI_MAX_BYTES_PER_DRQ >> 8);
5298 	nv_put8(cmdhdl, nvp->nvp_lcyl, SATA_ATAPI_MAX_BYTES_PER_DRQ & 0xff);
5299 	nv_put8(cmdhdl, nvp->nvp_sect, 0);
5300 	nv_put8(cmdhdl, nvp->nvp_count, 0);	/* no tag */
5301 
5302 	/* initiate the command by writing the command register last */
5303 	nv_put8(cmdhdl, nvp->nvp_cmd, SATAC_PACKET);
5304 
5305 	/* Give the host ctlr time to do its thing, according to ATA/ATAPI */
5306 	NV_DELAY_NSEC(400);
5307 
5308 	/*
5309 	 * Wait for the device to indicate that it is ready for the command
5310 	 * ATAPI protocol state - HP0: Check_Status_A
5311 	 */
5312 
5313 	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
5314 	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
5315 	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
5316 	    4000000, 0) == B_FALSE) {
5317 		if (nv_get8(cmdhdl, nvp->nvp_status) &
5318 		    (SATA_STATUS_ERR | SATA_STATUS_DF)) {
5319 			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5320 			    "nv_start_rqsense_pio: rqsense dev error (HP0)"));
5321 		} else {
5322 			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5323 			    "nv_start_rqsense_pio: rqsense timeout (HP0)"));
5324 		}
5325 
5326 		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
5327 		nv_complete_io(nvp, spkt, 0);
5328 		nv_reset(nvp);
5329 
5330 		return (NV_FAILURE);
5331 	}
5332 
5333 	/*
5334 	 * Put the ATAPI command in the data register
5335 	 * ATAPI protocol state - HP1: Send_Packet
5336 	 */
5337 
5338 	ddi_rep_put16(cmdhdl, (ushort_t *)nv_rqsense_cdb,
5339 	    (ushort_t *)nvp->nvp_data,
5340 	    (cdb_len >> 1), DDI_DEV_NO_AUTOINCR);
5341 
5342 	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5343 	    "nv_start_rqsense_pio: exiting into HP3"));
5344 
5345 	return (NV_SUCCESS);
5346 }
5347