xref: /illumos-gate/usr/src/uts/common/io/sata/adapters/nv_sata/nv_sata.c (revision 5bb86dd8f405a48942aaaab3ca1f410ed7e6db4d)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  *
31  * nv_sata is a combo SATA HBA driver for ck804/mcp55 based chipsets.
32  *
33  * NCQ
34  * ---
35  *
36  * A portion of the NCQ support is in place, but it is incomplete.  NCQ is
37  * disabled and is likely to be revisited in the future.
38  *
39  *
40  * Power Management
41  * ----------------
42  *
43  * Normally power management would be responsible for ensuring the device
44  * is quiescent and then changing power states to the device, such as
45  * powering down parts or all of the device.  mcp55/ck804 is unique in
46  * that it is only available as part of a larger southbridge chipset, so
47  * removing power from the device isn't possible.  Switches to control
48  * power management states D0/D3 in the PCI configuration space appear to
49  * be supported, but changes to these states are apparently ignored.
50  * The only further PM that the driver _could_ do is shut down the PHY,
51  * but in order to deliver the first rev of the driver sooner than later,
52  * that will be deferred until some future phase.
53  *
54  * Since the driver currently will not directly change any power state to
55  * the device, no power() entry point will be required.  However, it is
56  * possible that in ACPI power state S3, aka suspend to RAM, power
57  * can be removed from the device, and the driver cannot rely on BIOS to
58  * have reset any state.  For the time being, there are no known
59  * non-default configurations that need to be programmed.  This judgment
60  * is based on the port of the legacy ata driver not having any such
61  * functionality and based on conversations with the PM team.  If such a
62  * restoration is later deemed necessary it can be incorporated into the
63  * DDI_RESUME processing.
64  *
65  */
66 
67 #include <sys/scsi/scsi.h>
68 #include <sys/pci.h>
69 #include <sys/byteorder.h>
70 #include <sys/sata/sata_hba.h>
71 #include <sys/sata/adapters/nv_sata/nv_sata.h>
72 #include <sys/disp.h>
73 #include <sys/note.h>
74 #include <sys/promif.h>
75 
76 
77 /*
78  * Function prototypes for driver entry points
79  */
80 static int nv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
81 static int nv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
82 static int nv_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd,
83     void *arg, void **result);
84 
85 /*
86  * Function prototypes for entry points from sata service module
87  * These functions are distinguished from other local functions
88  * by the prefix "nv_sata_"
89  */
90 static int nv_sata_start(dev_info_t *dip, sata_pkt_t *spkt);
91 static int nv_sata_abort(dev_info_t *dip, sata_pkt_t *spkt, int);
92 static int nv_sata_reset(dev_info_t *dip, sata_device_t *sd);
93 static int nv_sata_activate(dev_info_t *dip, sata_device_t *sd);
94 static int nv_sata_deactivate(dev_info_t *dip, sata_device_t *sd);
95 
96 /*
97  * Local function prototypes
98  */
99 static uint_t mcp55_intr(caddr_t arg1, caddr_t arg2);
100 static uint_t mcp04_intr(caddr_t arg1, caddr_t arg2);
101 static int nv_add_legacy_intrs(nv_ctl_t *nvc);
102 #ifdef NV_MSI_SUPPORTED
103 static int nv_add_msi_intrs(nv_ctl_t *nvc);
104 #endif
105 static void nv_rem_intrs(nv_ctl_t *nvc);
106 static int nv_start_common(nv_port_t *nvp, sata_pkt_t *spkt);
107 static int nv_start_nodata(nv_port_t *nvp, int slot);
108 static void nv_intr_nodata(nv_port_t *nvp, nv_slot_t *spkt);
109 static int nv_start_pio_in(nv_port_t *nvp, int slot);
110 static int nv_start_pio_out(nv_port_t *nvp, int slot);
111 static void nv_intr_pio_in(nv_port_t *nvp, nv_slot_t *spkt);
112 static void nv_intr_pio_out(nv_port_t *nvp, nv_slot_t *spkt);
113 static int nv_start_dma(nv_port_t *nvp, int slot);
114 static void nv_intr_dma(nv_port_t *nvp, struct nv_slot *spkt);
115 static void nv_log(uint_t flag, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...);
116 static void nv_uninit_ctl(nv_ctl_t *nvc);
117 static void mcp55_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
118 static void mcp04_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
119 static void nv_uninit_port(nv_port_t *nvp);
120 static int nv_init_port(nv_port_t *nvp);
121 static int nv_init_ctl(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
122 static int mcp55_packet_complete_intr(nv_ctl_t *nvc, nv_port_t *nvp);
123 #ifdef NCQ
124 static int mcp55_dma_setup_intr(nv_ctl_t *nvc, nv_port_t *nvp);
125 #endif
126 static void nv_start_dma_engine(nv_port_t *nvp, int slot);
127 static void nv_port_state_change(nv_port_t *nvp, int event, uint8_t addr_type,
128     int state);
129 static boolean_t nv_check_link(uint32_t sstatus);
130 static void nv_common_reg_init(nv_ctl_t *nvc);
131 static void mcp04_intr_process(nv_ctl_t *nvc, uint8_t intr_status);
132 static void nv_reset(nv_port_t *nvp);
133 static void nv_complete_io(nv_port_t *nvp,  sata_pkt_t *spkt, int slot);
134 static void nv_timeout(void *);
135 static int nv_poll_wait(nv_port_t *nvp, sata_pkt_t *spkt);
136 static void nv_cmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...);
137 static void nv_read_signature(nv_port_t *nvp);
138 static void mcp55_set_intr(nv_port_t *nvp, int flag);
139 static void mcp04_set_intr(nv_port_t *nvp, int flag);
140 static void nv_resume(nv_port_t *nvp);
141 static void nv_suspend(nv_port_t *nvp);
142 static int nv_start_sync(nv_port_t *nvp, sata_pkt_t *spkt);
143 static int nv_abort_active(nv_port_t *nvp, sata_pkt_t *spkt, int abort_reason);
144 static void nv_copy_registers(nv_port_t *nvp, sata_device_t *sd,
145     sata_pkt_t *spkt);
146 static void nv_report_add_remove(nv_port_t *nvp, int flags);
147 static int nv_start_async(nv_port_t *nvp, sata_pkt_t *spkt);
148 static int nv_wait3(nv_port_t *nvp, uchar_t onbits1, uchar_t offbits1,
149     uchar_t failure_onbits2, uchar_t failure_offbits2,
150     uchar_t failure_onbits3, uchar_t failure_offbits3,
151     uint_t timeout_usec, int type_wait);
152 static int nv_wait(nv_port_t *nvp, uchar_t onbits, uchar_t offbits,
153     uint_t timeout_usec, int type_wait);
154 
155 
156 /*
157  * DMA attributes for the data buffer for x86.  dma_attr_burstsizes is unused.
158  * Verify whether it is needed if ported to another ISA.
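 *
 * The 64KB dma_attr_count_max below presumably mirrors the bus-master
 * PRD limit of 64KB per scatter/gather entry; dma_attr_sgllen allows up
 * to NV_DMA_NSEGS such entries per transfer.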
159  */
160 static ddi_dma_attr_t buffer_dma_attr = {
161 	DMA_ATTR_V0,		/* dma_attr_version */
162 	0,			/* dma_attr_addr_lo: lowest bus address */
163 	0xffffffffull,		/* dma_attr_addr_hi: */
164 	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max i.e for one cookie */
165 	4,			/* dma_attr_align */
166 	1,			/* dma_attr_burstsizes. */
167 	1,			/* dma_attr_minxfer */
168 	0xffffffffull,		/* dma_attr_maxxfer including all cookies */
169 	0xffffffffull,		/* dma_attr_seg */
170 	NV_DMA_NSEGS,		/* dma_attr_sgllen */
171 	512,			/* dma_attr_granular */
172 	0,			/* dma_attr_flags */
173 };
174 
175 
176 /*
177  * DMA attributes for PRD tables
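 * A PRD (Physical Region Descriptor) table holds the scatter/gather
 * entries consumed by the bus-master DMA engine, so a single cookie
 * bounded to 64KB is sufficient, as reflected below.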
178  */
179 ddi_dma_attr_t nv_prd_dma_attr = {
180 	DMA_ATTR_V0,		/* dma_attr_version */
181 	0,			/* dma_attr_addr_lo */
182 	0xffffffffull,		/* dma_attr_addr_hi */
183 	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max */
184 	4,			/* dma_attr_align */
185 	1,			/* dma_attr_burstsizes */
186 	1,			/* dma_attr_minxfer */
187 	NV_BM_64K_BOUNDARY,	/* dma_attr_maxxfer */
188 	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_seg */
189 	1,			/* dma_attr_sgllen */
190 	1,			/* dma_attr_granular */
191 	0			/* dma_attr_flags */
192 };
193 
194 /*
195  * Device access attributes
196  */
197 static ddi_device_acc_attr_t accattr = {
198     DDI_DEVICE_ATTR_V0,
199     DDI_STRUCTURE_LE_ACC,
200     DDI_STRICTORDER_ACC
201 };
202 
203 
204 static struct dev_ops nv_dev_ops = {
205 	DEVO_REV,		/* devo_rev */
206 	0,			/* refcnt  */
207 	nv_getinfo,		/* info */
208 	nulldev,		/* identify */
209 	nulldev,		/* probe */
210 	nv_attach,		/* attach */
211 	nv_detach,		/* detach */
212 	nodev,			/* no reset */
213 	(struct cb_ops *)0,	/* driver operations */
214 	NULL,			/* bus operations */
215 	NULL			/* power */
216 };
217 
218 static sata_tran_hotplug_ops_t nv_hotplug_ops;
219 
220 extern struct mod_ops mod_driverops;
221 
222 static  struct modldrv modldrv = {
223 	&mod_driverops,	/* driverops */
224 	"Nvidia ck804/mcp55 HBA v%I%",
225 	&nv_dev_ops,	/* driver ops */
226 };
227 
228 static  struct modlinkage modlinkage = {
229 	MODREV_1,
230 	&modldrv,
231 	NULL
232 };
233 
234 
235 /*
236  * wait between checks of reg status
237  */
238 int nv_usec_delay = NV_WAIT_REG_CHECK;
239 
240 /*
241  * The following is needed for nv_vcmn_err()
242  */
243 static kmutex_t nv_log_mutex; /* protects nv_log_buf */
244 static char nv_log_buf[NV_STRING_512];
245 int nv_debug_flags = NVDBG_ALWAYS;
246 int nv_log_to_console = B_FALSE;
247 
248 int nv_log_delay = 0;
249 int nv_prom_print = B_FALSE;
250 
251 /*
252  * for debugging
253  */
254 #ifdef DEBUG
255 int ncq_commands = 0;
256 int non_ncq_commands = 0;
257 #endif
258 
259 /*
260  * Opaque state pointer to be initialized by ddi_soft_state_init()
261  */
262 static void *nv_statep	= NULL;
263 
264 
265 static sata_tran_hotplug_ops_t nv_hotplug_ops = {
266 	SATA_TRAN_HOTPLUG_OPS_REV_1,	/* structure version */
267 	nv_sata_activate,	/* activate port. cfgadm -c connect */
268 	nv_sata_deactivate	/* deactivate port. cfgadm -c disconnect */
269 };
270 
271 
272 /*
273  *  nv module initialization
274  */
275 int
276 _init(void)
277 {
278 	int	error;
279 
280 	error = ddi_soft_state_init(&nv_statep, sizeof (nv_ctl_t), 0);
281 
282 	if (error != 0) {
283 
284 		return (error);
285 	}
286 
287 	mutex_init(&nv_log_mutex, NULL, MUTEX_DRIVER, NULL);
288 
289 	if ((error = sata_hba_init(&modlinkage)) != 0) {
290 		ddi_soft_state_fini(&nv_statep);
291 		mutex_destroy(&nv_log_mutex);
292 
293 		return (error);
294 	}
295 
296 	error = mod_install(&modlinkage);
297 	if (error != 0) {
298 		sata_hba_fini(&modlinkage);
299 		ddi_soft_state_fini(&nv_statep);
300 		mutex_destroy(&nv_log_mutex);
301 
302 		return (error);
303 	}
304 
305 	return (error);
306 }
307 
308 
309 /*
310  * nv module uninitialize
311  */
312 int
313 _fini(void)
314 {
315 	int	error;
316 
317 	error = mod_remove(&modlinkage);
318 
319 	if (error != 0) {
320 		return (error);
321 	}
322 
323 	/*
324 	 * remove the resources allocated in _init()
325 	 */
326 	mutex_destroy(&nv_log_mutex);
327 	sata_hba_fini(&modlinkage);
328 	ddi_soft_state_fini(&nv_statep);
329 
330 	return (error);
331 }
332 
333 
334 /*
335  * nv _info entry point
336  */
337 int
338 _info(struct modinfo *modinfop)
339 {
340 	return (mod_info(&modlinkage, modinfop));
341 }
342 
343 
344 /*
345  * these wrappers for ddi_{get,put}{8,16,32} are for observability
346  * with dtrace
347  */
348 #ifdef DEBUG
349 
350 static void
351 nv_put8(ddi_acc_handle_t handle, uint8_t *dev_addr, uint8_t value)
352 {
353 	ddi_put8(handle, dev_addr, value);
354 }
355 
356 static void
357 nv_put32(ddi_acc_handle_t handle, uint32_t *dev_addr, uint32_t value)
358 {
359 	ddi_put32(handle, dev_addr, value);
360 }
361 
362 static uint32_t
363 nv_get32(ddi_acc_handle_t handle, uint32_t *dev_addr)
364 {
365 	return (ddi_get32(handle, dev_addr));
366 }
367 
368 static void
369 nv_put16(ddi_acc_handle_t handle, uint16_t *dev_addr, uint16_t value)
370 {
371 	ddi_put16(handle, dev_addr, value);
372 }
373 
374 static uint16_t
375 nv_get16(ddi_acc_handle_t handle, uint16_t *dev_addr)
376 {
377 	return (ddi_get16(handle, dev_addr));
378 }
379 
380 static uint8_t
381 nv_get8(ddi_acc_handle_t handle, uint8_t *dev_addr)
382 {
383 	return (ddi_get8(handle, dev_addr));
384 }
385 
386 #else
387 
388 #define	nv_put8 ddi_put8
389 #define	nv_put32 ddi_put32
390 #define	nv_get32 ddi_get32
391 #define	nv_put16 ddi_put16
392 #define	nv_get16 ddi_get16
393 #define	nv_get8 ddi_get8
394 
395 #endif
396 
397 
398 /*
399  * Driver attach
400  */
401 static int
402 nv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
403 {
404 	int status, attach_state, intr_types, bar, i, command;
405 	int inst = ddi_get_instance(dip);
406 	ddi_acc_handle_t pci_conf_handle;
407 	nv_ctl_t *nvc;
408 	uint8_t subclass;
409 	uint32_t reg32;
410 
411 	switch (cmd) {
412 
413 	case DDI_ATTACH:
414 
415 		NVLOG((NVDBG_INIT, NULL, NULL,
416 		    "nv_attach(): DDI_ATTACH inst %d", inst));
417 
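		/*
		 * attach_state accumulates ATTACH_PROGRESS_* flags as each
		 * resource is set up, so that the failure path at the end
		 * of DDI_ATTACH unwinds only what was actually allocated.
		 */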
418 		attach_state = ATTACH_PROGRESS_NONE;
419 
420 		status = ddi_soft_state_zalloc(nv_statep, inst);
421 
422 		if (status != DDI_SUCCESS) {
423 			break;
424 		}
425 
426 		nvc = ddi_get_soft_state(nv_statep, inst);
427 
428 		nvc->nvc_dip = dip;
429 
430 		attach_state |= ATTACH_PROGRESS_STATEP_ALLOC;
431 
432 		if (pci_config_setup(dip, &pci_conf_handle) == DDI_SUCCESS) {
433 			nvc->nvc_revid = pci_config_get8(pci_conf_handle,
434 			    PCI_CONF_REVID);
435 			NVLOG((NVDBG_INIT, NULL, NULL,
436 			    "inst %d: silicon revid is %x nv_debug_flags=%x",
437 			    inst, nvc->nvc_revid, nv_debug_flags));
438 		} else {
439 			break;
440 		}
441 
442 		attach_state |= ATTACH_PROGRESS_CONF_HANDLE;
443 
444 		/*
445 		 * If a device is attached after a suspend/resume, sometimes
446 		 * the command register is zero, as it might not be set by
447 		 * BIOS or a parent.  Set it again here.
448 		 */
449 		command = pci_config_get16(pci_conf_handle, PCI_CONF_COMM);
450 
451 		if (command == 0) {
452 			cmn_err(CE_WARN, "nv_sata%d: restoring PCI command"
453 			    " register", inst);
454 			pci_config_put16(pci_conf_handle, PCI_CONF_COMM,
455 			    PCI_COMM_IO|PCI_COMM_MAE|PCI_COMM_ME);
456 		}
457 
458 		subclass = pci_config_get8(pci_conf_handle, PCI_CONF_SUBCLASS);
459 
460 		if (subclass & PCI_MASS_RAID) {
461 			cmn_err(CE_WARN,
462 			    "attach failed: RAID mode not supported");
463 			break;
464 		}
465 
466 		/*
467 		 * the 6 bars of the controller are:
468 		 * 0: port 0 task file
469 		 * 1: port 0 status
470 		 * 2: port 1 task file
471 		 * 3: port 1 status
472 		 * 4: bus master for both ports
473 		 * 5: extended registers for SATA features
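		 *
		 * ddi_regs_map_setup() is called below with rnumber bar + 1
		 * because reg entry 0 of a PCI node describes configuration
		 * space rather than a BAR.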
474 		 */
475 		for (bar = 0; bar < 6; bar++) {
476 			status = ddi_regs_map_setup(dip, bar + 1,
477 			    (caddr_t *)&nvc->nvc_bar_addr[bar], 0, 0, &accattr,
478 			    &nvc->nvc_bar_hdl[bar]);
479 
480 			if (status != DDI_SUCCESS) {
481 				NVLOG((NVDBG_INIT, nvc, NULL,
482 				    "ddi_regs_map_setup failure for bar"
483 				    " %d status = %d", bar, status));
484 				break;
485 			}
486 		}
487 
488 		attach_state |= ATTACH_PROGRESS_BARS;
489 
490 		/*
491 		 * initialize controller and driver core
492 		 */
493 		status = nv_init_ctl(nvc, pci_conf_handle);
494 
495 		if (status == NV_FAILURE) {
496 			NVLOG((NVDBG_INIT, nvc, NULL, "nv_init_ctl failed"));
497 
498 			break;
499 		}
500 
501 		attach_state |= ATTACH_PROGRESS_CTL_SETUP;
502 
503 		/*
504 		 * initialize mutexes
505 		 */
506 		mutex_init(&nvc->nvc_mutex, NULL, MUTEX_DRIVER,
507 		    DDI_INTR_PRI(nvc->nvc_intr_pri));
508 
509 		attach_state |= ATTACH_PROGRESS_MUTEX_INIT;
510 
511 		/*
512 		 * get supported interrupt types
513 		 */
514 		if (ddi_intr_get_supported_types(dip, &intr_types) !=
515 		    DDI_SUCCESS) {
516 			nv_cmn_err(CE_WARN, nvc, NULL,
517 			    "!ddi_intr_get_supported_types failed");
518 			NVLOG((NVDBG_INIT, nvc, NULL,
519 			    "interrupt supported types failed"));
520 
521 			break;
522 		}
523 
524 		NVLOG((NVDBG_INIT, nvc, NULL,
525 		    "ddi_intr_get_supported_types() returned: 0x%x",
526 		    intr_types));
527 
528 #ifdef NV_MSI_SUPPORTED
529 		if (intr_types & DDI_INTR_TYPE_MSI) {
530 			NVLOG((NVDBG_INIT, nvc, NULL,
531 			    "using MSI interrupt type"));
532 
533 			/*
534 			 * Try MSI first, but fall back to legacy if MSI
535 			 * attach fails
536 			 */
537 			if (nv_add_msi_intrs(nvc) == DDI_SUCCESS) {
538 				nvc->nvc_intr_type = DDI_INTR_TYPE_MSI;
539 				attach_state |= ATTACH_PROGRESS_INTR_ADDED;
540 				NVLOG((NVDBG_INIT, nvc, NULL,
541 				    "MSI interrupt setup done"));
542 			} else {
543 				nv_cmn_err(CE_CONT, nvc, NULL,
544 				    "!MSI registration failed, "
545 				    "will try Legacy interrupts");
546 			}
547 		}
548 #endif
549 
550 		/*
551 		 * Either the MSI interrupt setup has failed or only
552 		 * the fixed interrupts are available on the system.
553 		 */
554 		if (!(attach_state & ATTACH_PROGRESS_INTR_ADDED) &&
555 		    (intr_types & DDI_INTR_TYPE_FIXED)) {
556 
557 			NVLOG((NVDBG_INIT, nvc, NULL,
558 			    "using Legacy interrupt type"));
559 
560 			if (nv_add_legacy_intrs(nvc) == DDI_SUCCESS) {
561 				nvc->nvc_intr_type = DDI_INTR_TYPE_FIXED;
562 				attach_state |= ATTACH_PROGRESS_INTR_ADDED;
563 				NVLOG((NVDBG_INIT, nvc, NULL,
564 				    "Legacy interrupt setup done"));
565 			} else {
566 				nv_cmn_err(CE_WARN, nvc, NULL,
567 				    "!legacy interrupt setup failed");
568 				NVLOG((NVDBG_INIT, nvc, NULL,
569 				    "legacy interrupt setup failed"));
570 				break;
571 			}
572 		}
573 
574 		if (!(attach_state & ATTACH_PROGRESS_INTR_ADDED)) {
575 			NVLOG((NVDBG_INIT, nvc, NULL,
576 			    "no interrupts registered"));
577 			break;
578 		}
579 
580 		/*
581 		 * attach to sata module
582 		 */
583 		if (sata_hba_attach(nvc->nvc_dip,
584 		    &nvc->nvc_sata_hba_tran,
585 		    DDI_ATTACH) != DDI_SUCCESS) {
586 			attach_state |= ATTACH_PROGRESS_SATA_MODULE;
587 
588 			break;
589 		}
590 
591 		pci_config_teardown(&pci_conf_handle);
592 
593 		NVLOG((NVDBG_INIT, nvc, NULL, "nv_attach DDI_SUCCESS"));
594 
595 		return (DDI_SUCCESS);
596 
597 	case DDI_RESUME:
598 
599 		nvc = ddi_get_soft_state(nv_statep, inst);
600 
601 		NVLOG((NVDBG_INIT, nvc, NULL,
602 		    "nv_attach(): DDI_RESUME inst %d", inst));
603 
604 		if (pci_config_setup(dip, &pci_conf_handle) != DDI_SUCCESS) {
605 			return (DDI_FAILURE);
606 		}
607 
608 		/*
609 		 * If a device is attached after a suspend/resume, sometimes
610 		 * the command register is zero, as it might not be set by
611 		 * BIOS or a parent.  Set it again here.
612 		 */
613 		command = pci_config_get16(pci_conf_handle, PCI_CONF_COMM);
614 
615 		if (command == 0) {
616 			pci_config_put16(pci_conf_handle, PCI_CONF_COMM,
617 			    PCI_COMM_IO|PCI_COMM_MAE|PCI_COMM_ME);
618 		}
619 
620 		/*
621 		 * Need to set bit 2 to 1 at config offset 0x50
622 		 * to enable access to the bar5 registers.
623 		 */
624 		reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
625 
626 		if ((reg32 & NV_BAR5_SPACE_EN) != NV_BAR5_SPACE_EN) {
627 			pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
628 			    reg32 | NV_BAR5_SPACE_EN);
629 		}
630 
631 		nvc->nvc_state &= ~NV_CTRL_SUSPEND;
632 
633 		for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
634 			nv_resume(&(nvc->nvc_port[i]));
635 		}
636 
637 		pci_config_teardown(&pci_conf_handle);
638 
639 		return (DDI_SUCCESS);
640 
641 	default:
642 		return (DDI_FAILURE);
643 	}
644 
645 
646 	/*
647 	 * DDI_ATTACH failure path starts here
648 	 */
649 
650 	if (attach_state & ATTACH_PROGRESS_INTR_ADDED) {
651 		nv_rem_intrs(nvc);
652 	}
653 
654 	if (attach_state & ATTACH_PROGRESS_SATA_MODULE) {
655 		/*
656 		 * Remove timers
657 		 */
658 		int port = 0;
659 		nv_port_t *nvp;
660 
661 		for (; port < NV_MAX_PORTS(nvc); port++) {
662 			nvp = &(nvc->nvc_port[port]);
663 			if (nvp->nvp_timeout_id != 0) {
664 				(void) untimeout(nvp->nvp_timeout_id);
665 			}
666 		}
667 	}
668 
669 	if (attach_state & ATTACH_PROGRESS_MUTEX_INIT) {
670 		mutex_destroy(&nvc->nvc_mutex);
671 	}
672 
673 	if (attach_state & ATTACH_PROGRESS_CTL_SETUP) {
674 		nv_uninit_ctl(nvc);
675 	}
676 
677 	if (attach_state & ATTACH_PROGRESS_BARS) {
678 		while (--bar >= 0) {
679 			ddi_regs_map_free(&nvc->nvc_bar_hdl[bar]);
680 		}
681 	}
682 
683 	if (attach_state & ATTACH_PROGRESS_STATEP_ALLOC) {
684 		ddi_soft_state_free(nv_statep, inst);
685 	}
686 
687 	if (attach_state & ATTACH_PROGRESS_CONF_HANDLE) {
688 		pci_config_teardown(&pci_conf_handle);
689 	}
690 
691 	cmn_err(CE_WARN, "nv_sata%d attach failed", inst);
692 
693 	return (DDI_FAILURE);
694 }
695 
696 
697 static int
698 nv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
699 {
700 	int i, port, inst = ddi_get_instance(dip);
701 	nv_ctl_t *nvc;
702 	nv_port_t *nvp;
703 
704 	nvc = ddi_get_soft_state(nv_statep, inst);
705 
706 	switch (cmd) {
707 
708 	case DDI_DETACH:
709 
710 		NVLOG((NVDBG_INIT, nvc, NULL, "nv_detach: DDI_DETACH"));
711 
712 		/*
713 		 * Remove interrupts
714 		 */
715 		nv_rem_intrs(nvc);
716 
717 		/*
718 		 * Remove timers
719 		 */
720 		for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
721 			nvp = &(nvc->nvc_port[port]);
722 			if (nvp->nvp_timeout_id != 0) {
723 				(void) untimeout(nvp->nvp_timeout_id);
724 			}
725 		}
726 
727 		/*
728 		 * Remove maps
729 		 */
730 		for (i = 0; i < 6; i++) {
731 			ddi_regs_map_free(&nvc->nvc_bar_hdl[i]);
732 		}
733 
734 		/*
735 		 * Destroy mutexes
736 		 */
737 		mutex_destroy(&nvc->nvc_mutex);
738 
739 		/*
740 		 * Uninitialize the controller
741 		 */
742 		nv_uninit_ctl(nvc);
743 
744 		/*
745 		 * unregister from the sata module
746 		 */
747 		(void) sata_hba_detach(nvc->nvc_dip, DDI_DETACH);
748 
749 		/*
750 		 * Free soft state
751 		 */
752 		ddi_soft_state_free(nv_statep, inst);
753 
754 		return (DDI_SUCCESS);
755 
756 	case DDI_SUSPEND:
757 		/*
758 		 * The PM functions for suspend and resume are incomplete
759 		 * and need additional work.  It may or may not work in
760 		 * the current state.
761 		 */
762 		NVLOG((NVDBG_INIT, nvc, NULL, "nv_detach: DDI_SUSPEND"));
763 
764 		for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
765 			nv_suspend(&(nvc->nvc_port[i]));
766 		}
767 
768 		nvc->nvc_state |= NV_CTRL_SUSPEND;
769 
770 		return (DDI_SUCCESS);
771 
772 	default:
773 		return (DDI_FAILURE);
774 	}
775 }
776 
777 
778 /*ARGSUSED*/
779 static int
780 nv_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
781 {
782 	nv_ctl_t *nvc;
783 	int instance;
784 	dev_t dev;
785 
786 	dev = (dev_t)arg;
787 	instance = getminor(dev);
788 
789 	switch (infocmd) {
790 	case DDI_INFO_DEVT2DEVINFO:
791 		nvc = ddi_get_soft_state(nv_statep,  instance);
792 		if (nvc != NULL) {
793 			*result = nvc->nvc_dip;
794 			return (DDI_SUCCESS);
795 		} else {
796 			*result = NULL;
797 			return (DDI_FAILURE);
798 		}
799 	case DDI_INFO_DEVT2INSTANCE:
800 		*(int *)result = instance;
801 		break;
802 	default:
803 		break;
804 	}
805 	return (DDI_SUCCESS);
806 }
807 
808 
809 /*
810  * Called by sata module to probe a port.  Port and device state
811  * are not changed here... only reported back to the sata module.
812  *
813  * If probe confirms a device is present for the first time, it will
814  * initiate a device reset, then probe will be called again and the
815  * signature will be check.  If the signature is valid, data structures
816  * will be initialized.
817  */
818 static int
819 nv_sata_probe(dev_info_t *dip, sata_device_t *sd)
820 {
821 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
822 	uint8_t cport = sd->satadev_addr.cport;
823 	uint8_t pmport = sd->satadev_addr.pmport;
824 	uint8_t qual = sd->satadev_addr.qual;
825 	clock_t nv_lbolt = ddi_get_lbolt();
826 	nv_port_t *nvp;
827 
828 	if (cport >= NV_MAX_PORTS(nvc)) {
829 		sd->satadev_type = SATA_DTYPE_NONE;
830 		sd->satadev_state = SATA_STATE_PROBED;
831 
832 		return (SATA_FAILURE);
833 	}
834 
835 	ASSERT(nvc->nvc_port != NULL);
836 	nvp = &(nvc->nvc_port[cport]);
837 	ASSERT(nvp != NULL);
838 
839 	NVLOG((NVDBG_PROBE, nvc, nvp,
840 	    "nv_sata_probe: enter cport: 0x%x, pmport: 0x%x, "
841 	    "qual: 0x%x", cport, pmport, qual));
842 
843 	mutex_enter(&nvp->nvp_mutex);
844 
845 	/*
846 	 * This check seems to be done in the SATA module.
847 	 * It may not be required here
848 	 */
849 	if (nvp->nvp_state & NV_PORT_INACTIVE) {
850 		nv_cmn_err(CE_WARN, nvc, nvp,
851 		    "port inactive.  Use cfgadm to activate");
852 		sd->satadev_type = SATA_DTYPE_UNKNOWN;
853 		sd->satadev_state = SATA_PSTATE_SHUTDOWN;
854 		mutex_exit(&nvp->nvp_mutex);
855 
856 		return (SATA_FAILURE);
857 	}
858 
859 	if (qual == SATA_ADDR_PMPORT) {
860 		sd->satadev_type = SATA_DTYPE_NONE;
861 		sd->satadev_state = SATA_STATE_PROBED;
862 		mutex_exit(&nvp->nvp_mutex);
863 		nv_cmn_err(CE_WARN, nvc, nvp,
864 		    "controller does not support port multiplier");
865 
866 		return (SATA_FAILURE);
867 	}
868 
869 	sd->satadev_state = SATA_PSTATE_PWRON;
870 
871 	nv_copy_registers(nvp, sd, NULL);
872 
873 	/*
874 	 * determine link status
875 	 */
876 	if (nv_check_link(sd->satadev_scr.sstatus) == B_FALSE) {
877 		uint8_t det;
878 
879 		/*
880 		 * Reset will cause the link to go down for a short period of
881 		 * time.  If link is lost for less than 2 seconds ignore it
882 		 * so that the reset can progress.
883 		 */
884 		if (nvp->nvp_state & NV_PORT_RESET_PROBE) {
885 
886 			if (nvp->nvp_link_lost_time == 0) {
887 				nvp->nvp_link_lost_time = nv_lbolt;
888 			}
889 
890 			if (TICK_TO_SEC(nv_lbolt -
891 			    nvp->nvp_link_lost_time) < NV_LINK_LOST_OK) {
892 				NVLOG((NVDBG_ALWAYS, nvp->nvp_ctlp, nvp,
893 				    "probe: intermittent link lost while"
894 				    " resetting"));
895 				/*
896 				 * fake status of link so that probe continues
897 				 */
898 				SSTATUS_SET_IPM(sd->satadev_scr.sstatus,
899 				    SSTATUS_IPM_ACTIVE);
900 				SSTATUS_SET_DET(sd->satadev_scr.sstatus,
901 				    SSTATUS_DET_DEVPRE_PHYCOM);
902 				sd->satadev_type = SATA_DTYPE_UNKNOWN;
903 				mutex_exit(&nvp->nvp_mutex);
904 
905 				return (SATA_SUCCESS);
906 			} else {
907 				nvp->nvp_state &=
908 				    ~(NV_PORT_RESET_PROBE|NV_PORT_RESET);
909 			}
910 		}
911 
912 		/*
913 		 * no link, so tear down port and abort all active packets
914 		 */
915 
916 		det = (sd->satadev_scr.sstatus & SSTATUS_DET) >>
917 		    SSTATUS_DET_SHIFT;
918 
919 		switch (det) {
920 		case SSTATUS_DET_NODEV:
921 		case SSTATUS_DET_PHYOFFLINE:
922 			sd->satadev_type = SATA_DTYPE_NONE;
923 			break;
924 		default:
925 			sd->satadev_type = SATA_DTYPE_UNKNOWN;
926 			break;
927 		}
928 
929 		NVLOG((NVDBG_PROBE, nvp->nvp_ctlp, nvp,
930 		    "probe: link lost invoking nv_abort_active"));
931 
932 		(void) nv_abort_active(nvp, NULL, SATA_PKT_TIMEOUT);
933 		nv_uninit_port(nvp);
934 
935 		mutex_exit(&nvp->nvp_mutex);
936 
937 		return (SATA_SUCCESS);
938 	} else {
939 		nvp->nvp_link_lost_time = 0;
940 	}
941 
942 	/*
943 	 * A device is present so clear hotremoved flag
944 	 */
945 	nvp->nvp_state &= ~NV_PORT_HOTREMOVED;
946 
947 	/*
948 	 * If the signature was acquired previously there is no need to
949 	 * do it again.
950 	 */
951 	if (nvp->nvp_signature != 0) {
952 		NVLOG((NVDBG_PROBE, nvp->nvp_ctlp, nvp,
953 		    "probe: signature acquired previously"));
954 		sd->satadev_type = nvp->nvp_type;
955 		mutex_exit(&nvp->nvp_mutex);
956 
957 		return (SATA_SUCCESS);
958 	}
959 
960 	/*
961 	 * If NV_PORT_RESET is not set, this is the first time through
962 	 * so perform reset and return.
963 	 */
964 	if ((nvp->nvp_state & NV_PORT_RESET) == 0) {
965 		NVLOG((NVDBG_PROBE, nvp->nvp_ctlp, nvp,
966 		    "probe: first reset to get sig"));
967 		nvp->nvp_state |= NV_PORT_RESET_PROBE;
968 		nv_reset(nvp);
969 		sd->satadev_type = nvp->nvp_type = SATA_DTYPE_UNKNOWN;
970 		nvp->nvp_probe_time = nv_lbolt;
971 		mutex_exit(&nvp->nvp_mutex);
972 
973 		return (SATA_SUCCESS);
974 	}
975 
976 	/*
977 	 * Reset was done previously.  see if the signature is
978 	 * available.
979 	 */
980 	nv_read_signature(nvp);
981 	sd->satadev_type = nvp->nvp_type;
982 
983 	/*
984 	 * Some drives may require additional resets to get a
985 	 * valid signature.  If a drive was not just powered up, the signature
986 	 * should arrive within half a second of reset.  Therefore if more
987 	 * than 5 seconds has elapsed while waiting for a signature, reset
988 	 * again.  These extra resets do not appear to create problems when
989 	 * the drive is spinning up for more than this reset period.
990 	 */
991 	if (nvp->nvp_signature == 0) {
992 		if (TICK_TO_SEC(nv_lbolt - nvp->nvp_reset_time) > 5) {
993 			NVLOG((NVDBG_PROBE, nvc, nvp, "additional reset"
994 			    " during signature acquisition"));
995 			nv_reset(nvp);
996 		}
997 
998 		mutex_exit(&nvp->nvp_mutex);
999 
1000 		return (SATA_SUCCESS);
1001 	}
1002 
1003 	NVLOG((NVDBG_PROBE, nvc, nvp, "signature acquired after %d ms",
1004 	    TICK_TO_MSEC(nv_lbolt - nvp->nvp_probe_time)));
1005 
1006 	/*
1007 	 * nv_sata only deals with ATA disks so far.  If it is
1008 	 * not an ATA disk, then just return.
1009 	 */
1010 	if (nvp->nvp_type != SATA_DTYPE_ATADISK) {
1011 		nv_cmn_err(CE_WARN, nvc, nvp, "Driver currently handles only"
1012 		    " disks.  Signature acquired was %X", nvp->nvp_signature);
1013 		mutex_exit(&nvp->nvp_mutex);
1014 
1015 		return (SATA_SUCCESS);
1016 	}
1017 
1018 	/*
1019 	 * make sure structures are initialized
1020 	 */
1021 	if (nv_init_port(nvp) == NV_SUCCESS) {
1022 		NVLOG((NVDBG_PROBE, nvc, nvp,
1023 		    "device detected and set up at port %d", cport));
1024 		mutex_exit(&nvp->nvp_mutex);
1025 
1026 		return (SATA_SUCCESS);
1027 	} else {
1028 		nv_cmn_err(CE_WARN, nvc, nvp, "failed to set up data "
1029 		    "structures for port %d", cport);
1030 		mutex_exit(&nvp->nvp_mutex);
1031 
1032 		return (SATA_FAILURE);
1033 	}
1034 	/*NOTREACHED*/
1035 }
1036 
1037 
1038 /*
1039  * Called by sata module to start a new command.
1040  */
1041 static int
1042 nv_sata_start(dev_info_t *dip, sata_pkt_t *spkt)
1043 {
1044 	int cport = spkt->satapkt_device.satadev_addr.cport;
1045 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1046 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1047 	int ret;
1048 
1049 	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_start: opmode: 0x%x cmd=%x",
1050 	    spkt->satapkt_op_mode, spkt->satapkt_cmd.satacmd_cmd_reg));
1051 
1052 	mutex_enter(&nvp->nvp_mutex);
1053 
1054 	/*
1055 	 * hotremoved is an intermediate state where the link was lost,
1056 	 * but the hotplug event has not yet been processed by the sata
1057 	 * module.  Fail the request.
1058 	 */
1059 	if (nvp->nvp_state & NV_PORT_HOTREMOVED) {
1060 		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1061 		spkt->satapkt_device.satadev_state = SATA_STATE_UNKNOWN;
1062 		NVLOG((NVDBG_ERRS, nvc, nvp,
1063 		    "nv_sata_start: NV_PORT_HOTREMOVED"));
1064 		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1065 		mutex_exit(&nvp->nvp_mutex);
1066 
1067 		return (SATA_TRAN_PORT_ERROR);
1068 	}
1069 
1070 	if (nvp->nvp_state & NV_PORT_RESET) {
1071 		NVLOG((NVDBG_ERRS, nvc, nvp,
1072 		    "still waiting for reset completion"));
1073 		spkt->satapkt_reason = SATA_PKT_BUSY;
1074 		mutex_exit(&nvp->nvp_mutex);
1075 
1076 		/*
1077 		 * If in panic, timeouts do not occur, so fake one
1078 		 * so that the signature can be acquired to complete
1079 		 * the reset handling.
1080 		 */
1081 		if (ddi_in_panic()) {
1082 			nv_timeout(nvp);
1083 		}
1084 
1085 		return (SATA_TRAN_BUSY);
1086 	}
1087 
1088 	if (nvp->nvp_type == SATA_DTYPE_NONE) {
1089 		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1090 		NVLOG((NVDBG_ERRS, nvc, nvp,
1091 		    "nv_sata_start: SATA_DTYPE_NONE"));
1092 		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1093 		mutex_exit(&nvp->nvp_mutex);
1094 
1095 		return (SATA_TRAN_PORT_ERROR);
1096 	}
1097 
1098 	if (spkt->satapkt_device.satadev_type == SATA_DTYPE_ATAPICD) {
1099 		ASSERT(nvp->nvp_type == SATA_DTYPE_ATAPICD);
1100 		nv_cmn_err(CE_WARN, nvc, nvp,
1101 		    "optical devices not supported");
1102 		spkt->satapkt_reason = SATA_PKT_CMD_UNSUPPORTED;
1103 		mutex_exit(&nvp->nvp_mutex);
1104 
1105 		return (SATA_TRAN_CMD_UNSUPPORTED);
1106 	}
1107 
1108 	if (spkt->satapkt_device.satadev_type == SATA_DTYPE_PMULT) {
1109 		ASSERT(nvp->nvp_type == SATA_DTYPE_PMULT);
1110 		nv_cmn_err(CE_WARN, nvc, nvp,
1111 		    "port multipliers not supported by controller");
1112 		spkt->satapkt_reason = SATA_PKT_CMD_UNSUPPORTED;
1113 		mutex_exit(&nvp->nvp_mutex);
1114 
1115 		return (SATA_TRAN_CMD_UNSUPPORTED);
1116 	}
1117 
1118 	if ((nvp->nvp_state & NV_PORT_INIT) == 0) {
1119 		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1120 		NVLOG((NVDBG_ERRS, nvc, nvp,
1121 		    "nv_sata_start: port not yet initialized"));
1122 		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1123 		mutex_exit(&nvp->nvp_mutex);
1124 
1125 		return (SATA_TRAN_PORT_ERROR);
1126 	}
1127 
1128 	if (nvp->nvp_state & NV_PORT_INACTIVE) {
1129 		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1130 		NVLOG((NVDBG_ERRS, nvc, nvp,
1131 		    "nv_sata_start: NV_PORT_INACTIVE"));
1132 		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1133 		mutex_exit(&nvp->nvp_mutex);
1134 
1135 		return (SATA_TRAN_PORT_ERROR);
1136 	}
1137 
1138 	if (nvp->nvp_state & NV_PORT_FAILED) {
1139 		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1140 		NVLOG((NVDBG_ERRS, nvc, nvp,
1141 		    "nv_sata_start: NV_PORT_FAILED state"));
1142 		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1143 		mutex_exit(&nvp->nvp_mutex);
1144 
1145 		return (SATA_TRAN_PORT_ERROR);
1146 	}
1147 
1148 	/*
1149 	 * After a device reset, when sata module restore processing
1150 	 * is complete, the sata module will set sata_clear_dev_reset, which
1151 	 * indicates that restore processing has completed and normal
1152 	 * non-restore related commands should be processed.
1153 	 */
1154 	if (spkt->satapkt_cmd.satacmd_flags.sata_clear_dev_reset) {
1155 		nvp->nvp_state &= ~NV_PORT_RESTORE;
1156 		NVLOG((NVDBG_ENTRY, nvc, nvp,
1157 		    "nv_sata_start: clearing NV_PORT_RESTORE"));
1158 	}
1159 
1160 	/*
1161 	 * if the device was recently reset as indicated by NV_PORT_RESTORE,
1162 	 * only allow commands which restore device state.  The sata module
1163 	 * marks such commands with sata_ignore_dev_reset.
1164 	 *
1165 	 * during coredump, nv_reset is called but then the restore
1166 	 * doesn't happen.  For now, work around this by ignoring the wait for
1167 	 * restore if the system is panicking.
1168 	 */
1169 	if ((nvp->nvp_state & NV_PORT_RESTORE) &&
1170 	    !(spkt->satapkt_cmd.satacmd_flags.sata_ignore_dev_reset) &&
1171 	    (ddi_in_panic() == 0)) {
1172 		spkt->satapkt_reason = SATA_PKT_BUSY;
1173 		NVLOG((NVDBG_ENTRY, nvc, nvp,
1174 		    "nv_sata_start: waiting for restore "));
1175 		mutex_exit(&nvp->nvp_mutex);
1176 
1177 		return (SATA_TRAN_BUSY);
1178 	}
1179 
1180 	if (nvp->nvp_state & NV_PORT_ABORTING) {
1181 		spkt->satapkt_reason = SATA_PKT_BUSY;
1182 		NVLOG((NVDBG_ERRS, nvc, nvp,
1183 		    "nv_sata_start: NV_PORT_ABORTING"));
1184 		mutex_exit(&nvp->nvp_mutex);
1185 
1186 		return (SATA_TRAN_BUSY);
1187 	}
1188 
1189 	if (spkt->satapkt_op_mode &
1190 	    (SATA_OPMODE_POLLING|SATA_OPMODE_SYNCH)) {
1191 
1192 		ret = nv_start_sync(nvp, spkt);
1193 
1194 		mutex_exit(&nvp->nvp_mutex);
1195 
1196 		return (ret);
1197 	}
1198 
1199 	/*
1200 	 * start the command asynchronously
1201 	 */
1202 	ret = nv_start_async(nvp, spkt);
1203 
1204 	mutex_exit(&nvp->nvp_mutex);
1205 
1206 	return (ret);
1207 }
1208 
1209 
1210 /*
1211  * SATA_OPMODE_POLLING implies the driver is in a
1212  * synchronous mode, and SATA_OPMODE_SYNCH is also set.
1213  * If only SATA_OPMODE_SYNCH is set, the driver can use
1214  * interrupts and sleep wait on a cv.
1215  *
1216  * If SATA_OPMODE_POLLING is set, the driver can't use
1217  * interrupts and must busy wait and simulate the
1218  * interrupts by waiting for BSY to be cleared.
1219  *
1220  * Synchronous mode has to return BUSY if there are
1221  * any other commands already on the drive.
1222  */
1223 static int
1224 nv_start_sync(nv_port_t *nvp, sata_pkt_t *spkt)
1225 {
1226 	nv_ctl_t *nvc = nvp->nvp_ctlp;
1227 	int ret;
1228 
1229 	NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync: entry"));
1230 
1231 	if (nvp->nvp_ncq_run != 0 || nvp->nvp_non_ncq_run != 0) {
1232 		spkt->satapkt_reason = SATA_PKT_BUSY;
1233 		NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp,
1234 		    "nv_sata_satapkt_sync: device is busy, sync cmd rejected "
1235 		    "ncq_run: %d non_ncq_run: %d  spkt: %p",
1236 		    nvp->nvp_ncq_run, nvp->nvp_non_ncq_run,
1237 		    (&(nvp->nvp_slot[0]))->nvslot_spkt));
1238 
1239 		return (SATA_TRAN_BUSY);
1240 	}
1241 
1242 	/*
1243 	 * if SYNC but not POLL, verify that this is not on interrupt thread.
1244 	 */
1245 	if (!(spkt->satapkt_op_mode & SATA_OPMODE_POLLING) &&
1246 	    servicing_interrupt()) {
1247 		spkt->satapkt_reason = SATA_PKT_BUSY;
1248 		nv_cmn_err(CE_WARN, nvp->nvp_ctlp, nvp,
1249 		    "SYNC mode not allowed during interrupt");
1250 
1251 		return (SATA_TRAN_BUSY);
1252 
1253 	}
1254 
1255 	/*
1256 	 * disable interrupt generation if in polled mode
1257 	 */
1258 	if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1259 		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
1260 	}
1261 
1262 	if ((ret = nv_start_common(nvp, spkt)) != SATA_TRAN_ACCEPTED) {
1263 		if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1264 			(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1265 		}
1266 
1267 		return (ret);
1268 	}
1269 
1270 	if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1271 		mutex_exit(&nvp->nvp_mutex);
1272 		ret = nv_poll_wait(nvp, spkt);
1273 		mutex_enter(&nvp->nvp_mutex);
1274 
1275 		(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1276 
1277 		NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync:"
1278 		    " done reason %d", ret));
1279 
1280 		return (ret);
1281 	}
1282 
1283 	/*
1284 	 * non-polling synchronous mode handling.  The interrupt will signal
1285 	 * when the IO is completed.
1286 	 */
1287 	cv_wait(&nvp->nvp_poll_cv, &nvp->nvp_mutex);
1288 
1289 	if (spkt->satapkt_reason != SATA_PKT_COMPLETED) {
1290 
1291 		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
1292 	}
1293 
1294 	NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync:"
1295 	    " done reason %d", spkt->satapkt_reason));
1296 
1297 	return (SATA_TRAN_ACCEPTED);
1298 }
1299 
1300 
1301 static int
1302 nv_poll_wait(nv_port_t *nvp, sata_pkt_t *spkt)
1303 {
1304 	int ret;
1305 	nv_ctl_t *nvc = nvp->nvp_ctlp;
1306 #if ! defined(__lock_lint)
1307 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[0]); /* not NCQ aware */
1308 #endif
1309 
1310 	NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: enter"));
1311 
1312 	for (;;) {
1313 
1314 		NV_DELAY_NSEC(400);
1315 
1316 		NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: before nv_wait"));
1317 		if (nv_wait(nvp, 0, SATA_STATUS_BSY,
1318 		    NV_SEC2USEC(spkt->satapkt_time), NV_NOSLEEP) == B_FALSE) {
1319 			mutex_enter(&nvp->nvp_mutex);
1320 			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
1321 			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
1322 			nv_reset(nvp);
1323 			nv_complete_io(nvp, spkt, 0);
1324 			mutex_exit(&nvp->nvp_mutex);
1325 			NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: "
1326 			    "SATA_STATUS_BSY"));
1327 
1328 			return (SATA_TRAN_ACCEPTED);
1329 		}
1330 
1331 		NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: before nvc_intr"));
1332 
1333 		/*
1334 		 * Simulate interrupt.
1335 		 */
1336 		ret = (*(nvc->nvc_interrupt))((caddr_t)nvc, NULL);
1337 		NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: after nvc_intr"));
1338 
1339 		if (ret != DDI_INTR_CLAIMED) {
1340 			NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait:"
1341 			    " unclaimed -- resetting"));
1342 			mutex_enter(&nvp->nvp_mutex);
1343 			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
1344 			nv_reset(nvp);
1345 			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
1346 			nv_complete_io(nvp, spkt, 0);
1347 			mutex_exit(&nvp->nvp_mutex);
1348 
1349 			return (SATA_TRAN_ACCEPTED);
1350 		}
1351 
1352 #if ! defined(__lock_lint)
1353 		if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
1354 			/*
1355 			 * packet is complete
1356 			 */
1357 			return (SATA_TRAN_ACCEPTED);
1358 		}
1359 #endif
1360 	}
1361 	/*NOTREACHED*/
1362 }
1363 
1364 
1365 /*
1366  * Called by sata module to abort outstanding packets.
1367  */
1368 /*ARGSUSED*/
1369 static int
1370 nv_sata_abort(dev_info_t *dip, sata_pkt_t *spkt, int flag)
1371 {
1372 	int cport = spkt->satapkt_device.satadev_addr.cport;
1373 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1374 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1375 	int c_a, ret;
1376 
1377 	ASSERT(cport < NV_MAX_PORTS(nvc));
1378 	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_abort %d %p", flag, spkt));
1379 
1380 	mutex_enter(&nvp->nvp_mutex);
1381 
1382 	if (nvp->nvp_state & NV_PORT_INACTIVE) {
1383 		mutex_exit(&nvp->nvp_mutex);
1384 		nv_cmn_err(CE_WARN, nvc, nvp,
1385 		    "abort request failed: port inactive");
1386 
1387 		return (SATA_FAILURE);
1388 	}
1389 
1390 	/*
1391 	 * spkt == NULL then abort all commands
1392 	 */
1393 	c_a = nv_abort_active(nvp, spkt, SATA_PKT_ABORTED);
1394 
1395 	if (c_a) {
1396 		NVLOG((NVDBG_ENTRY, nvc, nvp,
1397 		    "packets aborted running=%d", c_a));
1398 		ret = SATA_SUCCESS;
1399 	} else {
1400 		if (spkt == NULL) {
1401 			NVLOG((NVDBG_ENTRY, nvc, nvp, "no spkts to abort"));
1402 		} else {
1403 			NVLOG((NVDBG_ENTRY, nvc, nvp,
1404 			    "can't find spkt to abort"));
1405 		}
1406 		ret = SATA_FAILURE;
1407 	}
1408 
1409 	mutex_exit(&nvp->nvp_mutex);
1410 
1411 	return (ret);
1412 }
1413 
1414 
1415 /*
1416  * if spkt == NULL abort all pkts running, otherwise
1417  * abort the requested packet.  Must be called with nvp_mutex
1418  * held and returns with it held.  Not NCQ aware.
1419  */
1420 static int
1421 nv_abort_active(nv_port_t *nvp, sata_pkt_t *spkt, int abort_reason)
1422 {
1423 	int aborted = 0, i, reset_once = B_FALSE;
1424 	struct nv_slot *nv_slotp;
1425 	sata_pkt_t *spkt_slot;
1426 
1427 	ASSERT(MUTEX_HELD(&nvp->nvp_mutex));
1428 
1429 	/*
1430 	 * return if the port is not configured
1431 	 */
1432 	if (nvp->nvp_slot == NULL) {
1433 		NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp,
1434 		    "nv_abort_active: not configured so returning"));
1435 
1436 		return (0);
1437 	}
1438 
1439 	NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp, "nv_abort_active"));
1440 
1441 	nvp->nvp_state |= NV_PORT_ABORTING;
1442 
1443 	for (i = 0; i < nvp->nvp_queue_depth; i++) {
1444 
1445 		nv_slotp = &(nvp->nvp_slot[i]);
1446 		spkt_slot = nv_slotp->nvslot_spkt;
1447 
1448 		/*
1449 		 * skip if not active command in slot
1450 		 */
1451 		if (spkt_slot == NULL) {
1452 			continue;
1453 		}
1454 
1455 		/*
1456 		 * if a specific packet was requested, skip if
1457 		 * this is not a match
1458 		 */
1459 		if ((spkt != NULL) && (spkt != spkt_slot)) {
1460 			continue;
1461 		}
1462 
1463 		/*
1464 		 * stop the hardware.  This could need reworking
1465 		 * when NCQ is enabled in the driver.
1466 		 */
1467 		if (reset_once == B_FALSE) {
1468 			ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
1469 
1470 			/*
1471 			 * stop DMA engine
1472 			 */
1473 			nv_put8(bmhdl, nvp->nvp_bmicx,  0);
1474 
1475 			nv_reset(nvp);
1476 			reset_once = B_TRUE;
1477 		}
1478 
1479 		spkt_slot->satapkt_reason = abort_reason;
1480 		nv_complete_io(nvp, spkt_slot, i);
1481 		aborted++;
1482 	}
1483 
1484 	nvp->nvp_state &= ~NV_PORT_ABORTING;
1485 
1486 	return (aborted);
1487 }
1488 
1489 
1490 /*
1491  * Called by sata module to reset a port, device, or the controller.
1492  */
1493 static int
1494 nv_sata_reset(dev_info_t *dip, sata_device_t *sd)
1495 {
1496 	int cport = sd->satadev_addr.cport;
1497 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1498 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1499 	int ret = SATA_SUCCESS;
1500 
1501 	ASSERT(cport < NV_MAX_PORTS(nvc));
1502 
1503 	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_reset"));
1504 
1505 	mutex_enter(&nvp->nvp_mutex);
1506 
1507 	switch (sd->satadev_addr.qual) {
1508 
1509 	case SATA_ADDR_CPORT:
1510 		/*FALLTHROUGH*/
1511 	case SATA_ADDR_DCPORT:
1512 		nv_reset(nvp);
1513 		(void) nv_abort_active(nvp, NULL, SATA_PKT_RESET);
1514 
1515 		break;
1516 	case SATA_ADDR_CNTRL:
1517 		NVLOG((NVDBG_ENTRY, nvc, nvp,
1518 		    "nv_sata_reset: controller reset not supported"));
1519 
1520 		break;
1521 	case SATA_ADDR_PMPORT:
1522 	case SATA_ADDR_DPMPORT:
1523 		NVLOG((NVDBG_ENTRY, nvc, nvp,
1524 		    "nv_sata_reset: port multipliers not supported"));
1525 		/*FALLTHROUGH*/
1526 	default:
1527 		/*
1528 		 * unsupported case
1529 		 */
1530 		ret = SATA_FAILURE;
1531 		break;
1532 	}
1533 
1534 	if (ret == SATA_SUCCESS) {
1535 		/*
1536 		 * If the port is inactive, do a quiet reset and don't attempt
1537 		 * to wait for reset completion or do any post reset processing
1538 		 */
1539 		if (nvp->nvp_state & NV_PORT_INACTIVE) {
1540 			nvp->nvp_state &= ~NV_PORT_RESET;
1541 			nvp->nvp_reset_time = 0;
1542 		}
1543 
1544 		/*
1545 		 * clear the port failed flag
1546 		 */
1547 		nvp->nvp_state &= ~NV_PORT_FAILED;
1548 	}
1549 
1550 	mutex_exit(&nvp->nvp_mutex);
1551 
1552 	return (ret);
1553 }
1554 
1555 
1556 /*
1557  * Sata entry point to handle port activation.  cfgadm -c connect
1558  */
1559 static int
1560 nv_sata_activate(dev_info_t *dip, sata_device_t *sd)
1561 {
1562 	int cport = sd->satadev_addr.cport;
1563 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1564 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1565 
1566 	ASSERT(cport < NV_MAX_PORTS(nvc));
1567 	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_activate"));
1568 
1569 	mutex_enter(&nvp->nvp_mutex);
1570 
1571 	sd->satadev_state = SATA_STATE_READY;
1572 
1573 	nv_copy_registers(nvp, sd, NULL);
1574 
1575 	(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1576 
1577 	nvp->nvp_state = 0;
1578 
1579 	mutex_exit(&nvp->nvp_mutex);
1580 
1581 	return (SATA_SUCCESS);
1582 }
1583 
1584 
1585 /*
1586  * Sata entry point to handle port deactivation.  cfgadm -c disconnect
1587  */
1588 static int
1589 nv_sata_deactivate(dev_info_t *dip, sata_device_t *sd)
1590 {
1591 	int cport = sd->satadev_addr.cport;
1592 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1593 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1594 
1595 	ASSERT(cport < NV_MAX_PORTS(nvc));
1596 	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_deactivate"));
1597 
1598 	mutex_enter(&nvp->nvp_mutex);
1599 
1600 	(void) nv_abort_active(nvp, NULL, SATA_PKT_RESET);
1601 
1602 	/*
1603 	 * mark the device as inaccessible
1604 	 */
1605 	nvp->nvp_state |= NV_PORT_INACTIVE;
1606 
1607 	/*
1608 	 * disable the interrupts on port
1609 	 */
1610 	(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
1611 
1612 	nv_uninit_port(nvp);
1613 
1614 	sd->satadev_state = SATA_PSTATE_SHUTDOWN;
1615 	nv_copy_registers(nvp, sd, NULL);
1616 
1617 	mutex_exit(&nvp->nvp_mutex);
1618 
1619 	return (SATA_SUCCESS);
1620 }
1621 
1622 
1623 /*
1624  * find an empty slot in the driver's queue, increment counters,
1625  * and then invoke the appropriate PIO or DMA start routine.
1626  */
1627 static int
1628 nv_start_common(nv_port_t *nvp, sata_pkt_t *spkt)
1629 {
1630 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
1631 	int on_bit = 0x01, slot, sactive, ret, ncq = 0;
1632 	uint8_t cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
1633 	int direction = sata_cmdp->satacmd_flags.sata_data_direction;
1634 	nv_ctl_t *nvc = nvp->nvp_ctlp;
1635 	nv_slot_t *nv_slotp;
1636 	boolean_t dma_cmd;
1637 
1638 	NVLOG((NVDBG_DELIVER, nvc, nvp, "nv_start_common  entered: cmd: 0x%x",
1639 	    sata_cmdp->satacmd_cmd_reg));
1640 
1641 	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
1642 	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
1643 		nvp->nvp_ncq_run++;
1644 		/*
1645 		 * search for an empty NCQ slot.  By this time, it's already
1646 		 * been determined by the caller that there is room on the
1647 		 * queue.
1648 		 */
1649 		for (slot = 0; slot < nvp->nvp_queue_depth; slot++,
1650 		    on_bit <<= 1) {
1651 			if ((nvp->nvp_sactive_cache & on_bit) == 0) {
1652 				break;
1653 			}
1654 		}
1655 
1656 		/*
1657 		 * the first empty slot found should not exceed the queue
1658 		 * depth of the drive.  If it does, it's an error.
1659 		 */
1660 		ASSERT(slot != nvp->nvp_queue_depth);
1661 
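		/*
		 * on_bit is now the SActive bit mask for the chosen slot;
		 * it is written to the SACTIVE register below and mirrored
		 * in nvp_sactive_cache.
		 */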
1662 		sactive = nv_get32(nvc->nvc_bar_hdl[5],
1663 		    nvp->nvp_sactive);
1664 		ASSERT((sactive & on_bit) == 0);
1665 		nv_put32(nvc->nvc_bar_hdl[5], nvp->nvp_sactive, on_bit);
1666 		NVLOG((NVDBG_INIT, nvc, nvp, "setting SACTIVE onbit: %X",
1667 		    on_bit));
1668 		nvp->nvp_sactive_cache |= on_bit;
1669 
1670 		ncq = NVSLOT_NCQ;
1671 
1672 	} else {
1673 		nvp->nvp_non_ncq_run++;
1674 		slot = 0;
1675 	}
1676 
1677 	nv_slotp = (nv_slot_t *)&nvp->nvp_slot[slot];
1678 
1679 	ASSERT(nv_slotp->nvslot_spkt == NULL);
1680 
1681 	nv_slotp->nvslot_spkt = spkt;
1682 	nv_slotp->nvslot_flags = ncq;
1683 
1684 	/*
1685 	 * the sata module doesn't indicate which commands utilize the
1686 	 * DMA engine, so find out using this switch table.
1687 	 */
1688 	switch (spkt->satapkt_cmd.satacmd_cmd_reg) {
1689 	case SATAC_READ_DMA_EXT:
1690 	case SATAC_WRITE_DMA_EXT:
1691 	case SATAC_WRITE_DMA:
1692 	case SATAC_READ_DMA:
1693 	case SATAC_READ_DMA_QUEUED:
1694 	case SATAC_READ_DMA_QUEUED_EXT:
1695 	case SATAC_WRITE_DMA_QUEUED:
1696 	case SATAC_WRITE_DMA_QUEUED_EXT:
1697 	case SATAC_READ_FPDMA_QUEUED:
1698 	case SATAC_WRITE_FPDMA_QUEUED:
1699 		dma_cmd = B_TRUE;
1700 		break;
1701 	default:
1702 		dma_cmd = B_FALSE;
1703 	}
1704 
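	/*
	 * Each slot carries a paired start routine and interrupt handler;
	 * the chain below selects DMA, non-data, PIO-in or PIO-out
	 * handling based on the command and its data direction.
	 */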
1705 	if (sata_cmdp->satacmd_num_dma_cookies != 0 && dma_cmd == B_TRUE) {
1706 		NVLOG((NVDBG_DELIVER, nvc,  nvp, "DMA command"));
1707 		nv_slotp->nvslot_start = nv_start_dma;
1708 		nv_slotp->nvslot_intr = nv_intr_dma;
1709 	} else if (direction == SATA_DIR_NODATA_XFER) {
1710 		NVLOG((NVDBG_DELIVER, nvc, nvp, "non-data command"));
1711 		nv_slotp->nvslot_start = nv_start_nodata;
1712 		nv_slotp->nvslot_intr = nv_intr_nodata;
1713 	} else if (direction == SATA_DIR_READ) {
1714 		NVLOG((NVDBG_DELIVER, nvc, nvp, "pio in command"));
1715 		nv_slotp->nvslot_start = nv_start_pio_in;
1716 		nv_slotp->nvslot_intr = nv_intr_pio_in;
1717 		nv_slotp->nvslot_byte_count =
1718 		    spkt->satapkt_cmd.satacmd_bp->b_bcount;
1719 		nv_slotp->nvslot_v_addr =
1720 		    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
1721 	} else if (direction == SATA_DIR_WRITE) {
1722 		NVLOG((NVDBG_DELIVER, nvc, nvp, "pio out command"));
1723 		nv_slotp->nvslot_start = nv_start_pio_out;
1724 		nv_slotp->nvslot_intr = nv_intr_pio_out;
1725 		nv_slotp->nvslot_byte_count =
1726 		    spkt->satapkt_cmd.satacmd_bp->b_bcount;
1727 		nv_slotp->nvslot_v_addr =
1728 		    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
1729 	} else {
1730 		nv_cmn_err(CE_WARN, nvc, nvp, "malformed command: direction"
1731 		    " %d cookies %d cmd %x",
1732 		    sata_cmdp->satacmd_flags.sata_data_direction,
1733 		    sata_cmdp->satacmd_num_dma_cookies,  cmd);
1734 		spkt->satapkt_reason = SATA_PKT_CMD_UNSUPPORTED;
1735 		ret = SATA_TRAN_CMD_UNSUPPORTED;
1736 
1737 		goto fail;
1738 	}
1739 
1740 	if ((ret = (*nv_slotp->nvslot_start)(nvp, slot)) ==
1741 	    SATA_TRAN_ACCEPTED) {
1742 		nv_slotp->nvslot_stime = ddi_get_lbolt();
1743 
1744 		/*
1745 		 * start timer if it's not already running and this packet
1746 		 * is not requesting polled mode.
1747 		 */
1748 		if ((nvp->nvp_timeout_id == 0) &&
1749 		    ((spkt->satapkt_op_mode & SATA_OPMODE_POLLING) == 0)) {
1750 			nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
1751 			    drv_usectohz(NV_ONE_SEC));
1752 		}
1753 
1754 		return (SATA_TRAN_ACCEPTED);
1755 	}
1756 
1757 	fail:
1758 
1759 	spkt->satapkt_reason = SATA_TRAN_PORT_ERROR;
1760 
1761 	if (ncq == NVSLOT_NCQ) {
1762 		nvp->nvp_ncq_run--;
1763 		nvp->nvp_sactive_cache &= ~on_bit;
1764 	} else {
1765 		nvp->nvp_non_ncq_run--;
1766 	}
1767 	nv_slotp->nvslot_spkt = NULL;
1768 	nv_slotp->nvslot_flags = 0;
1769 
1770 	return (ret);
1771 }
1772 
1773 
1774 /*
1775  * Check if the signature is ready and if non-zero translate
1776  * it into a Solaris sata defined type.
1777  */
1778 static void
1779 nv_read_signature(nv_port_t *nvp)
1780 {
1781 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
1782 
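	/*
	 * The device signature is assembled from the taskfile registers the
	 * device writes after reset: sector count in bits 7:0, LBA low in
	 * 15:8, LBA mid in 23:16 and LBA high in 31:24.  The combined value
	 * identifies disks, ATAPI devices and port multipliers below.
	 */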
1783 	nvp->nvp_signature = nv_get8(cmdhdl, nvp->nvp_count);
1784 	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_sect) << 8);
1785 	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_lcyl) << 16);
1786 	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_hcyl) << 24);
1787 
1788 	switch (nvp->nvp_signature) {
1789 
1790 	case NV_SIG_DISK:
1791 		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "drive is a disk"));
1792 		nvp->nvp_type = SATA_DTYPE_ATADISK;
1793 		break;
1794 	case NV_SIG_ATAPI:
1795 		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
1796 		    "drive is an optical device"));
1797 		nvp->nvp_type = SATA_DTYPE_ATAPICD;
1798 		break;
1799 	case NV_SIG_PM:
1800 		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
1801 		    "device is a port multiplier"));
1802 		nvp->nvp_type = SATA_DTYPE_PMULT;
1803 		break;
1804 	case NV_SIG_NOTREADY:
1805 		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
1806 		    "signature not ready"));
1807 		nvp->nvp_type = SATA_DTYPE_UNKNOWN;
1808 		break;
1809 	default:
1810 		nv_cmn_err(CE_WARN, nvp->nvp_ctlp, nvp, "signature %X not"
1811 		    " recognized", nvp->nvp_signature);
1812 		nvp->nvp_type = SATA_DTYPE_UNKNOWN;
1813 		break;
1814 	}
1815 
1816 	if (nvp->nvp_signature) {
1817 		nvp->nvp_state &= ~(NV_PORT_RESET_PROBE|NV_PORT_RESET);
1818 	}
1819 }
1820 
1821 
1822 /*
1823  * Reset the port
1824  */
1825 static void
1826 nv_reset(nv_port_t *nvp)
1827 {
1828 	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
1829 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
1830 	nv_ctl_t *nvc = nvp->nvp_ctlp;
1831 	uint32_t sctrl;
1832 
1833 	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_reset()"));
1834 
1835 	ASSERT(mutex_owned(&nvp->nvp_mutex));
1836 
1837 	/*
1838 	 * clear signature registers
1839 	 */
1840 	nv_put8(cmdhdl, nvp->nvp_sect, 0);
1841 	nv_put8(cmdhdl, nvp->nvp_lcyl, 0);
1842 	nv_put8(cmdhdl, nvp->nvp_hcyl, 0);
1843 	nv_put8(cmdhdl, nvp->nvp_count, 0);
1844 
1845 	nvp->nvp_signature = 0;
1846 	nvp->nvp_type = 0;
1847 	nvp->nvp_state |= NV_PORT_RESET;
1848 	nvp->nvp_reset_time = ddi_get_lbolt();
1849 	nvp->nvp_link_lost_time = 0;
1850 
1851 	/*
1852 	 * assert reset in PHY by writing a 1 to bit 0 scontrol
1853 	 */
1854 	sctrl = nv_get32(bar5_hdl, nvp->nvp_sctrl);
1855 
1856 	nv_put32(bar5_hdl, nvp->nvp_sctrl, sctrl | SCONTROL_DET_COMRESET);
1857 
1858 	/*
1859 	 * hold COMRESET asserted for 1ms before de-asserting it below
1860 	 */
1861 	drv_usecwait(1000);
1862 
1863 	/*
1864 	 * de-assert reset in PHY
1865 	 */
1866 	nv_put32(bar5_hdl, nvp->nvp_sctrl, sctrl);
1867 
1868 	/*
1869 	 * make sure timer is running
1870 	 */
1871 	if (nvp->nvp_timeout_id == 0) {
1872 		nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
1873 		    drv_usectohz(NV_ONE_SEC));
1874 	}
1875 }
1876 
1877 
1878 /*
1879  * Initialize register handling specific to mcp55
1880  */
1881 /* ARGSUSED */
1882 static void
1883 mcp55_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
1884 {
1885 	nv_port_t *nvp;
1886 	uchar_t *bar5  = nvc->nvc_bar_addr[5];
1887 	uint8_t off, port;
1888 
1889 	nvc->nvc_mcp55_ctl = (uint32_t *)(bar5 + MCP55_CTL);
1890 	nvc->nvc_mcp55_ncq = (uint32_t *)(bar5 + MCP55_NCQ);
1891 
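	/*
	 * The per-port interrupt status and control registers are 16 bits
	 * wide and appear to be laid out consecutively in BAR 5, hence the
	 * off += 2 stride per port.
	 */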
1892 	for (port = 0, off = 0; port < NV_MAX_PORTS(nvc); port++, off += 2) {
1893 		nvp = &(nvc->nvc_port[port]);
1894 		nvp->nvp_mcp55_int_status =
1895 		    (uint16_t *)(bar5 + MCP55_INT_STATUS + off);
1896 		nvp->nvp_mcp55_int_ctl =
1897 		    (uint16_t *)(bar5 + MCP55_INT_CTL + off);
1898 
1899 		/*
1900 		 * clear any previous interrupts asserted
1901 		 */
1902 		nv_put16(nvc->nvc_bar_hdl[5], nvp->nvp_mcp55_int_status,
1903 		    MCP55_INT_CLEAR);
1904 
1905 		/*
1906 		 * These are the interrupts to accept for now.  The spec
1907 		 * says these are enable bits, but nvidia has indicated
1908 		 * these are masking bits.  Even though they may be masked
1909 		 * out to prevent asserting the main interrupt, they can
1910 		 * still be asserted while reading the interrupt status
1911 		 * register, so that needs to be considered in the interrupt
1912 		 * handler.
1913 		 */
1914 		nv_put16(nvc->nvc_bar_hdl[5], nvp->nvp_mcp55_int_ctl,
1915 		    ~(MCP55_INT_IGNORE));
1916 	}
1917 
1918 	/*
1919 	 * Allow the driver to program the BM on the first command instead
1920 	 * of waiting for an interrupt.
1921 	 */
1922 #ifdef NCQ
1923 	flags = MCP_SATA_AE_NCQ_PDEV_FIRST_CMD | MCP_SATA_AE_NCQ_SDEV_FIRST_CMD;
1924 	nv_put32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp55_ncq, flags);
1925 	flags = MCP_SATA_AE_CTL_PRI_SWNCQ | MCP_SATA_AE_CTL_SEC_SWNCQ;
1926 	nv_put32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp55_ctl, flags);
1927 #endif
1928 
1929 
1930 #if 0
1931 	/*
1932 	 * This caused problems on some but not all mcp55 based systems.
1933 	 * DMA writes would never complete.  This happened even on systems
1934 	 * with small amounts of memory, and even with only NV_40BIT_PRD set
1935 	 * below and buffer_dma_attr.dma_attr_addr_hi left unchanged, so it
1936 	 * appears to be a hardware issue that needs further investigation.
1937 	 */
1938 
1939 	/*
1940 	 * mcp55 rev A03 and above supports 40-bit physical addressing.
1941 	 * Enable DMA to take advantage of that.
1942 	 *
1943 	 */
1944 	if (nvc->nvc_revid >= 0xa3) {
1945 		uint32_t reg32;
1946 		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "rev id is %X and"
1947 		    " is capable of 40-bit addressing", nvc->nvc_revid));
1948 		buffer_dma_attr.dma_attr_addr_hi = 0xffffffffffull;
1949 		reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
1950 		pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
1951 		    reg32 |NV_40BIT_PRD);
1952 	} else {
1953 		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "rev is %X and is "
1954 		    "not capable of 40-bit addressing", nvc->nvc_revid));
1955 	}
1956 #endif
1957 
1958 }
1959 
1960 
1961 /*
1962  * Initialize register handling specific to mcp04
1963  */
1964 static void
1965 mcp04_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
1966 {
1967 	uchar_t *bar5  = nvc->nvc_bar_addr[5];
1968 	uint32_t reg32;
1969 	uint16_t reg16;
1970 	nv_port_t *nvp;
1971 	int j;
1972 
1973 	/*
1974 	 * delay hotplug interrupts until PHYRDY.
1975 	 */
1976 	reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_42);
1977 	pci_config_put32(pci_conf_handle, NV_SATA_CFG_42,
1978 	    reg32 | MCP04_CFG_DELAY_HOTPLUG_INTR);
1979 
1980 	/*
1981 	 * enable hot plug interrupts for channel x and y
1982 	 */
1983 	reg16 = nv_get16(nvc->nvc_bar_hdl[5],
1984 	    (uint16_t *)(bar5 + NV_ADMACTL_X));
1985 	nv_put16(nvc->nvc_bar_hdl[5], (uint16_t *)(bar5 + NV_ADMACTL_X),
1986 	    NV_HIRQ_EN | reg16);
1987 
1988 
1989 	reg16 = nv_get16(nvc->nvc_bar_hdl[5],
1990 	    (uint16_t *)(bar5 + NV_ADMACTL_Y));
1991 	nv_put16(nvc->nvc_bar_hdl[5], (uint16_t *)(bar5 + NV_ADMACTL_Y),
1992 	    NV_HIRQ_EN | reg16);
1993 
1994 	nvc->nvc_mcp04_int_status = (uint8_t *)(bar5 + MCP04_SATA_INT_STATUS);
1995 
1996 	/*
1997 	 * clear any existing interrupt pending then enable
1998 	 */
1999 	for (j = 0; j < NV_MAX_PORTS(nvc); j++) {
2000 		nvp = &(nvc->nvc_port[j]);
2001 		mutex_enter(&nvp->nvp_mutex);
2002 		(*(nvp->nvp_ctlp->nvc_set_intr))(nvp,
2003 		    NV_INTR_CLEAR_ALL|NV_INTR_ENABLE);
2004 		mutex_exit(&nvp->nvp_mutex);
2005 	}
2006 }
2007 
2008 
2009 /*
2010  * Initialize the controller and set up driver data structures.
2011  * determine if ck804 or mcp55 class.
2012  */
2013 static int
2014 nv_init_ctl(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2015 {
2016 	struct sata_hba_tran stran;
2017 	nv_port_t *nvp;
2018 	int j, ck804 = B_TRUE;
2019 	uchar_t *cmd_addr, *ctl_addr, *bm_addr;
2020 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2021 	uchar_t *bar5  = nvc->nvc_bar_addr[5];
2022 	uint32_t reg32;
2023 	uint8_t reg8, reg8_save;
2024 
2025 	NVLOG((NVDBG_INIT, nvc, NULL, "nv_init_ctl entered"));
2026 
2027 	/*
2028 	 * Need to set bit 2 to 1 at config offset 0x50
2029 	 * to enable access to the bar5 registers.
2030 	 */
2031 	reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
2032 	pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
2033 	    reg32 | NV_BAR5_SPACE_EN);
2034 
2035 	/*
2036 	 * Determine if this is ck804 or mcp55.  ck804 maps the task file
2037 	 * registers into bar5 while mcp55 does not; that region of mcp55's
2038 	 * space is unused and does not hold written values.  So check one of
2039 	 * the task file registers to see if it is writable and reads back
2040 	 * what was written.  mcp55 will read back 0xff whereas ck804 will
2041 	 * return the value written.
2042 	 */
2043 	reg8_save = nv_get8(bar5_hdl,
2044 	    (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X));
2045 
2046 
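	/*
	 * write two different non-zero values and verify that each one
	 * reads back before concluding this is a ck804.
	 */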
2047 	for (j = 1; j < 3; j++) {
2048 
2049 		nv_put8(bar5_hdl, (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X), j);
2050 		reg8 = nv_get8(bar5_hdl,
2051 		    (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X));
2052 
2053 		if (reg8 != j) {
2054 			ck804 = B_FALSE;
2055 			break;
2056 		}
2057 	}
2058 
2059 	nv_put8(bar5_hdl, (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X), reg8_save);
2060 
2061 	if (ck804 == B_TRUE) {
2062 		NVLOG((NVDBG_INIT, nvc, NULL, "controller is CK804"));
2063 		nvc->nvc_interrupt = mcp04_intr;
2064 		nvc->nvc_reg_init = mcp04_reg_init;
2065 		nvc->nvc_set_intr = mcp04_set_intr;
2066 	} else {
2067 		NVLOG((NVDBG_INIT, nvc, NULL, "controller is MCP55"));
2068 		nvc->nvc_interrupt = mcp55_intr;
2069 		nvc->nvc_reg_init = mcp55_reg_init;
2070 		nvc->nvc_set_intr = mcp55_set_intr;
2071 	}
2072 
2073 
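	/*
	 * fill in the sata_hba_tran entry points and capabilities; a copy
	 * is kept in the soft state for registration with the sata module.
	 */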
2074 	stran.sata_tran_hba_rev = SATA_TRAN_HBA_REV;
2075 	stran.sata_tran_hba_dip = nvc->nvc_dip;
2076 	stran.sata_tran_hba_dma_attr = &buffer_dma_attr;
2077 	stran.sata_tran_hba_num_cports = NV_NUM_CPORTS;
2078 	stran.sata_tran_hba_features_support =
2079 	    SATA_CTLF_HOTPLUG | SATA_CTLF_ASN;
2080 	stran.sata_tran_hba_qdepth = NV_QUEUE_SLOTS;
2081 	stran.sata_tran_probe_port = nv_sata_probe;
2082 	stran.sata_tran_start = nv_sata_start;
2083 	stran.sata_tran_abort = nv_sata_abort;
2084 	stran.sata_tran_reset_dport = nv_sata_reset;
2085 	stran.sata_tran_selftest = NULL;
2086 	stran.sata_tran_hotplug_ops = &nv_hotplug_ops;
2087 	stran.sata_tran_pwrmgt_ops = NULL;
2088 	stran.sata_tran_ioctl = NULL;
2089 	nvc->nvc_sata_hba_tran = stran;
2090 
2091 	nvc->nvc_port = kmem_zalloc(sizeof (nv_port_t) * NV_MAX_PORTS(nvc),
2092 	    KM_SLEEP);
2093 
2094 	/*
2095 	 * initialize registers common to all chipsets
2096 	 */
2097 	nv_common_reg_init(nvc);
2098 
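	/*
	 * initialize each port's lock, condition variable and the pointers
	 * to its task file, device control and bus master registers.
	 */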
2099 	for (j = 0; j < NV_MAX_PORTS(nvc); j++) {
2100 		nvp = &(nvc->nvc_port[j]);
2101 
2102 		cmd_addr = nvp->nvp_cmd_addr;
2103 		ctl_addr = nvp->nvp_ctl_addr;
2104 		bm_addr = nvp->nvp_bm_addr;
2105 
2106 		mutex_init(&nvp->nvp_mutex, NULL, MUTEX_DRIVER,
2107 		    DDI_INTR_PRI(nvc->nvc_intr_pri));
2108 
2109 		cv_init(&nvp->nvp_poll_cv, NULL, CV_DRIVER, NULL);
2110 
2111 		nvp->nvp_data	= cmd_addr + NV_DATA;
2112 		nvp->nvp_error	= cmd_addr + NV_ERROR;
2113 		nvp->nvp_feature = cmd_addr + NV_FEATURE;
2114 		nvp->nvp_count	= cmd_addr + NV_COUNT;
2115 		nvp->nvp_sect	= cmd_addr + NV_SECT;
2116 		nvp->nvp_lcyl	= cmd_addr + NV_LCYL;
2117 		nvp->nvp_hcyl	= cmd_addr + NV_HCYL;
2118 		nvp->nvp_drvhd	= cmd_addr + NV_DRVHD;
2119 		nvp->nvp_status	= cmd_addr + NV_STATUS;
2120 		nvp->nvp_cmd	= cmd_addr + NV_CMD;
2121 		nvp->nvp_altstatus = ctl_addr + NV_ALTSTATUS;
2122 		nvp->nvp_devctl	= ctl_addr + NV_DEVCTL;
2123 
2124 		nvp->nvp_bmicx	= bm_addr + BMICX_REG;
2125 		nvp->nvp_bmisx	= bm_addr + BMISX_REG;
2126 		nvp->nvp_bmidtpx = (uint32_t *)(bm_addr + BMIDTPX_REG);
2127 
2128 		nvp->nvp_state = 0;
2129 	}
2130 
2131 	/*
2132 	 * initialize register by calling chip specific reg initialization
2133 	 */
2134 	(*(nvc->nvc_reg_init))(nvc, pci_conf_handle);
2135 
2136 	return (NV_SUCCESS);
2137 }
2138 
2139 
2140 /*
2141  * Initialize data structures with enough slots to handle queuing, if
2142  * enabled.  NV_QUEUE_SLOTS will be set to 1 or 32, depending on whether
2143  * NCQ support is built into the driver and enabled.  It might have been
2144  * better to derive the true size from the drive itself, but the sata
2145  * module only sends down that information on the first NCQ command,
2146  * which means possibly re-sizing the structures on an interrupt stack,
2147  * making error handling more messy.  The easy way is to just allocate
2148  * all 32 slots, which is what most drives support anyway.
2149  */
2150 static int
2151 nv_init_port(nv_port_t *nvp)
2152 {
2153 	nv_ctl_t *nvc = nvp->nvp_ctlp;
2154 	size_t	prd_size = sizeof (prde_t) * NV_DMA_NSEGS;
2155 	dev_info_t *dip = nvc->nvc_dip;
2156 	ddi_device_acc_attr_t dev_attr;
2157 	size_t buf_size;
2158 	ddi_dma_cookie_t cookie;
2159 	uint_t count;
2160 	int rc, i;
2161 
2162 	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
2163 	dev_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
2164 	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
2165 
2166 	if (nvp->nvp_state & NV_PORT_INIT) {
2167 		NVLOG((NVDBG_INIT, nvc, nvp,
2168 		    "nv_init_port previously initialized"));
2169 
2170 		return (NV_SUCCESS);
2171 	} else {
2172 		NVLOG((NVDBG_INIT, nvc, nvp, "nv_init_port initializing"));
2173 	}
2174 
2175 	nvp->nvp_sg_dma_hdl = kmem_zalloc(sizeof (ddi_dma_handle_t) *
2176 	    NV_QUEUE_SLOTS, KM_SLEEP);
2177 
2178 	nvp->nvp_sg_acc_hdl = kmem_zalloc(sizeof (ddi_acc_handle_t) *
2179 	    NV_QUEUE_SLOTS, KM_SLEEP);
2180 
2181 	nvp->nvp_sg_addr = kmem_zalloc(sizeof (caddr_t) *
2182 	    NV_QUEUE_SLOTS, KM_SLEEP);
2183 
2184 	nvp->nvp_sg_paddr = kmem_zalloc(sizeof (uint32_t) *
2185 	    NV_QUEUE_SLOTS, KM_SLEEP);
2186 
2187 	nvp->nvp_slot = kmem_zalloc(sizeof (nv_slot_t) * NV_QUEUE_SLOTS,
2188 	    KM_SLEEP);
2189 
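	/*
	 * for each slot, allocate and bind DMA memory to hold its PRD
	 * (scatter/gather) table and save the table's physical address.
	 */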
2190 	for (i = 0; i < NV_QUEUE_SLOTS; i++) {
2191 
2192 		rc = ddi_dma_alloc_handle(dip, &nv_prd_dma_attr,
2193 		    DDI_DMA_SLEEP, NULL, &(nvp->nvp_sg_dma_hdl[i]));
2194 
2195 		if (rc != DDI_SUCCESS) {
2196 			nv_uninit_port(nvp);
2197 
2198 			return (NV_FAILURE);
2199 		}
2200 
2201 		rc = ddi_dma_mem_alloc(nvp->nvp_sg_dma_hdl[i], prd_size,
2202 		    &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
2203 		    NULL, &(nvp->nvp_sg_addr[i]), &buf_size,
2204 		    &(nvp->nvp_sg_acc_hdl[i]));
2205 
2206 		if (rc != DDI_SUCCESS) {
2207 			nv_uninit_port(nvp);
2208 
2209 			return (NV_FAILURE);
2210 		}
2211 
2212 		rc = ddi_dma_addr_bind_handle(nvp->nvp_sg_dma_hdl[i], NULL,
2213 		    nvp->nvp_sg_addr[i], buf_size,
2214 		    DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
2215 		    DDI_DMA_SLEEP, NULL, &cookie, &count);
2216 
2217 		if (rc != DDI_DMA_MAPPED) {
2218 			nv_uninit_port(nvp);
2219 
2220 			return (NV_FAILURE);
2221 		}
2222 
2223 		ASSERT(count == 1);
2224 		ASSERT((cookie.dmac_address & (sizeof (int) - 1)) == 0);
2225 
2226 		ASSERT(cookie.dmac_laddress <= UINT32_MAX);
2227 
2228 		nvp->nvp_sg_paddr[i] = cookie.dmac_address;
2229 	}
2230 
2231 	/*
2232 	 * nvp_queue_depth represents the actual drive queue depth, not the
2233 	 * number of slots allocated in the structures (which may be more).
2234 	 * Actual queue depth is only learned after the first NCQ command, so
2235 	 * initialize it to 1 for now.
2236 	 */
2237 	nvp->nvp_queue_depth = 1;
2238 
2239 	nvp->nvp_state |= NV_PORT_INIT;
2240 
2241 	return (NV_SUCCESS);
2242 }
2243 
2244 
2245 /*
2246  * Free dynamically allocated structures for port.
2247  */
2248 static void
2249 nv_uninit_port(nv_port_t *nvp)
2250 {
2251 	int i;
2252 
2253 	/*
2254 	 * It is possible to reach here before a port has been initialized or
2255 	 * after it has already been uninitialized.  Just return in that case.
2256 	 */
2257 	if (nvp->nvp_slot == NULL) {
2258 
2259 		return;
2260 	}
2261 
2262 	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2263 	    "nv_uninit_port uninitializing"));
2264 
2265 	nvp->nvp_type = SATA_DTYPE_NONE;
2266 
2267 	for (i = 0; i < NV_QUEUE_SLOTS; i++) {
2268 		if (nvp->nvp_sg_paddr[i]) {
2269 			(void) ddi_dma_unbind_handle(nvp->nvp_sg_dma_hdl[i]);
2270 		}
2271 
2272 		if (nvp->nvp_sg_acc_hdl[i] != NULL) {
2273 			ddi_dma_mem_free(&(nvp->nvp_sg_acc_hdl[i]));
2274 		}
2275 
2276 		if (nvp->nvp_sg_dma_hdl[i] != NULL) {
2277 			ddi_dma_free_handle(&(nvp->nvp_sg_dma_hdl[i]));
2278 		}
2279 	}
2280 
2281 	kmem_free(nvp->nvp_slot, sizeof (nv_slot_t) * NV_QUEUE_SLOTS);
2282 	nvp->nvp_slot = NULL;
2283 
2284 	kmem_free(nvp->nvp_sg_dma_hdl,
2285 	    sizeof (ddi_dma_handle_t) * NV_QUEUE_SLOTS);
2286 	nvp->nvp_sg_dma_hdl = NULL;
2287 
2288 	kmem_free(nvp->nvp_sg_acc_hdl,
2289 	    sizeof (ddi_acc_handle_t) * NV_QUEUE_SLOTS);
2290 	nvp->nvp_sg_acc_hdl = NULL;
2291 
2292 	kmem_free(nvp->nvp_sg_addr, sizeof (caddr_t) * NV_QUEUE_SLOTS);
2293 	nvp->nvp_sg_addr = NULL;
2294 
2295 	kmem_free(nvp->nvp_sg_paddr, sizeof (uint32_t) * NV_QUEUE_SLOTS);
2296 	nvp->nvp_sg_paddr = NULL;
2297 
2298 	nvp->nvp_state &= ~NV_PORT_INIT;
2299 	nvp->nvp_signature = 0;
2300 }
2301 
2302 
2303 /*
2304  * Cache register offsets and access handles to frequently accessed registers
2305  * which are common to both chipsets.
2306  */
2307 static void
2308 nv_common_reg_init(nv_ctl_t *nvc)
2309 {
2310 	uchar_t *bar5_addr = nvc->nvc_bar_addr[5];
2311 	uchar_t *bm_addr_offset, *sreg_offset;
2312 	uint8_t bar, port;
2313 	nv_port_t *nvp;
2314 
2315 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2316 		if (port == 0) {
2317 			bar = NV_BAR_0;
2318 			bm_addr_offset = 0;
2319 			sreg_offset = (uchar_t *)(CH0_SREG_OFFSET + bar5_addr);
2320 		} else {
2321 			bar = NV_BAR_2;
2322 			bm_addr_offset = (uchar_t *)8;
2323 			sreg_offset = (uchar_t *)(CH1_SREG_OFFSET + bar5_addr);
2324 		}
2325 
2326 		nvp = &(nvc->nvc_port[port]);
2327 		nvp->nvp_ctlp = nvc;
2328 		nvp->nvp_port_num = port;
2329 		NVLOG((NVDBG_INIT, nvc, nvp, "setting up port mappings"));
2330 
2331 		nvp->nvp_cmd_hdl = nvc->nvc_bar_hdl[bar];
2332 		nvp->nvp_cmd_addr = nvc->nvc_bar_addr[bar];
2333 		nvp->nvp_ctl_hdl = nvc->nvc_bar_hdl[bar + 1];
2334 		nvp->nvp_ctl_addr = nvc->nvc_bar_addr[bar + 1];
2335 		nvp->nvp_bm_hdl = nvc->nvc_bar_hdl[NV_BAR_4];
2336 		nvp->nvp_bm_addr = nvc->nvc_bar_addr[NV_BAR_4] +
2337 		    (long)bm_addr_offset;
2338 
2339 		nvp->nvp_sstatus = (uint32_t *)(sreg_offset + NV_SSTATUS);
2340 		nvp->nvp_serror = (uint32_t *)(sreg_offset + NV_SERROR);
2341 		nvp->nvp_sactive = (uint32_t *)(sreg_offset + NV_SACTIVE);
2342 		nvp->nvp_sctrl = (uint32_t *)(sreg_offset + NV_SCTRL);
2343 	}
2344 }
2345 
2346 
2347 static void
2348 nv_uninit_ctl(nv_ctl_t *nvc)
2349 {
2350 	int port;
2351 	nv_port_t *nvp;
2352 
2353 	NVLOG((NVDBG_INIT, nvc, NULL, "nv_uninit_ctl entered"));
2354 
2355 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2356 		nvp = &(nvc->nvc_port[port]);
2357 		mutex_enter(&nvp->nvp_mutex);
2358 		NVLOG((NVDBG_INIT, nvc, nvp, "uninitializing port"));
2359 		nv_uninit_port(nvp);
2360 		mutex_exit(&nvp->nvp_mutex);
2361 		mutex_destroy(&nvp->nvp_mutex);
2362 		cv_destroy(&nvp->nvp_poll_cv);
2363 	}
2364 
2365 	kmem_free(nvc->nvc_port, NV_MAX_PORTS(nvc) * sizeof (nv_port_t));
2366 	nvc->nvc_port = NULL;
2367 }
2368 
2369 
2370 /*
2371  * mcp04 interrupt.  This is a wrapper around mcp04_intr_process so
2372  * that interrupts from other devices can be disregarded while dtracing.
2373  */
2374 /* ARGSUSED */
2375 static uint_t
2376 mcp04_intr(caddr_t arg1, caddr_t arg2)
2377 {
2378 	nv_ctl_t *nvc = (nv_ctl_t *)arg1;
2379 	uint8_t intr_status;
2380 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2381 
2382 	intr_status = ddi_get8(bar5_hdl, nvc->nvc_mcp04_int_status);
2383 
2384 	if (intr_status == 0) {
2385 
2386 		return (DDI_INTR_UNCLAIMED);
2387 	}
2388 
2389 	mcp04_intr_process(nvc, intr_status);
2390 
2391 	return (DDI_INTR_CLAIMED);
2392 }
2393 
2394 
2395 /*
2396  * Main interrupt handler for ck804.  handles normal device
2397  * interrupts as well as port hot plug and remove interrupts.
2398  *
2399  */
2400 static void
2401 mcp04_intr_process(nv_ctl_t *nvc, uint8_t intr_status)
2402 {
2403 
2404 	int port, i;
2405 	nv_port_t *nvp;
2406 	nv_slot_t *nv_slotp;
2407 	uchar_t	status;
2408 	sata_pkt_t *spkt;
2409 	uint8_t bmstatus, clear_bits;
2410 	ddi_acc_handle_t bmhdl;
2411 	int nvcleared = 0;
2412 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2413 	uint32_t sstatus;
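	/*
	 * index 0 of the masks below corresponds to the primary device and
	 * index 1 to the secondary device.
	 */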
2414 	int port_mask_hot[] = {
2415 		MCP04_INT_PDEV_HOT, MCP04_INT_SDEV_HOT,
2416 	};
2417 	int port_mask_pm[] = {
2418 		MCP04_INT_PDEV_PM, MCP04_INT_SDEV_PM,
2419 	};
2420 
2421 	NVLOG((NVDBG_INTR, nvc, NULL,
2422 	    "mcp04_intr_process entered intr_status=%x", intr_status));
2423 
2424 	/*
2425 	 * For the command completion interrupt, an explicit clear is not
2426 	 * required.  However, for the error cases an explicit clear is performed.
2427 	 */
2428 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2429 
2430 		int port_mask[] = {MCP04_INT_PDEV_INT, MCP04_INT_SDEV_INT};
2431 
2432 		if ((port_mask[port] & intr_status) == 0) {
2433 			continue;
2434 		}
2435 
2436 		NVLOG((NVDBG_INTR, nvc, NULL,
2437 		    "mcp04_intr_process interrupt on port %d", port));
2438 
2439 		nvp = &(nvc->nvc_port[port]);
2440 
2441 		mutex_enter(&nvp->nvp_mutex);
2442 
2443 		/*
2444 		 * a corner case was found where an interrupt
2445 		 * arrived before nvp_slot was set.  Should
2446 		 * probably track down why that happens, try
2447 		 * to eliminate that source, and then get rid of this
2448 		 * check.
2449 		 */
2450 		if (nvp->nvp_slot == NULL) {
2451 			status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_status);
2452 			NVLOG((NVDBG_ALWAYS, nvc, nvp, "spurious interrupt "
2453 			    "received before initialization "
2454 			    "completed status=%x", status));
2455 			mutex_exit(&nvp->nvp_mutex);
2456 
2457 			/*
2458 			 * clear interrupt bits
2459 			 */
2460 			nv_put8(bar5_hdl, nvc->nvc_mcp04_int_status,
2461 			    port_mask[port]);
2462 
2463 			continue;
2464 		}
2465 
2466 		if ((&(nvp->nvp_slot[0]))->nvslot_spkt == NULL)  {
2467 			status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_status);
2468 			NVLOG((NVDBG_ALWAYS, nvc, nvp, "spurious interrupt "
2469 			    " no command in progress status=%x", status));
2470 			mutex_exit(&nvp->nvp_mutex);
2471 
2472 			/*
2473 			 * clear interrupt bits
2474 			 */
2475 			nv_put8(bar5_hdl, nvc->nvc_mcp04_int_status,
2476 			    port_mask[port]);
2477 
2478 			continue;
2479 		}
2480 
2481 		bmhdl = nvp->nvp_bm_hdl;
2482 		bmstatus = nv_get8(bmhdl, nvp->nvp_bmisx);
2483 
2484 		if (!(bmstatus & BMISX_IDEINTS)) {
2485 			mutex_exit(&nvp->nvp_mutex);
2486 
2487 			continue;
2488 		}
2489 
2490 		status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
2491 
2492 		if (status & SATA_STATUS_BSY) {
2493 			mutex_exit(&nvp->nvp_mutex);
2494 
2495 			continue;
2496 		}
2497 
2498 		nv_slotp = &(nvp->nvp_slot[0]);
2499 
2500 		ASSERT(nv_slotp);
2501 
2502 		spkt = nv_slotp->nvslot_spkt;
2503 
2504 		if (spkt == NULL) {
2505 			mutex_exit(&nvp->nvp_mutex);
2506 
2507 			continue;
2508 		}
2509 
2510 		(*nv_slotp->nvslot_intr)(nvp, nv_slotp);
2511 
2512 		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
2513 
2514 		/*
2515 		 * If there is no link, the completion of the packet cannot be
2516 		 * relied upon, so abort it.
2517 		 */
2518 		if (nv_check_link((&spkt->satapkt_device)->
2519 		    satadev_scr.sstatus) == B_FALSE) {
2520 
2521 			(void) nv_abort_active(nvp, NULL, SATA_PKT_PORT_ERROR);
2522 
2523 		} else if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
2524 
2525 			nv_complete_io(nvp, spkt, 0);
2526 		}
2527 
2528 		mutex_exit(&nvp->nvp_mutex);
2529 	}
2530 
2531 	/*
2532 	 * mcp04 often doesn't correctly distinguish hot add/remove
2533 	 * interrupts.  Frequently both the ADD and the REMOVE bits
2534 	 * are asserted, whether it was a remove or add.  Use sstatus
2535 	 * to distinguish hot add from hot remove.
2536 	 */
2537 
2538 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2539 		clear_bits = 0;
2540 
2541 		nvp = &(nvc->nvc_port[port]);
2542 		mutex_enter(&nvp->nvp_mutex);
2543 
2544 		if ((port_mask_pm[port] & intr_status) != 0) {
2545 			clear_bits = port_mask_pm[port];
2546 			NVLOG((NVDBG_HOT, nvc, nvp,
2547 			    "clearing PM interrupt bit: %x",
2548 			    intr_status & port_mask_pm[port]));
2549 		}
2550 
2551 		if ((port_mask_hot[port] & intr_status) == 0) {
2552 			if (clear_bits != 0) {
2553 				goto clear;
2554 			} else {
2555 				mutex_exit(&nvp->nvp_mutex);
2556 				continue;
2557 			}
2558 		}
2559 
2560 		/*
2561 		 * reaching here means there was a hot add or remove.
2562 		 */
2563 		clear_bits |= port_mask_hot[port];
2564 
2565 		ASSERT(nvc->nvc_port[port].nvp_sstatus);
2566 
2567 		sstatus = nv_get32(bar5_hdl,
2568 		    nvc->nvc_port[port].nvp_sstatus);
2569 
2570 		if ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) ==
2571 		    SSTATUS_DET_DEVPRE_PHYCOM) {
2572 			nv_report_add_remove(nvp, 0);
2573 		} else {
2574 			nv_report_add_remove(nvp, NV_PORT_HOTREMOVED);
2575 		}
2576 	clear:
2577 		/*
2578 		 * clear interrupt bits.  explicit interrupt clear is
2579 		 * required for hotplug interrupts.
2580 		 */
2581 		nv_put8(bar5_hdl, nvc->nvc_mcp04_int_status, clear_bits);
2582 
2583 		/*
2584 		 * make sure it's flushed and cleared.  If not try
2585 		 * again.  Sometimes it has been observed to not clear
2586 		 * on the first try.
2587 		 */
2588 		intr_status = nv_get8(bar5_hdl, nvc->nvc_mcp04_int_status);
2589 
2590 		/*
2591 		 * make 10 additional attempts to clear the interrupt
2592 		 */
2593 		for (i = 0; (intr_status & clear_bits) && (i < 10); i++) {
2594 			NVLOG((NVDBG_ALWAYS, nvc, nvp, "inst_status=%x "
2595 			    "still not clear try=%d", intr_status,
2596 			    ++nvcleared));
2597 			nv_put8(bar5_hdl, nvc->nvc_mcp04_int_status,
2598 			    clear_bits);
2599 			intr_status = nv_get8(bar5_hdl,
2600 			    nvc->nvc_mcp04_int_status);
2601 		}
2602 
2603 		/*
2604 		 * if still not clear, log a message and disable the
2605 		 * port. highly unlikely that this path is taken, but it
2606 		 * gives protection against a wedged interrupt.
2607 		 */
2608 		if (intr_status & clear_bits) {
2609 			(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
2610 			nv_port_state_change(nvp, SATA_EVNT_PORT_FAILED,
2611 			    SATA_ADDR_CPORT, SATA_PSTATE_FAILED);
2612 			nvp->nvp_state |= NV_PORT_FAILED;
2613 			(void) nv_abort_active(nvp, NULL, SATA_PKT_DEV_ERROR);
2614 			nv_cmn_err(CE_WARN, nvc, nvp, "unable to clear "
2615 			    "interrupt.  disabling port intr_status=%X",
2616 			    intr_status);
2617 		}
2618 
2619 		mutex_exit(&nvp->nvp_mutex);
2620 	}
2621 }
2622 
2623 
2624 /*
2625  * Interrupt handler for mcp55.  It is invoked by the wrapper for each port
2626  * on the controller, to handle completion and hot plug and remove events.
2627  *
2628  */
2629 static uint_t
2630 mcp55_intr_port(nv_port_t *nvp)
2631 {
2632 	nv_ctl_t *nvc = nvp->nvp_ctlp;
2633 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2634 	uint8_t clear = 0, intr_cycles = 0;
2635 	int ret = DDI_INTR_UNCLAIMED;
2636 	uint16_t int_status;
2637 
2638 	NVLOG((NVDBG_INTR, nvc, nvp, "mcp55_intr_port entered"));
2639 
2640 	for (;;) {
2641 		/*
2642 		 * read current interrupt status
2643 		 */
2644 		int_status = nv_get16(bar5_hdl, nvp->nvp_mcp55_int_status);
2645 
2646 		NVLOG((NVDBG_INTR, nvc, nvp, "int_status = %x", int_status));
2647 
2648 		/*
2649 		 * MCP55_INT_IGNORE interrupts will show up in the status,
2650 		 * but are masked out from causing an interrupt to be generated
2651 		 * to the processor.  Ignore them here by masking them out.
2652 		 */
2653 		int_status &= ~(MCP55_INT_IGNORE);
2654 
2655 		/*
2656 		 * exit the loop when no more interrupts to process
2657 		 */
2658 		if (int_status == 0) {
2659 
2660 			break;
2661 		}
2662 
2663 		if (int_status & MCP55_INT_COMPLETE) {
2664 			NVLOG((NVDBG_INTR, nvc, nvp,
2665 			    "mcp55_packet_complete_intr"));
2666 			/*
2667 			 * since int_status was set, return DDI_INTR_CLAIMED
2668 			 * from the DDI's perspective even though the packet
2669 			 * completion may not have succeeded.  If it fails,
2670 			 * need to manually clear the interrupt, otherwise
2671 			 * clearing is implicit.
2672 			 */
2673 			ret = DDI_INTR_CLAIMED;
2674 			if (mcp55_packet_complete_intr(nvc, nvp) ==
2675 			    NV_FAILURE) {
2676 				clear = MCP55_INT_COMPLETE;
2677 			}
2678 		}
2679 
2680 		if (int_status & MCP55_INT_DMA_SETUP) {
2681 			NVLOG((NVDBG_INTR, nvc, nvp, "mcp55_dma_setup_intr"));
2682 
2683 			/*
2684 			 * Needs to be cleared before starting the BM, so do it
2685 			 * now.  make sure this is still working.
2686 			 */
2687 			nv_put16(bar5_hdl, nvp->nvp_mcp55_int_status,
2688 			    MCP55_INT_DMA_SETUP);
2689 #ifdef NCQ
2690 			ret = mcp55_dma_setup_intr(nvc, nvp);
2691 #endif
2692 		}
2693 
2694 		if (int_status & MCP55_INT_REM) {
2695 			NVLOG((NVDBG_INTR, nvc, nvp, "mcp55 device removed"));
2696 			clear = MCP55_INT_REM;
2697 			ret = DDI_INTR_CLAIMED;
2698 
2699 			mutex_enter(&nvp->nvp_mutex);
2700 			nv_report_add_remove(nvp, NV_PORT_HOTREMOVED);
2701 			mutex_exit(&nvp->nvp_mutex);
2702 
2703 		} else if (int_status & MCP55_INT_ADD) {
2704 			NVLOG((NVDBG_HOT, nvc, nvp, "mcp55 device added"));
2705 			clear = MCP55_INT_ADD;
2706 			ret = DDI_INTR_CLAIMED;
2707 
2708 			mutex_enter(&nvp->nvp_mutex);
2709 			nv_report_add_remove(nvp, 0);
2710 			mutex_exit(&nvp->nvp_mutex);
2711 		}
2712 
2713 		if (clear) {
2714 			nv_put16(bar5_hdl, nvp->nvp_mcp55_int_status, clear);
2715 			clear = 0;
2716 		}
2717 
2718 		if (intr_cycles++ == NV_MAX_INTR_LOOP) {
2719 			nv_cmn_err(CE_WARN, nvc, nvp, "excessive interrupt "
2720 			    "processing.  Disabling port int_status=%X"
2721 			    " clear=%X", int_status, clear);
2722 			mutex_enter(&nvp->nvp_mutex);
2723 			(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
2724 			nv_port_state_change(nvp, SATA_EVNT_PORT_FAILED,
2725 			    SATA_ADDR_CPORT, SATA_PSTATE_FAILED);
2726 			nvp->nvp_state |= NV_PORT_FAILED;
2727 			(void) nv_abort_active(nvp, NULL, SATA_PKT_DEV_ERROR);
2728 			mutex_exit(&nvp->nvp_mutex);
2729 		}
2730 	}
2731 
2732 	NVLOG((NVDBG_INTR, nvc, nvp, "mcp55_intr_port: finished ret=%d", ret));
2733 
2734 	return (ret);
2735 }
2736 
2737 
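/*
 * Interrupt entry point for mcp55.  Runs the per-port handler on each
 * port; the interrupt is claimed if either port handler claimed it.
 */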
2738 /* ARGSUSED */
2739 static uint_t
2740 mcp55_intr(caddr_t arg1, caddr_t arg2)
2741 {
2742 	nv_ctl_t *nvc = (nv_ctl_t *)arg1;
2743 	int ret;
2744 
2745 	ret = mcp55_intr_port(&(nvc->nvc_port[0]));
2746 	ret |= mcp55_intr_port(&(nvc->nvc_port[1]));
2747 
2748 	return (ret);
2749 }
2750 
2751 
2752 #ifdef NCQ
2753 /*
2754  * with software driven NCQ on mcp55, an interrupt occurs right
2755  * before the drive is ready to do a DMA transfer.  At this point,
2756  * the PRD table needs to be programmed and the DMA engine enabled
2757  * and ready to go.
2758  *
2759  * -- MCP_SATA_AE_INT_STATUS_SDEV_DMA_SETUP indicates the interrupt
2760  * -- MCP_SATA_AE_NCQ_PDEV_DMA_SETUP_TAG shows which command is ready
2761  * -- clear bit 0 of master command reg
2762  * -- program PRD
2763  * -- clear the interrupt status bit for the DMA Setup FIS
2764  * -- set bit 0 of the bus master command register
2765  */
2766 static int
2767 mcp55_dma_setup_intr(nv_ctl_t *nvc, nv_port_t *nvp)
2768 {
2769 	int slot;
2770 	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
2771 	uint8_t bmicx;
2772 	int port = nvp->nvp_port_num;
2773 	uint8_t tag_shift[] = {MCP_SATA_AE_NCQ_PDEV_DMA_SETUP_TAG_SHIFT,
2774 	    MCP_SATA_AE_NCQ_SDEV_DMA_SETUP_TAG_SHIFT};
2775 
2776 	nv_cmn_err(CE_PANIC, nvc, nvp,
2777 	    "this should not be executed at all until NCQ");
2778 
2779 	mutex_enter(&nvp->nvp_mutex);
2780 
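	/*
	 * extract the tag of the command that is ready for DMA setup from
	 * this port's field in the shared NCQ register.
	 */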
2781 	slot = nv_get32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp55_ncq);
2782 
2783 	slot = (slot >> tag_shift[port]) & MCP_SATA_AE_NCQ_DMA_SETUP_TAG_MASK;
2784 
2785 	NVLOG((NVDBG_INTR, nvc, nvp, "mcp55_dma_setup_intr slot %d"
2786 	    " nvp_slot_sactive %X", slot, nvp->nvp_sactive_cache));
2787 
2788 	/*
2789 	 * halt the DMA engine.  This step is necessary according to
2790 	 * the mcp55 spec, probably since there may have been a "first" packet
2791 	 * that already programmed the DMA engine, but may not turn out to
2792 	 * be the first one processed.
2793 	 */
2794 	bmicx = nv_get8(bmhdl, nvp->nvp_bmicx);
2795 
2796 #if 0
2797 	if (bmicx & BMICX_SSBM) {
2798 		NVLOG((NVDBG_INTR, nvc, nvp, "BM was already enabled for "
2799 		    "another packet.  Cancelling and reprogramming"));
2800 		nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
2801 	}
2802 #endif
2803 	nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
2804 
2805 	nv_start_dma_engine(nvp, slot);
2806 
2807 	mutex_exit(&nvp->nvp_mutex);
2808 
2809 	return (DDI_INTR_CLAIMED);
2810 }
2811 #endif /* NCQ */
2812 
2813 
2814 /*
2815  * packet completion interrupt.  If the packet is complete, invoke
2816  * the packet completion callback.
2817  */
2818 static int
2819 mcp55_packet_complete_intr(nv_ctl_t *nvc, nv_port_t *nvp)
2820 {
2821 	uint8_t status, bmstatus;
2822 	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
2823 	int sactive;
2824 	int active_pkt_bit = 0, active_pkt = 0, ncq_command = B_FALSE;
2825 	sata_pkt_t *spkt;
2826 	nv_slot_t *nv_slotp;
2827 
2828 	mutex_enter(&nvp->nvp_mutex);
2829 
2830 	bmstatus = nv_get8(bmhdl, nvp->nvp_bmisx);
2831 
2832 	if (!(bmstatus & BMISX_IDEINTS)) {
2833 		NVLOG((NVDBG_INTR, nvc, nvp, "BMISX_IDEINTS not set"));
2834 		mutex_exit(&nvp->nvp_mutex);
2835 
2836 		return (NV_FAILURE);
2837 	}
2838 
2839 	/*
2840 	 * If the just completed item is a non-ncq command, the busy
2841 	 * bit should not be set
2842 	 */
2843 	if (nvp->nvp_non_ncq_run) {
2844 		status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
2845 		if (status & SATA_STATUS_BSY) {
2846 			nv_cmn_err(CE_WARN, nvc, nvp,
2847 			    "unexpected SATA_STATUS_BSY set");
2848 			mutex_exit(&nvp->nvp_mutex);
2849 			/*
2850 			 * calling function will clear interrupt.  then
2851 			 * the real interrupt will either arrive or the
2852 			 * packet timeout handling will take over and
2853 			 * reset.
2854 			 */
2855 			return (NV_FAILURE);
2856 		}
2857 
2858 	} else {
2859 		/*
2860 		 * NCQ: check for BSY here and wait if still busy before
2861 		 * continuing.  Rather than waste CPU time waiting for it to
2862 		 * clear when starting a packet, the starting thread can exit
2863 		 * immediately, but may then have to spin here for a while.
2864 		 * Needs more work and experimentation.
2865 		 */
2866 		ASSERT(nvp->nvp_ncq_run);
2867 	}
2868 
2869 
2870 	if (nvp->nvp_ncq_run) {
2871 		ncq_command = B_TRUE;
2872 		ASSERT(nvp->nvp_non_ncq_run == 0);
2873 	} else {
2874 		ASSERT(nvp->nvp_non_ncq_run != 0);
2875 	}
2876 
2877 	/*
2878 	 * active_pkt_bit will represent the bitmap of the single completed
2879 	 * packet.  Because of the nature of sw assisted NCQ, only one
2880 	 * command will complete per interrupt.
2881 	 */
2882 
2883 	if (ncq_command == B_FALSE) {
2884 		active_pkt = 0;
2885 	} else {
2886 		/*
2887 		 * NCQ: determine which command just completed, by examining
2888 		 * which bit cleared in the register since last written.
2889 		 */
2890 		sactive = nv_get32(nvc->nvc_bar_hdl[5], nvp->nvp_sactive);
2891 
2892 		active_pkt_bit = ~sactive & nvp->nvp_sactive_cache;
2893 
2894 		ASSERT(active_pkt_bit);
2895 
2896 
2897 		/*
2898 		 * this failure path needs more work to handle the
2899 		 * error condition and recovery.
2900 		 */
2901 		if (active_pkt_bit == 0) {
2902 			ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
2903 
2904 			nv_cmn_err(CE_CONT, nvc, nvp, "ERROR sactive = %X  "
2905 			    "nvp->nvp_sactive %X", sactive,
2906 			    nvp->nvp_sactive_cache);
2907 
2908 			(void) nv_get8(cmdhdl, nvp->nvp_status);
2909 
2910 			mutex_exit(&nvp->nvp_mutex);
2911 
2912 			return (NV_FAILURE);
2913 		}
2914 
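		/*
		 * convert the single set bit into its slot index by
		 * shifting right until the low bit is set.
		 */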
2915 		for (active_pkt = 0; (active_pkt_bit & 0x1) != 0x1;
2916 		    active_pkt++, active_pkt_bit >>= 1) {
2917 		}
2918 
2919 		/*
2920 		 * make sure only one bit is ever turned on
2921 		 */
2922 		ASSERT(active_pkt_bit == 1);
2923 
2924 		nvp->nvp_sactive_cache &= ~(0x01 << active_pkt);
2925 	}
2926 
2927 	nv_slotp = &(nvp->nvp_slot[active_pkt]);
2928 
2929 	spkt = nv_slotp->nvslot_spkt;
2930 
2931 	ASSERT(spkt != NULL);
2932 
2933 	(*nv_slotp->nvslot_intr)(nvp, nv_slotp);
2934 
2935 	nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
2936 
2937 	/*
2938 	 * If there is no link, the completion of the packet cannot be
2939 	 * relied upon, so abort it.
2940 	 */
2941 	if (nv_check_link((&spkt->satapkt_device)->
2942 	    satadev_scr.sstatus) == B_FALSE) {
2943 		(void) nv_abort_active(nvp, NULL, SATA_PKT_PORT_ERROR);
2944 
2945 	} else if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
2946 
2947 		nv_complete_io(nvp, spkt, active_pkt);
2948 	}
2949 
2950 	mutex_exit(&nvp->nvp_mutex);
2951 
2952 	return (NV_SUCCESS);
2953 }
2954 
2955 
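/*
 * Common command completion: update the NCQ/non-NCQ run counts, release
 * the slot, and either wake a synchronous waiter or invoke the packet
 * completion callback.
 */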
2956 static void
2957 nv_complete_io(nv_port_t *nvp, sata_pkt_t *spkt, int slot)
2958 {
2959 
2960 	ASSERT(MUTEX_HELD(&nvp->nvp_mutex));
2961 
2962 	if ((&(nvp->nvp_slot[slot]))->nvslot_flags & NVSLOT_NCQ) {
2963 		nvp->nvp_ncq_run--;
2964 	} else {
2965 		nvp->nvp_non_ncq_run--;
2966 	}
2967 
2968 	/*
2969 	 * mark the packet slot idle before calling satapkt_comp so that the
2970 	 * slot can be reused.
2971 	 */
2972 	(&(nvp->nvp_slot[slot]))->nvslot_spkt = NULL;
2973 
2974 	if (spkt->satapkt_op_mode & SATA_OPMODE_SYNCH) {
2975 		/*
2976 		 * Unless this is a timed polled mode cmd, which has an
2977 		 * active thread monitoring for completion, signal the
2978 		 * sleeping thread that the cmd is complete.
2979 		 */
2980 		if ((spkt->satapkt_op_mode & SATA_OPMODE_POLLING) == 0) {
2981 			cv_signal(&nvp->nvp_poll_cv);
2982 		}
2983 
2984 		return;
2985 	}
2986 
2987 	if (spkt->satapkt_comp != NULL) {
2988 		mutex_exit(&nvp->nvp_mutex);
2989 		(*spkt->satapkt_comp)(spkt);
2990 		mutex_enter(&nvp->nvp_mutex);
2991 	}
2992 }
2993 
2994 
2995 /*
2996  * check whether packet is ncq command or not.  for ncq command,
2997  * start it if there is still room on queue.  for non-ncq command only
2998  * start if no other command is running.
2999  */
3000 static int
3001 nv_start_async(nv_port_t *nvp, sata_pkt_t *spkt)
3002 {
3003 	uint8_t cmd, ncq;
3004 
3005 	NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp, "nv_start_async: entry"));
3006 
3007 	cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
3008 
3009 	ncq = ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
3010 	    (cmd == SATAC_READ_FPDMA_QUEUED));
3011 
3012 	if (ncq == B_FALSE) {
3013 
3014 		if ((nvp->nvp_non_ncq_run == 1) ||
3015 		    (nvp->nvp_ncq_run > 0)) {
3016 			/*
3017 			 * next command is non-ncq which can't run
3018 			 * concurrently.  exit and return queue full.
3019 			 */
3020 			spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3021 
3022 			return (SATA_TRAN_QUEUE_FULL);
3023 		}
3024 
3025 		return (nv_start_common(nvp, spkt));
3026 	}
3027 
3028 	/*
3029 	 * ncq == B_TRUE
3030 	 */
3031 	if (nvp->nvp_non_ncq_run == 1) {
3032 		/*
3033 		 * cannot start any NCQ commands when there
3034 		 * is a non-NCQ command running.
3035 		 */
3036 		spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3037 
3038 		return (SATA_TRAN_QUEUE_FULL);
3039 	}
3040 
3041 #ifdef NCQ
3042 	/*
3043 	 * this is not compiled for now as satapkt_device.satadev_qdepth
3044 	 * is being pulled out until NCQ support is later addressed
3045 	 *
3046 	 * nvp_queue_depth is initialized by the first NCQ command
3047 	 * received.
3048 	 */
3049 	if (nvp->nvp_queue_depth == 1) {
3050 		nvp->nvp_queue_depth =
3051 		    spkt->satapkt_device.satadev_qdepth;
3052 
3053 		ASSERT(nvp->nvp_queue_depth > 1);
3054 
3055 		NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp,
3056 		    "nv_process_queue: nvp_queue_depth set to %d",
3057 		    nvp->nvp_queue_depth));
3058 	}
3059 #endif
3060 
3061 	if (nvp->nvp_ncq_run >= nvp->nvp_queue_depth) {
3062 		/*
3063 		 * max number of NCQ commands already active
3064 		 */
3065 		spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3066 
3067 		return (SATA_TRAN_QUEUE_FULL);
3068 	}
3069 
3070 	return (nv_start_common(nvp, spkt));
3071 }
3072 
3073 
3074 /*
3075  * configure INTx and legacy interrupts
3076  */
3077 static int
3078 nv_add_legacy_intrs(nv_ctl_t *nvc)
3079 {
3080 	dev_info_t	*devinfo = nvc->nvc_dip;
3081 	int		actual, count = 0;
3082 	int		x, y, rc, inum = 0;
3083 
3084 	NVLOG((NVDBG_ENTRY, nvc, NULL, "nv_add_legacy_intrs"));
3085 
3086 	/*
3087 	 * get number of interrupts
3088 	 */
3089 	rc = ddi_intr_get_nintrs(devinfo, DDI_INTR_TYPE_FIXED, &count);
3090 	if ((rc != DDI_SUCCESS) || (count == 0)) {
3091 		NVLOG((NVDBG_INTR, nvc, NULL,
3092 		    "ddi_intr_get_nintrs() failed, "
3093 		    "rc %d count %d", rc, count));
3094 
3095 		return (DDI_FAILURE);
3096 	}
3097 
3098 	/*
3099 	 * allocate an array of interrupt handles
3100 	 */
3101 	nvc->nvc_intr_size = count * sizeof (ddi_intr_handle_t);
3102 	nvc->nvc_htable = kmem_zalloc(nvc->nvc_intr_size, KM_SLEEP);
3103 
3104 	/*
3105 	 * call ddi_intr_alloc()
3106 	 */
3107 	rc = ddi_intr_alloc(devinfo, nvc->nvc_htable, DDI_INTR_TYPE_FIXED,
3108 	    inum, count, &actual, DDI_INTR_ALLOC_STRICT);
3109 
3110 	if ((rc != DDI_SUCCESS) || (actual == 0)) {
3111 		nv_cmn_err(CE_WARN, nvc, NULL,
3112 		    "ddi_intr_alloc() failed, rc %d", rc);
3113 		kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3114 
3115 		return (DDI_FAILURE);
3116 	}
3117 
3118 	if (actual < count) {
3119 		nv_cmn_err(CE_WARN, nvc, NULL,
3120 		    "ddi_intr_alloc: requested: %d, received: %d",
3121 		    count, actual);
3122 
3123 		goto failure;
3124 	}
3125 
3126 	nvc->nvc_intr_cnt = actual;
3127 
3128 	/*
3129 	 * get intr priority
3130 	 */
3131 	if (ddi_intr_get_pri(nvc->nvc_htable[0], &nvc->nvc_intr_pri) !=
3132 	    DDI_SUCCESS) {
3133 		nv_cmn_err(CE_WARN, nvc, NULL, "ddi_intr_get_pri() failed");
3134 
3135 		goto failure;
3136 	}
3137 
3138 	/*
3139 	 * Test for high level mutex
3140 	 */
3141 	if (nvc->nvc_intr_pri >= ddi_intr_get_hilevel_pri()) {
3142 		nv_cmn_err(CE_WARN, nvc, NULL,
3143 		    "nv_add_legacy_intrs: high level intr not supported");
3144 
3145 		goto failure;
3146 	}
3147 
3148 	for (x = 0; x < actual; x++) {
3149 		if (ddi_intr_add_handler(nvc->nvc_htable[x],
3150 		    nvc->nvc_interrupt, (caddr_t)nvc, NULL) != DDI_SUCCESS) {
3151 			nv_cmn_err(CE_WARN, nvc, NULL,
3152 			    "ddi_intr_add_handler() failed");
3153 
3154 			goto failure;
3155 		}
3156 	}
3157 
3158 	/*
3159 	 * call ddi_intr_enable() for legacy interrupts
3160 	 */
3161 	for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3162 		(void) ddi_intr_enable(nvc->nvc_htable[x]);
3163 	}
3164 
3165 	return (DDI_SUCCESS);
3166 
3167 	failure:
3168 	/*
3169 	 * free allocated intr and nvc_htable
3170 	 */
3171 	for (y = 0; y < actual; y++) {
3172 		(void) ddi_intr_free(nvc->nvc_htable[y]);
3173 	}
3174 
3175 	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3176 
3177 	return (DDI_FAILURE);
3178 }
3179 
3180 #ifdef	NV_MSI_SUPPORTED
3181 /*
3182  * configure MSI interrupts
3183  */
3184 static int
3185 nv_add_msi_intrs(nv_ctl_t *nvc)
3186 {
3187 	dev_info_t	*devinfo = nvc->nvc_dip;
3188 	int		count, avail, actual;
3189 	int		x, y, rc, inum = 0;
3190 
3191 	NVLOG((NVDBG_ENTRY, nvc, NULL, "nv_add_msi_intrs"));
3192 
3193 	/*
3194 	 * get number of interrupts
3195 	 */
3196 	rc = ddi_intr_get_nintrs(devinfo, DDI_INTR_TYPE_MSI, &count);
3197 	if ((rc != DDI_SUCCESS) || (count == 0)) {
3198 		nv_cmn_err(CE_WARN, nvc, NULL,
3199 		    "ddi_intr_get_nintrs() failed, "
3200 		    "rc %d count %d", rc, count);
3201 
3202 		return (DDI_FAILURE);
3203 	}
3204 
3205 	/*
3206 	 * get number of available interrupts
3207 	 */
3208 	rc = ddi_intr_get_navail(devinfo, DDI_INTR_TYPE_MSI, &avail);
3209 	if ((rc != DDI_SUCCESS) || (avail == 0)) {
3210 		nv_cmn_err(CE_WARN, nvc, NULL,
3211 		    "ddi_intr_get_navail() failed, "
3212 		    "rc %d avail %d", rc, avail);
3213 
3214 		return (DDI_FAILURE);
3215 	}
3216 
3217 	if (avail < count) {
3218 		nv_cmn_err(CE_WARN, nvc, NULL,
3219 		    "ddi_intr_get_navail returned %d ddi_intr_get_nintrs: %d",
3220 		    avail, count);
3221 	}
3222 
3223 	/*
3224 	 * allocate an array of interrupt handles
3225 	 */
3226 	nvc->nvc_intr_size = count * sizeof (ddi_intr_handle_t);
3227 	nvc->nvc_htable = kmem_alloc(nvc->nvc_intr_size, KM_SLEEP);
3228 
3229 	rc = ddi_intr_alloc(devinfo, nvc->nvc_htable, DDI_INTR_TYPE_MSI,
3230 	    inum, count, &actual, DDI_INTR_ALLOC_NORMAL);
3231 
3232 	if ((rc != DDI_SUCCESS) || (actual == 0)) {
3233 		nv_cmn_err(CE_WARN, nvc, NULL,
3234 		    "ddi_intr_alloc() failed, rc %d", rc);
3235 		kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3236 
3237 		return (DDI_FAILURE);
3238 	}
3239 
3240 	/*
3241 	 * Use interrupt count returned or abort?
3242 	 */
3243 	if (actual < count) {
3244 		NVLOG((NVDBG_INIT, nvc, NULL,
3245 		    "Requested: %d, Received: %d", count, actual));
3246 	}
3247 
3248 	nvc->nvc_intr_cnt = actual;
3249 
3250 	/*
3251 	 * get priority for first msi, assume remaining are all the same
3252 	 */
3253 	if (ddi_intr_get_pri(nvc->nvc_htable[0], &nvc->nvc_intr_pri) !=
3254 	    DDI_SUCCESS) {
3255 		nv_cmn_err(CE_WARN, nvc, NULL, "ddi_intr_get_pri() failed");
3256 
3257 		goto failure;
3258 	}
3259 
3260 	/*
3261 	 * test for high level mutex
3262 	 */
3263 	if (nvc->nvc_intr_pri >= ddi_intr_get_hilevel_pri()) {
3264 		nv_cmn_err(CE_WARN, nvc, NULL,
3265 		    "nv_add_msi_intrs: high level intr not supported");
3266 
3267 		goto failure;
3268 	}
3269 
3270 	/*
3271 	 * Call ddi_intr_add_handler()
3272 	 */
3273 	for (x = 0; x < actual; x++) {
3274 		if (ddi_intr_add_handler(nvc->nvc_htable[x],
3275 		    nvc->nvc_interrupt, (caddr_t)nvc, NULL) != DDI_SUCCESS) {
3276 			nv_cmn_err(CE_WARN, nvc, NULL,
3277 			    "ddi_intr_add_handler() failed");
3278 
3279 			goto failure;
3280 		}
3281 	}
3282 
3283 	(void) ddi_intr_get_cap(nvc->nvc_htable[0], &nvc->nvc_intr_cap);
3284 
3285 	if (nvc->nvc_intr_cap & DDI_INTR_FLAG_BLOCK) {
3286 		(void) ddi_intr_block_enable(nvc->nvc_htable,
3287 		    nvc->nvc_intr_cnt);
3288 	} else {
3289 		/*
3290 		 * Call ddi_intr_enable() for MSI non block enable
3291 		 */
3292 		for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3293 			(void) ddi_intr_enable(nvc->nvc_htable[x]);
3294 		}
3295 	}
3296 
3297 	return (DDI_SUCCESS);
3298 
3299 	failure:
3300 	/*
3301 	 * free allocated intr and nvc_htable
3302 	 */
3303 	for (y = 0; y < actual; y++) {
3304 		(void) ddi_intr_free(nvc->nvc_htable[y]);
3305 	}
3306 
3307 	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3308 
3309 	return (DDI_FAILURE);
3310 }
3311 #endif
3312 
3313 
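/*
 * Mask interrupts at the controller, then disable, remove and free all
 * previously added interrupt handlers and the handle table.
 */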
3314 static void
3315 nv_rem_intrs(nv_ctl_t *nvc)
3316 {
3317 	int x, i;
3318 	nv_port_t *nvp;
3319 
3320 	NVLOG((NVDBG_ENTRY, nvc, NULL, "nv_rem_intrs"));
3321 
3322 	/*
3323 	 * prevent controller from generating interrupts by
3324 	 * masking them out.  This is an extra precaution.
3325 	 */
3326 	for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
3327 		nvp = (&nvc->nvc_port[i]);
3328 		mutex_enter(&nvp->nvp_mutex);
3329 		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
3330 		mutex_exit(&nvp->nvp_mutex);
3331 	}
3332 
3333 	/*
3334 	 * disable all interrupts
3335 	 */
3336 	if ((nvc->nvc_intr_type == DDI_INTR_TYPE_MSI) &&
3337 	    (nvc->nvc_intr_cap & DDI_INTR_FLAG_BLOCK)) {
3338 		(void) ddi_intr_block_disable(nvc->nvc_htable,
3339 		    nvc->nvc_intr_cnt);
3340 	} else {
3341 		for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3342 			(void) ddi_intr_disable(nvc->nvc_htable[x]);
3343 		}
3344 	}
3345 
3346 	for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3347 		(void) ddi_intr_remove_handler(nvc->nvc_htable[x]);
3348 		(void) ddi_intr_free(nvc->nvc_htable[x]);
3349 	}
3350 
3351 	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3352 }
3353 
3354 
3355 /*
3356  * variable argument wrapper for cmn_err.  prefixes the instance and port
3357  * number if possible
3358  */
3359 static void
3360 nv_vcmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, va_list ap)
3361 {
3362 	char port[NV_STRING_10];
3363 	char inst[NV_STRING_10];
3364 
3365 	mutex_enter(&nv_log_mutex);
3366 
3367 	if (nvc) {
3368 		(void) snprintf(inst, NV_STRING_10, "inst %d",
3369 		    ddi_get_instance(nvc->nvc_dip));
3370 	} else {
3371 		inst[0] = '\0';
3372 	}
3373 
3374 	if (nvp) {
3375 		(void) sprintf(port, " port %d", nvp->nvp_port_num);
3376 	} else {
3377 		port[0] = '\0';
3378 	}
3379 
3380 	(void) sprintf(nv_log_buf, "nv_sata %s%s%s", inst, port,
3381 	    (inst[0]|port[0] ? ": " :""));
3382 
3383 	(void) vsnprintf(&nv_log_buf[strlen(nv_log_buf)],
3384 	    NV_STRING_512 - strlen(nv_log_buf), fmt, ap);
3385 
3386 	/*
3387 	 * normally set to log to console but in some debug situations it
3388 	 * may be useful to log only to a file.
3389 	 */
3390 	if (nv_log_to_console) {
3391 		if (nv_prom_print) {
3392 			prom_printf("%s\n", nv_log_buf);
3393 		} else {
3394 			cmn_err(ce, "%s", nv_log_buf);
3395 		}
3396 
3397 
3398 	} else {
3399 		cmn_err(ce, "!%s", nv_log_buf);
3400 	}
3401 
3402 	mutex_exit(&nv_log_mutex);
3403 }
3404 
3405 
3406 /*
3407  * wrapper for cmn_err
3408  */
3409 static void
3410 nv_cmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...)
3411 {
3412 	va_list ap;
3413 
3414 	va_start(ap, fmt);
3415 	nv_vcmn_err(ce, nvc, nvp, fmt, ap);
3416 	va_end(ap);
3417 }
3418 
3419 
3420 #if defined(DEBUG)
3421 /*
3422  * prefixes the instance and port number if possible to the debug message
3423  */
3424 static void
3425 nv_log(uint_t flag, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...)
3426 {
3427 	va_list ap;
3428 
3429 	if ((nv_debug_flags & flag) == 0) {
3430 		return;
3431 	}
3432 
3433 	va_start(ap, fmt);
3434 	nv_vcmn_err(CE_NOTE, nvc, nvp, fmt, ap);
3435 	va_end(ap);
3436 
3437 	/*
3438 	 * useful for some debugging situations
3439 	 */
3440 	if (nv_log_delay) {
3441 		drv_usecwait(nv_log_delay);
3442 	}
3443 
3444 }
3445 #endif /* DEBUG */
3446 
3447 
3448 /*
3449  * program registers which are common to all commands
3450  */
3451 static void
3452 nv_program_taskfile_regs(nv_port_t *nvp, int slot)
3453 {
3454 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3455 	sata_pkt_t *spkt;
3456 	sata_cmd_t *satacmd;
3457 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3458 	uint8_t cmd, ncq = B_FALSE;
3459 
3460 	spkt = nv_slotp->nvslot_spkt;
3461 	satacmd = &spkt->satapkt_cmd;
3462 	cmd = satacmd->satacmd_cmd_reg;
3463 
3464 	ASSERT(nvp->nvp_slot);
3465 
3466 	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
3467 	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
3468 		ncq = B_TRUE;
3469 	}
3470 
3471 	/*
3472 	 * select the drive
3473 	 */
3474 	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);
3475 
3476 	/*
3477 	 * make certain the drive is selected
3478 	 */
3479 	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
3480 	    NV_SEC2USEC(5), 0) == B_FALSE) {
3481 
3482 		return;
3483 	}
3484 
3485 	switch (spkt->satapkt_cmd.satacmd_addr_type) {
3486 
3487 	case ATA_ADDR_LBA:
3488 		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp, "ATA_ADDR_LBA mode"));
3489 
3490 		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
3491 		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
3492 		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
3493 		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
3494 
3495 		break;
3496 
3497 	case ATA_ADDR_LBA28:
3498 		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
3499 		    "ATA_ADDR_LBA28 mode"));
3500 		/*
3501 		 * NCQ only uses 48-bit addressing
3502 		 */
3503 		ASSERT(ncq != B_TRUE);
3504 
3505 		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
3506 		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
3507 		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
3508 		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
3509 
3510 		break;
3511 
3512 	case ATA_ADDR_LBA48:
3513 		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
3514 		    "ATA_ADDR_LBA48 mode"));
3515 
3516 		/*
3517 		 * for NCQ, tag goes into count register and real sector count
3518 		 * into features register.  The sata module does the translation
3519 		 * in the satacmd.
3520 		 */
3521 		if (ncq == B_TRUE) {
3522 			nv_put8(cmdhdl, nvp->nvp_count, slot << 3);
3523 			nv_put8(cmdhdl, nvp->nvp_feature,
3524 			    satacmd->satacmd_features_reg_ext);
3525 			nv_put8(cmdhdl, nvp->nvp_feature,
3526 			    satacmd->satacmd_features_reg);
3527 		} else {
3528 			nv_put8(cmdhdl, nvp->nvp_count,
3529 			    satacmd->satacmd_sec_count_msb);
3530 			nv_put8(cmdhdl, nvp->nvp_count,
3531 			    satacmd->satacmd_sec_count_lsb);
3532 		}
3533 
3534 		/*
3535 		 * send the high-order half first
3536 		 */
3537 		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_msb);
3538 		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_msb);
3539 		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_msb);
3540 		/*
3541 		 * Send the low-order half
3542 		 */
3543 		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
3544 		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
3545 		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
3546 
3547 		break;
3548 
3549 	case 0:
3550 		/*
3551 		 * non-media access commands such as identify and features
3552 		 * take this path.
3553 		 */
3554 		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
3555 		nv_put8(cmdhdl, nvp->nvp_feature,
3556 		    satacmd->satacmd_features_reg);
3557 		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
3558 		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
3559 		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
3560 
3561 		break;
3562 
3563 	default:
3564 		break;
3565 	}
3566 
3567 	ASSERT(nvp->nvp_slot);
3568 }
3569 
3570 
3571 /*
3572  * start a command that involves no media access
3573  */
3574 static int
3575 nv_start_nodata(nv_port_t *nvp, int slot)
3576 {
3577 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3578 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
3579 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
3580 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3581 
3582 	nv_program_taskfile_regs(nvp, slot);
3583 
3584 	/*
3585 	 * This next one sets the controller in motion
3586 	 */
3587 	nv_put8(cmdhdl, nvp->nvp_cmd, sata_cmdp->satacmd_cmd_reg);
3588 
3589 	return (SATA_TRAN_ACCEPTED);
3590 }
3591 
3592 
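/*
 * Read the bus master status register, clear the interrupt and error
 * latches while preserving the other bits, and return the value read.
 */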
3593 int
3594 nv_bm_status_clear(nv_port_t *nvp)
3595 {
3596 	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3597 	uchar_t	status, ret;
3598 
3599 	/*
3600 	 * Get the current BM status
3601 	 */
3602 	ret = status = nv_get8(bmhdl, nvp->nvp_bmisx);
3603 
3604 	status = (status & BMISX_MASK) | BMISX_IDERR | BMISX_IDEINTS;
3605 
3606 	/*
3607 	 * Clear the latches (and preserve the other bits)
3608 	 */
3609 	nv_put8(bmhdl, nvp->nvp_bmisx, status);
3610 
3611 	return (ret);
3612 }
3613 
3614 
3615 /*
3616  * program the bus master DMA engine with the PRD address for
3617  * the active slot command, and start the DMA engine.
3618  */
3619 static void
3620 nv_start_dma_engine(nv_port_t *nvp, int slot)
3621 {
3622 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3623 	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3624 	uchar_t direction;
3625 
3626 	ASSERT(nv_slotp->nvslot_spkt != NULL);
3627 
3628 	if (nv_slotp->nvslot_spkt->satapkt_cmd.satacmd_flags.sata_data_direction
3629 	    == SATA_DIR_READ) {
3630 		direction = BMICX_RWCON_WRITE_TO_MEMORY;
3631 	} else {
3632 		direction = BMICX_RWCON_READ_FROM_MEMORY;
3633 	}
3634 
3635 	NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
3636 	    "nv_start_dma_engine entered"));
3637 
3638 	/*
3639 	 * reset the controller's interrupt and error status bits
3640 	 */
3641 	(void) nv_bm_status_clear(nvp);
3642 
3643 	/*
3644 	 * program the PRD table physical start address
3645 	 */
3646 	nv_put32(bmhdl, nvp->nvp_bmidtpx, nvp->nvp_sg_paddr[slot]);
3647 
3648 	/*
3649 	 * set the direction control and start the DMA controller
3650 	 */
3651 	nv_put8(bmhdl, nvp->nvp_bmicx, direction | BMICX_SSBM);
3652 }
3653 
3654 /*
3655  * start dma command, either in or out
3656  */
3657 static int
3658 nv_start_dma(nv_port_t *nvp, int slot)
3659 {
3660 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3661 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3662 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
3663 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
3664 	uint8_t cmd = sata_cmdp->satacmd_cmd_reg;
3665 #ifdef NCQ
3666 	uint8_t ncq = B_FALSE;
3667 #endif
3668 	ddi_acc_handle_t sghdl = nvp->nvp_sg_acc_hdl[slot];
3669 	uint_t *dstp = (uint_t *)nvp->nvp_sg_addr[slot];
3670 	int sg_count = sata_cmdp->satacmd_num_dma_cookies, idx;
3671 	ddi_dma_cookie_t  *srcp = sata_cmdp->satacmd_dma_cookie_list;
3672 
3673 	ASSERT(sg_count != 0);
3674 
3675 	if (sata_cmdp->satacmd_num_dma_cookies > NV_DMA_NSEGS) {
3676 		nv_cmn_err(CE_WARN, nvp->nvp_ctlp, nvp, "NV_DMA_NSEGS=%d <"
3677 		    " satacmd_num_dma_cookies=%d", NV_DMA_NSEGS,
3678 		    sata_cmdp->satacmd_num_dma_cookies);
3679 
3680 		return (NV_FAILURE);
3681 	}
3682 
3683 	nv_program_taskfile_regs(nvp, slot);
3684 
3685 	/*
3686 	 * start the drive in motion
3687 	 */
3688 	nv_put8(cmdhdl, nvp->nvp_cmd, cmd);
3689 
3690 	/*
3691 	 * the drive starts processing the transaction when the cmd register
3692 	 * is written.  This is done here before programming the DMA engine to
3693 	 * parallelize and save some time.  In the event that the drive is ready
3694 	 * before DMA, it will wait.
3695 	 */
3696 #ifdef NCQ
3697 	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
3698 	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
3699 		ncq = B_TRUE;
3700 	}
3701 #endif
3702 
3703 	/*
3704 	 * copy the PRD list to PRD table in DMA accessible memory
3705 	 * so that the controller can access it.
3706 	 */
3707 	for (idx = 0; idx < sg_count; idx++, srcp++) {
3708 		uint32_t size;
3709 
3710 		ASSERT(srcp->dmac_size <= UINT16_MAX);
3711 
3712 		nv_put32(sghdl, dstp++, srcp->dmac_address);
3713 
3714 		size = srcp->dmac_size;
3715 
3716 		/*
3717 		 * If this is a 40-bit address, copy bits 32-39 of the
3718 		 * physical address into bits 16-23 of the PRD count.
3719 		 */
3720 		if (srcp->dmac_laddress > UINT32_MAX) {
3721 			size |= ((srcp->dmac_laddress & 0xff00000000) >> 16);
3722 		}
3723 
3724 		/*
3725 		 * set the end of table flag for the last entry
3726 		 */
3727 		if (idx == (sg_count - 1)) {
3728 			size |= PRDE_EOT;
3729 		}
3730 
3731 		nv_put32(sghdl, dstp++, size);
3732 	}
3733 
3734 	(void) ddi_dma_sync(nvp->nvp_sg_dma_hdl[slot], 0,
3735 	    sizeof (prde_t) * NV_DMA_NSEGS, DDI_DMA_SYNC_FORDEV);
3736 
3737 	nv_start_dma_engine(nvp, slot);
3738 
3739 #ifdef NCQ
3740 	/*
3741 	 * optimization:  for SWNCQ, start DMA engine if this is the only
3742 	 * command running.  Preliminary NCQ efforts indicated this needs
3743 	 * more debugging.
3744 	 *
3745 	 * if (nvp->nvp_ncq_run <= 1)
3746 	 */
3747 
3748 	if (ncq == B_FALSE) {
3749 		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
3750 		    "NOT NCQ so starting DMA NOW non_ncq_commands=%d"
3751 		    " cmd = %X", non_ncq_commands++, cmd));
3752 		nv_start_dma_engine(nvp, slot);
3753 	} else {
3754 		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp, "?NCQ, so program "
3755 		    "DMA later ncq_commands=%d cmd = %X", ncq_commands++, cmd));
3756 	}
3757 #endif /* NCQ */
3758 
3759 	return (SATA_TRAN_ACCEPTED);
3760 }
3761 
3762 
3763 /*
3764  * start a PIO data-in ATA command
3765  */
3766 static int
3767 nv_start_pio_in(nv_port_t *nvp, int slot)
3768 {
3769 
3770 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3771 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
3772 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3773 
3774 	nv_program_taskfile_regs(nvp, slot);
3775 
3776 	/*
3777 	 * This next one sets the drive in motion
3778 	 */
3779 	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
3780 
3781 	return (SATA_TRAN_ACCEPTED);
3782 }
3783 
3784 
3785 /*
3786  * start a PIO data-out ATA command
3787  */
3788 static int
3789 nv_start_pio_out(nv_port_t *nvp, int slot)
3790 {
3791 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3792 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3793 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
3794 
3795 	nv_program_taskfile_regs(nvp, slot);
3796 
3797 	/*
3798 	 * this next one sets the drive in motion
3799 	 */
3800 	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
3801 
3802 	/*
3803 	 * wait for the busy bit to settle
3804 	 */
3805 	NV_DELAY_NSEC(400);
3806 
3807 	/*
3808 	 * wait for the drive to assert DRQ to send the first chunk
3809 	 * of data. Have to busy wait because there's no interrupt for
3810 	 * the first chunk. This is bad... uses a lot of cycles if the
3811 	 * drive responds too slowly or if the wait loop granularity
3812 	 * is too large. It's even worse if the drive is defective and
3813 	 * the loop times out.
3814 	 */
3815 	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
3816 	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
3817 	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
3818 	    4000000, 0) == B_FALSE) {
3819 		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
3820 
3821 		goto error;
3822 	}
3823 
3824 	/*
3825 	 * send the first block.
3826 	 */
3827 	nv_intr_pio_out(nvp, nv_slotp);
3828 
3829 	/*
3830 	 * If nvslot_flags is not set to COMPLETE yet, then processing
3831 	 * is OK so far, so return.  Otherwise, fall into error handling
3832 	 * below.
3833 	 */
3834 	if (nv_slotp->nvslot_flags != NVSLOT_COMPLETE) {
3835 
3836 		return (SATA_TRAN_ACCEPTED);
3837 	}
3838 
3839 	error:
3840 	/*
3841 	 * there was an error so reset the device and complete the packet.
3842 	 */
3843 	nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
3844 	nv_complete_io(nvp, spkt, 0);
3845 	nv_reset(nvp);
3846 
3847 	return (SATA_TRAN_PORT_ERROR);
3848 }
3849 
3850 
3851 /*
3852  * Interrupt processing for a non-data ATA command.
3853  */
3854 static void
3855 nv_intr_nodata(nv_port_t *nvp, nv_slot_t *nv_slotp)
3856 {
3857 	uchar_t status;
3858 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
3859 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
3860 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
3861 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3862 
3863 	NVLOG((NVDBG_INTR, nvp->nvp_ctlp, nvp, "nv_intr_nodata entered"));
3864 
3865 	status = nv_get8(cmdhdl, nvp->nvp_status);
3866 
3867 	/*
3868 	 * check for errors
3869 	 */
3870 	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
3871 		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
3872 		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
3873 		    nvp->nvp_altstatus);
3874 		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
3875 	} else {
3876 		spkt->satapkt_reason = SATA_PKT_COMPLETED;
3877 	}
3878 
3879 	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
3880 }
3881 
3882 
3883 /*
3884  * ATA command, PIO data in
3885  */
3886 static void
3887 nv_intr_pio_in(nv_port_t *nvp, nv_slot_t *nv_slotp)
3888 {
3889 	uchar_t	status;
3890 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
3891 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
3892 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
3893 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3894 	int count;
3895 
3896 	status = nv_get8(cmdhdl, nvp->nvp_status);
3897 
3898 	if (status & SATA_STATUS_BSY) {
3899 		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
3900 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
3901 		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
3902 		    nvp->nvp_altstatus);
3903 		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
3904 		nv_reset(nvp);
3905 
3906 		return;
3907 	}
3908 
3909 	/*
3910 	 * check for errors
3911 	 */
3912 	if ((status & (SATA_STATUS_DRQ | SATA_STATUS_DF |
3913 	    SATA_STATUS_ERR)) != SATA_STATUS_DRQ) {
3914 		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
3915 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
3916 		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
3917 
3918 		return;
3919 	}
3920 
3921 	/*
3922 	 * read the next chunk of data (if any)
3923 	 */
3924 	count = min(nv_slotp->nvslot_byte_count, NV_BYTES_PER_SEC);
3925 
3926 	/*
3927 	 * read count bytes
3928 	 */
3929 	ASSERT(count != 0);
3930 
3931 	ddi_rep_get16(cmdhdl, (ushort_t *)nv_slotp->nvslot_v_addr,
3932 	    (ushort_t *)nvp->nvp_data, (count >> 1), DDI_DEV_NO_AUTOINCR);
3933 
3934 	nv_slotp->nvslot_v_addr += count;
3935 	nv_slotp->nvslot_byte_count -= count;
3936 
3937 
3938 	if (nv_slotp->nvslot_byte_count != 0) {
3939 		/*
3940 		 * more to transfer.  Wait for next interrupt.
3941 		 */
3942 		return;
3943 	}
3944 
3945 	/*
3946 	 * transfer is complete. wait for the busy bit to settle.
3947 	 */
3948 	NV_DELAY_NSEC(400);
3949 
3950 	spkt->satapkt_reason = SATA_PKT_COMPLETED;
3951 	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
3952 }
3953 
3954 
3955 /*
3956  * ATA command PIO data out
3957  */
3958 static void
3959 nv_intr_pio_out(nv_port_t *nvp, nv_slot_t *nv_slotp)
3960 {
3961 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
3962 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
3963 	uchar_t status;
3964 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
3965 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3966 	int count;
3967 
3968 	/*
3969 	 * clear the IRQ
3970 	 */
3971 	status = nv_get8(cmdhdl, nvp->nvp_status);
3972 
3973 	if (status & SATA_STATUS_BSY) {
3974 		/*
3975 		 * this should not happen
3976 		 */
3977 		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
3978 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
3979 		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
3980 		    nvp->nvp_altstatus);
3981 		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
3982 
3983 		return;
3984 	}
3985 
3986 	/*
3987 	 * check for errors
3988 	 */
3989 	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
3990 		nv_copy_registers(nvp,  &spkt->satapkt_device, spkt);
3991 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
3992 		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
3993 
3994 		return;
3995 	}
3996 
3997 	/*
3998 	 * this is the condition which signals the drive is
3999 	 * no longer ready to transfer.  The transfer most likely
4000 	 * completed successfully, but check that byte_count is
4001 	 * zero to confirm.
4002 	 */
4003 	if ((status & SATA_STATUS_DRQ) == 0) {
4004 
4005 		if (nv_slotp->nvslot_byte_count == 0) {
4006 			/*
4007 			 * complete; successful transfer
4008 			 */
4009 			spkt->satapkt_reason = SATA_PKT_COMPLETED;
4010 		} else {
4011 			/*
4012 			 * error condition, incomplete transfer
4013 			 */
4014 			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4015 			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4016 		}
4017 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4018 
4019 		return;
4020 	}
4021 
4022 	/*
4023 	 * write the next chunk of data
4024 	 */
4025 	count = min(nv_slotp->nvslot_byte_count, NV_BYTES_PER_SEC);
4026 
4027 	/*
4028 	 * write count bytes
4029 	 */
4030 
4031 	ASSERT(count != 0);
4032 
4033 	ddi_rep_put16(cmdhdl, (ushort_t *)nv_slotp->nvslot_v_addr,
4034 	    (ushort_t *)nvp->nvp_data, (count >> 1), DDI_DEV_NO_AUTOINCR);
4035 
4036 	nv_slotp->nvslot_v_addr += count;
4037 	nv_slotp->nvslot_byte_count -= count;
4038 }
4039 
4040 
4041 /*
4042  * ATA command, DMA data in/out
4043  */
4044 static void
4045 nv_intr_dma(nv_port_t *nvp, struct nv_slot *nv_slotp)
4046 {
4047 	uchar_t status;
4048 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4049 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4050 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4051 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4052 	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
4053 	uchar_t	bmicx;
4054 	uchar_t bm_status;
4055 
4056 	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4057 
4058 	/*
4059 	 * stop DMA engine.
4060 	 */
4061 	bmicx = nv_get8(bmhdl, nvp->nvp_bmicx);
4062 	nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
4063 
4064 	/*
4065 	 * get the status and clear the IRQ, and check for DMA error
4066 	 */
4067 	status = nv_get8(cmdhdl, nvp->nvp_status);
4068 
4069 	/*
4070 	 * check for drive errors
4071 	 */
4072 	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
4073 		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4074 		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4075 		(void) nv_bm_status_clear(nvp);
4076 
4077 		return;
4078 	}
4079 
4080 	bm_status = nv_bm_status_clear(nvp);
4081 
4082 	/*
4083 	 * check for bus master errors
4084 	 */
4085 	if (bm_status & BMISX_IDERR) {
4086 		spkt->satapkt_reason = SATA_PKT_RESET;
4087 		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4088 		    nvp->nvp_altstatus);
4089 		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4090 		nv_reset(nvp);
4091 
4092 		return;
4093 	}
4094 
4095 	spkt->satapkt_reason = SATA_PKT_COMPLETED;
4096 }
4097 
4098 
4099 /*
4100  * Wait for a register of a controller to achieve a specific state.
4101  * To return normally, all the bits in the first sub-mask must be ON,
4102  * all the bits in the second sub-mask must be OFF.
4103  * If timeout_usec microseconds pass without the controller achieving
4104  * the desired bit configuration, return FALSE, else TRUE.
4105  *
4106  * hybrid waiting algorithm: if not in interrupt context, busy looping will
4107  * occur for the first 250 us, then switch over to a sleeping wait.
4108  *
4109  */
4110 int
4111 nv_wait(nv_port_t *nvp, uchar_t onbits, uchar_t offbits, uint_t timeout_usec,
4112     int type_wait)
4113 {
4114 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4115 	hrtime_t end, cur, start_sleep, start;
4116 	int first_time = B_TRUE;
4117 	ushort_t val;
4118 
4119 	for (;;) {
4120 		val = nv_get8(ctlhdl, nvp->nvp_altstatus);
4121 
4122 		if ((val & onbits) == onbits && (val & offbits) == 0) {
4123 
4124 			return (B_TRUE);
4125 		}
4126 
4127 		cur = gethrtime();
4128 
4129 		/*
4130 		 * store the start time and calculate the end
4131 		 * time.  also calculate "start_sleep" which is
4132 		 * the point after which the driver will stop busy
4133 		 * waiting and change to sleep waiting.
4134 		 */
4135 		if (first_time) {
4136 			first_time = B_FALSE;
4137 			/*
4138 			 * start and end are in nanoseconds
4139 			 */
4140 			start = cur;
4141 			end = start + timeout_usec * 1000;
4142 			/*
4143 			 * add 250 us to start
4144 			 */
4145 			start_sleep = start + 250000;
4146 
4147 			if (servicing_interrupt()) {
4148 				type_wait = NV_NOSLEEP;
4149 			}
4150 		}
4151 
4152 		if (cur > end) {
4153 
4154 			break;
4155 		}
4156 
4157 		if ((type_wait != NV_NOSLEEP) && (cur > start_sleep)) {
4158 #if ! defined(__lock_lint)
4159 			delay(1);
4160 #endif
4161 		} else {
4162 			drv_usecwait(nv_usec_delay);
4163 		}
4164 	}
4165 
4166 	return (B_FALSE);
4167 }
4168 
4169 
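/*
 * Usage sketch, illustrative only and not part of the driver: wait up
 * to one second for BSY to drop.  Passing NV_NOSLEEP forces pure busy
 * waiting; any other type_wait value allows the hybrid algorithm to
 * switch to delay(1) once the initial 250 us window has elapsed.  The
 * function name and the NV_SATA_EXAMPLE guard are hypothetical.
 */
#ifdef NV_SATA_EXAMPLE
static int
example_wait_not_busy(nv_port_t *nvp)
{
	/* onbits == 0: only require that BSY is off within 1 second */
	return (nv_wait(nvp, 0, SATA_STATUS_BSY, 1000000, NV_NOSLEEP));
}
#endif
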
4170 /*
4171  * This is a slightly more complicated version that checks
4172  * for error conditions and bails out rather than looping
4173  * until the timeout is exceeded.
4174  *
4175  * hybrid waiting algorithm: if not in interrupt context, busy looping will
4176  * occur for the first 250 us, then switch over to a sleeping wait.
4177  */
4178 int
4179 nv_wait3(
4180 	nv_port_t	*nvp,
4181 	uchar_t		onbits1,
4182 	uchar_t		offbits1,
4183 	uchar_t		failure_onbits2,
4184 	uchar_t		failure_offbits2,
4185 	uchar_t		failure_onbits3,
4186 	uchar_t		failure_offbits3,
4187 	uint_t		timeout_usec,
4188 	int		type_wait)
4189 {
4190 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4191 	hrtime_t end, cur, start_sleep, start;
4192 	int first_time = B_TRUE;
4193 	ushort_t val;
4194 
4195 	for (;;) {
4196 		val = nv_get8(ctlhdl, nvp->nvp_altstatus);
4197 
4198 		/*
4199 		 * check for expected condition
4200 		 */
4201 		if ((val & onbits1) == onbits1 && (val & offbits1) == 0) {
4202 
4203 			return (B_TRUE);
4204 		}
4205 
4206 		/*
4207 		 * check for error conditions
4208 		 */
4209 		if ((val & failure_onbits2) == failure_onbits2 &&
4210 		    (val & failure_offbits2) == 0) {
4211 
4212 			return (B_FALSE);
4213 		}
4214 
4215 		if ((val & failure_onbits3) == failure_onbits3 &&
4216 		    (val & failure_offbits3) == 0) {
4217 
4218 			return (B_FALSE);
4219 		}
4220 
4221 		/*
4222 		 * store the start time and calculate the end
4223 		 * time.  also calculate "start_sleep" which is
4224 		 * the point after which the driver will stop busy
4225 		 * waiting and change to sleep waiting.
4226 		 */
4227 		if (first_time) {
4228 			first_time = B_FALSE;
4229 			/*
4230 			 * start and end are in nanoseconds
4231 			 */
4232 			cur = start = gethrtime();
4233 			end = start + timeout_usec * 1000;
4234 			/*
4235 			 * add 1 ms to start
4236 			 * add 250 us to start
4237 			 */
4238 			start_sleep = start + 250000;
4239 			if (servicing_interrupt()) {
4240 				type_wait = NV_NOSLEEP;
4241 			}
4242 		} else {
4243 			cur = gethrtime();
4244 		}
4245 
4246 		if (cur > end) {
4247 
4248 			break;
4249 		}
4250 
4251 		if ((type_wait != NV_NOSLEEP) && (cur > start_sleep)) {
4252 #if ! defined(__lock_lint)
4253 			delay(1);
4254 #endif
4255 		} else {
4256 			drv_usecwait(nv_usec_delay);
4257 		}
4258 	}
4259 
4260 	return (B_FALSE);
4261 }
4262 
4263 
4264 /*
4265  * nv_check_link() checks whether the specified link is active, i.e. a
4266  * device is present and communicating.
4267  */
4268 static boolean_t
4269 nv_check_link(uint32_t sstatus)
4270 {
4271 	uint8_t det;
4272 
4273 	det = (sstatus & SSTATUS_DET) >> SSTATUS_DET_SHIFT;
4274 
4275 	return (det == SSTATUS_DET_DEVPRE_PHYCOM);
4276 }
4277 
4278 
4279 /*
4280  * nv_port_state_change() reports the state of the port to the
4281  * sata module by calling sata_hba_event_notify().  This
4282  * function is called any time the state of the port is changed
4283  * function is called any time the state of the port is changed.
4284 static void
4285 nv_port_state_change(nv_port_t *nvp, int event, uint8_t addr_type, int state)
4286 {
4287 	sata_device_t sd;
4288 
4289 	bzero((void *)&sd, sizeof (sata_device_t));
4290 	sd.satadev_rev = SATA_DEVICE_REV;
4291 	nv_copy_registers(nvp, &sd, NULL);
4292 
4293 	/*
4294 	 * When NCQ is implemented, the sactive and snotific fields will
4295 	 * need to be updated.
4296 	 */
4297 	sd.satadev_addr.cport = nvp->nvp_port_num;
4298 	sd.satadev_addr.qual = addr_type;
4299 	sd.satadev_state = state;
4300 
4301 	sata_hba_event_notify(nvp->nvp_ctlp->nvc_dip, &sd, event);
4302 }
4303 
4304 
4305 /*
4306  * timeout processing:
4307  *
4308  * Check if any packets have crossed a timeout threshold.  If so, then
4309  * abort the packet.  This function is not NCQ aware.
4310  *
4311  * If reset was invoked anywhere other than nv_sata_probe(), then
4312  * monitor for reset completion here.
4313  *
4314  */
4315 static void
4316 nv_timeout(void *arg)
4317 {
4318 	nv_port_t *nvp = arg;
4319 	nv_slot_t *nv_slotp;
4320 	int restart_timeout = B_FALSE;
4321 
4322 	mutex_enter(&nvp->nvp_mutex);
4323 
4324 	/*
4325 	 * If the probe entry point is driving the reset and signature
4326 	 * acquisition, just return.
4327 	 */
4328 	if (nvp->nvp_state & NV_PORT_RESET_PROBE) {
4329 		goto finished;
4330 	}
4331 
4332 	/*
4333 	 * If the port is not in the init state, it likely
4334 	 * means the link was lost while a timeout was active.
4335 	 */
4336 	if ((nvp->nvp_state & NV_PORT_INIT) == 0) {
4337 		NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
4338 		    "nv_timeout: port uninitialized"));
4339 
4340 		goto finished;
4341 	}
4342 
4343 	if (nvp->nvp_state & NV_PORT_RESET) {
4344 		ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
4345 		uint32_t sstatus;
4346 
4347 		NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
4348 		    "nv_timeout(): port waiting for signature"));
4349 
4350 		sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
4351 
4352 		/*
4353 		 * check for link presence.  If the link remains
4354 		 * missing for more than 2 seconds, send a remove
4355 		 * event and abort signature acquisition.
4356 		 */
4357 		if (nv_check_link(sstatus) == B_FALSE) {
4358 			clock_t e_link_lost = ddi_get_lbolt();
4359 
4360 			if (nvp->nvp_link_lost_time == 0) {
4361 				nvp->nvp_link_lost_time = e_link_lost;
4362 			}
4363 			if (TICK_TO_SEC(e_link_lost -
4364 			    nvp->nvp_link_lost_time) < NV_LINK_LOST_OK) {
4365 				NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
4366 				    "probe: intermittent link lost while"
4367 				    " resetting"));
4368 				restart_timeout = B_TRUE;
4369 			} else {
4370 				NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
4371 				    "link lost during signature acquisition."
4372 				    "  Giving up"));
4373 				nv_port_state_change(nvp,
4374 				    SATA_EVNT_DEVICE_DETACHED|
4375 				    SATA_EVNT_LINK_LOST,
4376 				    SATA_ADDR_CPORT, 0);
4377 				nvp->nvp_state |= NV_PORT_HOTREMOVED;
4378 				nvp->nvp_state &= ~NV_PORT_RESET;
4379 			}
4380 
4381 			goto finished;
4382 		} else {
4383 
4384 			nvp->nvp_link_lost_time = 0;
4385 		}
4386 
4387 		nv_read_signature(nvp);
4388 
4389 		if (nvp->nvp_signature != 0) {
4390 			if (nvp->nvp_type == SATA_DTYPE_ATADISK) {
4391 				nvp->nvp_state |= NV_PORT_RESTORE;
4392 				nv_port_state_change(nvp,
4393 				    SATA_EVNT_DEVICE_RESET,
4394 				    SATA_ADDR_DCPORT,
4395 				    SATA_DSTATE_RESET|SATA_DSTATE_PWR_ACTIVE);
4396 			}
4397 
4398 			goto finished;
4399 		}
4400 
4401 		/*
4402 		 * Reset if more than 5 seconds has passed without
4403 		 * Reset if more than 5 seconds have passed without
4404 		 */
4405 		if (TICK_TO_SEC(ddi_get_lbolt() - nvp->nvp_reset_time) > 5) {
4406 			nv_reset(nvp);
4407 		}
4408 
4409 		restart_timeout = B_TRUE;
4410 		goto finished;
4411 	}
4412 
4413 
4414 	/*
4415 	 * not yet NCQ aware
4416 	 */
4417 	nv_slotp = &(nvp->nvp_slot[0]);
4418 
4419 	/*
4420 	 * this happens early on before nv_slotp is set
4421 	 * up OR when a device was unexpectedly removed and
4422 	 * there was an active packet.
4423 	 */
4424 	if (nv_slotp == NULL) {
4425 		NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
4426 		    "nv_timeout: nv_slotp == NULL"));
4427 
4428 		goto finished;
4429 	}
4430 
4431 	/*
4432 	 * perform timeout checking and processing only if there is an
4433 	 * active packet on the port
4434 	 */
4435 	if (nv_slotp->nvslot_spkt != NULL)  {
4436 		sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4437 		sata_cmd_t *satacmd = &spkt->satapkt_cmd;
4438 		uint8_t cmd = satacmd->satacmd_cmd_reg;
4439 		uint64_t lba;
4440 
4441 #if ! defined(__lock_lint) && defined(DEBUG)
4442 
4443 		lba = (uint64_t)satacmd->satacmd_lba_low_lsb |
4444 		    ((uint64_t)satacmd->satacmd_lba_mid_lsb << 8) |
4445 		    ((uint64_t)satacmd->satacmd_lba_high_lsb << 16) |
4446 		    ((uint64_t)satacmd->satacmd_lba_low_msb << 24) |
4447 		    ((uint64_t)satacmd->satacmd_lba_mid_msb << 32) |
4448 		    ((uint64_t)satacmd->satacmd_lba_high_msb << 40);
4449 #endif
4450 
4451 		/*
4452 		 * timeout not needed if there is a polling thread
4453 		 */
4454 		if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
4455 
4456 			goto finished;
4457 		}
4458 
4459 		if (TICK_TO_SEC(ddi_get_lbolt() - nv_slotp->nvslot_stime) >
4460 		    spkt->satapkt_time) {
4461 			NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
4462 			    "abort timeout: "
4463 			    "nvslot_stime: %ld max ticks till timeout: "
4464 			    "%ld cur_time: %ld cmd=%x lba=%d",
4465 			    nv_slotp->nvslot_stime, drv_usectohz(MICROSEC *
4466 			    spkt->satapkt_time), ddi_get_lbolt(), cmd, lba));
4467 
4468 			(void) nv_abort_active(nvp, spkt, SATA_PKT_TIMEOUT);
4469 
4470 		} else {
4471 			NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp, "nv_timeout:"
4472 			    " still in use so restarting timeout"));
4473 		}
4474 		restart_timeout = B_TRUE;
4475 
4476 	} else {
4477 		/*
4478 		 * there was no active packet, so do not re-enable timeout
4479 		 */
4480 		NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
4481 		    "nv_timeout: no active packet so not re-arming timeout"));
4482 	}
4483 
4484 	finished:
4485 
4486 	if (restart_timeout == B_TRUE) {
4487 		nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
4488 		    drv_usectohz(NV_ONE_SEC));
4489 	} else {
4490 		nvp->nvp_timeout_id = 0;
4491 	}
4492 	mutex_exit(&nvp->nvp_mutex);
4493 }
4494 
4495 
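/*
 * Illustrative sketch, not part of the driver: the timeout test above
 * converts elapsed lbolt ticks to seconds and compares the result with
 * the per-packet allowance carried in satapkt_time.  The helper name
 * and the NV_SATA_EXAMPLE guard are hypothetical.
 */
#ifdef NV_SATA_EXAMPLE
static int
example_pkt_timed_out(clock_t start_ticks, int allowed_seconds)
{
	/* returns B_TRUE once the packet has been outstanding too long */
	return (TICK_TO_SEC(ddi_get_lbolt() - start_ticks) >
	    allowed_seconds);
}
#endif
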
4496 /*
4497  * enable or disable the 3 interrupt types the driver is
4498  * interested in: completion, add and remove.
4499  */
4500 static void
4501 mcp04_set_intr(nv_port_t *nvp, int flag)
4502 {
4503 	nv_ctl_t *nvc = nvp->nvp_ctlp;
4504 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
4505 	uchar_t *bar5  = nvc->nvc_bar_addr[5];
4506 	uint8_t intr_bits[] = { MCP04_INT_PDEV_HOT|MCP04_INT_PDEV_INT,
4507 	    MCP04_INT_SDEV_HOT|MCP04_INT_SDEV_INT };
4508 	uint8_t clear_all_bits[] = { MCP04_INT_PDEV_ALL, MCP04_INT_SDEV_ALL };
4509 	uint8_t int_en, port = nvp->nvp_port_num, intr_status;
4510 
4511 	ASSERT(mutex_owned(&nvp->nvp_mutex));
4512 
4513 	/*
4514 	 * controller level lock is also required since access to an 8-bit
4515 	 * interrupt register is shared between both channels.
4516 	 */
4517 	mutex_enter(&nvc->nvc_mutex);
4518 
4519 	if (flag & NV_INTR_CLEAR_ALL) {
4520 		NVLOG((NVDBG_INTR, nvc, nvp,
4521 		    "mcp04_set_intr: NV_INTR_CLEAR_ALL"));
4522 
4523 		intr_status = nv_get8(nvc->nvc_bar_hdl[5],
4524 		    (uint8_t *)(nvc->nvc_mcp04_int_status));
4525 
4526 		if (intr_status & clear_all_bits[port]) {
4527 
4528 			nv_put8(nvc->nvc_bar_hdl[5],
4529 			    (uint8_t *)(nvc->nvc_mcp04_int_status),
4530 			    clear_all_bits[port]);
4531 
4532 			NVLOG((NVDBG_INTR, nvc, nvp,
4533 			    "interrupt bits cleared %x",
4534 			    intr_status & clear_all_bits[port]));
4535 		}
4536 	}
4537 
4538 	if (flag & NV_INTR_DISABLE) {
4539 		NVLOG((NVDBG_INTR, nvc, nvp,
4540 		    "mcp04_set_intr: NV_INTR_DISABLE"));
4541 		int_en = nv_get8(bar5_hdl,
4542 		    (uint8_t *)(bar5 + MCP04_SATA_INT_EN));
4543 		int_en &= ~intr_bits[port];
4544 		nv_put8(bar5_hdl, (uint8_t *)(bar5 + MCP04_SATA_INT_EN),
4545 		    int_en);
4546 	}
4547 
4548 	if (flag & NV_INTR_ENABLE) {
4549 		NVLOG((NVDBG_INTR, nvc, nvp, "mcp04_set_intr: NV_INTR_ENABLE"));
4550 		int_en = nv_get8(bar5_hdl,
4551 		    (uint8_t *)(bar5 + MCP04_SATA_INT_EN));
4552 		int_en |= intr_bits[port];
4553 		nv_put8(bar5_hdl, (uint8_t *)(bar5 + MCP04_SATA_INT_EN),
4554 		    int_en);
4555 	}
4556 
4557 	mutex_exit(&nvc->nvc_mutex);
4558 }
4559 
4560 
4561 /*
4562  * enable or disable the 3 interrupts the driver is interested in:
4563  * completion interrupt, hot add, and hot remove interrupt.
4564  */
4565 static void
4566 mcp55_set_intr(nv_port_t *nvp, int flag)
4567 {
4568 	nv_ctl_t *nvc = nvp->nvp_ctlp;
4569 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
4570 	uint16_t intr_bits =
4571 	    MCP55_INT_ADD|MCP55_INT_REM|MCP55_INT_COMPLETE;
4572 	uint16_t int_en;
4573 
4574 	ASSERT(mutex_owned(&nvp->nvp_mutex));
4575 
4576 	NVLOG((NVDBG_HOT, nvc, nvp, "mcp55_set_intr: enter flag: %d", flag));
4577 
4578 	if (flag & NV_INTR_CLEAR_ALL) {
4579 		NVLOG((NVDBG_INTR, nvc, nvp,
4580 		    "mcp55_set_intr: NV_INTR_CLEAR_ALL"));
4581 		nv_put16(bar5_hdl, nvp->nvp_mcp55_int_status, MCP55_INT_CLEAR);
4582 	}
4583 
4584 	if (flag & NV_INTR_ENABLE) {
4585 		NVLOG((NVDBG_INTR, nvc, nvp, "mcp55_set_intr: NV_INTR_ENABLE"));
4586 		int_en = nv_get16(bar5_hdl, nvp->nvp_mcp55_int_ctl);
4587 		int_en |= intr_bits;
4588 		nv_put16(bar5_hdl, nvp->nvp_mcp55_int_ctl, int_en);
4589 	}
4590 
4591 	if (flag & NV_INTR_DISABLE) {
4592 		NVLOG((NVDBG_INTR, nvc, nvp,
4593 		    "mcp55_set_intr: NV_INTR_DISABLE"));
4594 		int_en = nv_get16(bar5_hdl, nvp->nvp_mcp55_int_ctl);
4595 		int_en &= ~intr_bits;
4596 		nv_put16(bar5_hdl, nvp->nvp_mcp55_int_ctl, int_en);
4597 	}
4598 }
4599 
4600 
4601 /*
4602  * The PM functions for suspend and resume are incomplete and need additional
4603  * work.  They may or may not work in their current state.
4604  */
4605 static void
4606 nv_resume(nv_port_t *nvp)
4607 {
4608 	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_resume()"));
4609 
4610 	mutex_enter(&nvp->nvp_mutex);
4611 
4612 	if (nvp->nvp_state & NV_PORT_INACTIVE) {
4613 		mutex_exit(&nvp->nvp_mutex);
4614 
4615 		return;
4616 	}
4617 
4618 	(*(nvp->nvp_ctlp->nvc_set_intr))(nvp, NV_INTR_CLEAR_ALL|NV_INTR_ENABLE);
4619 
4620 	/*
4621 	 * power may have been removed to the port and the
4622 	 * drive, and/or a drive may have been added or removed.
4623 	 * Force a reset which will cause a probe and re-establish
4624 	 * any state needed on the drive.
4625 	 * nv_reset(nvp);
4626 	 */
4627 
4628 	nv_reset(nvp);
4629 
4630 	mutex_exit(&nvp->nvp_mutex);
4631 }
4632 
4633 /*
4634  * The PM functions for suspend and resume are incomplete and need additional
4635  * work.  They may or may not work in their current state.
4636  */
4637 static void
4638 nv_suspend(nv_port_t *nvp)
4639 {
4640 	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_suspend()"));
4641 
4642 	mutex_enter(&nvp->nvp_mutex);
4643 
4644 	if (nvp->nvp_state & NV_PORT_INACTIVE) {
4645 		mutex_exit(&nvp->nvp_mutex);
4646 
4647 		return;
4648 	}
4649 
4650 	(*(nvp->nvp_ctlp->nvc_set_intr))(nvp, NV_INTR_DISABLE);
4651 
4652 	/*
4653 	 * power may have been removed to the port and the
4654 	 * drive, and/or a drive may have been added or removed.
4655 	 * Force a reset which will cause a probe and re-establish
4656 	 * any state needed on the drive.
4657 	 * nv_reset(nvp);
4658 	 */
4659 
4660 	mutex_exit(&nvp->nvp_mutex);
4661 }
4662 
4663 
4664 static void
4665 nv_copy_registers(nv_port_t *nvp, sata_device_t *sd, sata_pkt_t *spkt)
4666 {
4667 	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
4668 	sata_cmd_t *scmd = &spkt->satapkt_cmd;
4669 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4670 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4671 	uchar_t status;
4672 	struct sata_cmd_flags flags;
4673 
4674 	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_copy_registers()"));
4675 
4676 	sd->satadev_scr.sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
4677 	sd->satadev_scr.serror = nv_get32(bar5_hdl, nvp->nvp_serror);
4678 	sd->satadev_scr.scontrol = nv_get32(bar5_hdl, nvp->nvp_sctrl);
4679 
4680 	if (spkt == NULL) {
4681 
4682 		return;
4683 	}
4684 
4685 	/*
4686 	 * in the error case, implicitly request that the registers needed
4687 	 * for error handling be copied out.
4688 	 */
4689 	status = scmd->satacmd_status_reg = nv_get8(ctlhdl,
4690 	    nvp->nvp_altstatus);
4691 
4692 	flags = scmd->satacmd_flags;
4693 
4694 	if (status & SATA_STATUS_ERR) {
4695 		flags.sata_copy_out_lba_low_msb = B_TRUE;
4696 		flags.sata_copy_out_lba_mid_msb = B_TRUE;
4697 		flags.sata_copy_out_lba_high_msb = B_TRUE;
4698 		flags.sata_copy_out_lba_low_lsb = B_TRUE;
4699 		flags.sata_copy_out_lba_mid_lsb = B_TRUE;
4700 		flags.sata_copy_out_lba_high_lsb = B_TRUE;
4701 		flags.sata_copy_out_error_reg = B_TRUE;
4702 		flags.sata_copy_out_sec_count_msb = B_TRUE;
4703 		flags.sata_copy_out_sec_count_lsb = B_TRUE;
4704 		scmd->satacmd_status_reg = status;
4705 	}
4706 
4707 	if (scmd->satacmd_addr_type & ATA_ADDR_LBA48) {
4708 
4709 		/*
4710 		 * set HOB so that high byte will be read
4711 		 */
4712 		nv_put8(ctlhdl, nvp->nvp_devctl, ATDC_HOB|ATDC_D3);
4713 
4714 		/*
4715 		 * get the requested high bytes
4716 		 */
4717 		if (flags.sata_copy_out_sec_count_msb) {
4718 			scmd->satacmd_sec_count_msb =
4719 			    nv_get8(cmdhdl, nvp->nvp_count);
4720 		}
4721 
4722 		if (flags.sata_copy_out_lba_low_msb) {
4723 			scmd->satacmd_lba_low_msb =
4724 			    nv_get8(cmdhdl, nvp->nvp_sect);
4725 		}
4726 
4727 		if (flags.sata_copy_out_lba_mid_msb) {
4728 			scmd->satacmd_lba_mid_msb =
4729 			    nv_get8(cmdhdl, nvp->nvp_lcyl);
4730 		}
4731 
4732 		if (flags.sata_copy_out_lba_high_msb) {
4733 			scmd->satacmd_lba_high_msb =
4734 			    nv_get8(cmdhdl, nvp->nvp_hcyl);
4735 		}
4736 	}
4737 
4738 	/*
4739 	 * disable HOB so that low byte is read
4740 	 */
4741 	nv_put8(ctlhdl, nvp->nvp_devctl, ATDC_D3);
4742 
4743 	/*
4744 	 * get the requested low bytes
4745 	 */
4746 	if (flags.sata_copy_out_sec_count_lsb) {
4747 		scmd->satacmd_sec_count_lsb = nv_get8(cmdhdl, nvp->nvp_count);
4748 	}
4749 
4750 	if (flags.sata_copy_out_lba_low_lsb) {
4751 		scmd->satacmd_lba_low_lsb = nv_get8(cmdhdl, nvp->nvp_sect);
4752 	}
4753 
4754 	if (flags.sata_copy_out_lba_mid_lsb) {
4755 		scmd->satacmd_lba_mid_lsb = nv_get8(cmdhdl, nvp->nvp_lcyl);
4756 	}
4757 
4758 	if (flags.sata_copy_out_lba_high_lsb) {
4759 		scmd->satacmd_lba_high_lsb = nv_get8(cmdhdl, nvp->nvp_hcyl);
4760 	}
4761 
4762 	/*
4763 	 * get the device register if requested
4764 	 */
4765 	if (flags.sata_copy_out_device_reg) {
4766 		scmd->satacmd_device_reg =  nv_get8(cmdhdl, nvp->nvp_drvhd);
4767 	}
4768 
4769 	/*
4770 	 * get the error register if requested
4771 	 */
4772 	if (flags.sata_copy_out_error_reg) {
4773 		scmd->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4774 	}
4775 }
4776 
4777 
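/*
 * Illustrative sketch, not part of the driver: reading the full 16 bits
 * behind an LBA48 task file register requires toggling the HOB bit in
 * the device control register, as nv_copy_registers() does above.  With
 * ATDC_HOB set the high-order (previous) byte is returned; with it
 * clear the low-order byte is returned.  The helper name and the
 * NV_SATA_EXAMPLE guard are hypothetical.
 */
#ifdef NV_SATA_EXAMPLE
static uint16_t
example_read_lba_low_pair(nv_port_t *nvp)
{
	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
	uint8_t msb, lsb;

	/* HOB set: the sector register returns its high-order byte */
	nv_put8(ctlhdl, nvp->nvp_devctl, ATDC_HOB|ATDC_D3);
	msb = nv_get8(cmdhdl, nvp->nvp_sect);

	/* HOB clear: the sector register returns its low-order byte */
	nv_put8(ctlhdl, nvp->nvp_devctl, ATDC_D3);
	lsb = nv_get8(cmdhdl, nvp->nvp_sect);

	return (((uint16_t)msb << 8) | lsb);
}
#endif
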
4778 /*
4779  * Hot plug and remove interrupts can occur when the device is reset.  Just
4780  * masking the interrupt doesn't always work well because if a
4781  * different interrupt arrives on the other port, the driver can still
4782  * end up checking the state of the other port and discover the hot
4783  * interrupt flag is set even though it was masked.  Checking for recent
4784  * interrupt flag is set even though it was masked.  Checking for recent
4785  * reset activity and ignoring the interrupt turns out to be the easiest way.
4786 static void
4787 nv_report_add_remove(nv_port_t *nvp, int flags)
4788 {
4789 	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
4790 	clock_t time_diff = ddi_get_lbolt() - nvp->nvp_reset_time;
4791 	uint32_t sstatus;
4792 	int i;
4793 
4794 	/*
4795 	 * If the port was reset within the last second, ignore the
4796 	 * interrupt.  This somewhat heavy-handed clamping should be
4797 	 * reworked and improved.
4798 	 */
4799 	if (time_diff < drv_usectohz(NV_ONE_SEC)) {
4800 		NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp, "nv_report_add_remove()"
4800 		NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp, "nv_report_add_remove() "
4801 		    "ignoring plug interrupt, reset was %dms ago",
4803 
4804 		return;
4805 	}
4806 
4807 	/*
4808 	 * wait up to 1ms for sstatus to settle and reflect the true
4809 	 * status of the port.  Failure to do so can create confusion
4810 	 * in probe, where the incorrect sstatus value can still
4811 	 * persist.
4812 	 */
4813 	for (i = 0; i < 1000; i++) {
4814 		sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
4815 
4816 		if ((flags == NV_PORT_HOTREMOVED) &&
4817 		    ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) !=
4818 		    SSTATUS_DET_DEVPRE_PHYCOM)) {
4819 			break;
4820 		}
4821 
4822 		if ((flags != NV_PORT_HOTREMOVED) &&
4823 		    ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) ==
4824 		    SSTATUS_DET_DEVPRE_PHYCOM)) {
4825 			break;
4826 		}
4827 		drv_usecwait(1);
4828 	}
4829 
4830 	NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp,
4831 	    "sstatus took %i us for DEVPRE_PHYCOM to settle", i));
4832 
4833 	if (flags == NV_PORT_HOTREMOVED) {
4834 		NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp,
4835 		    "nv_report_add_remove() hot removed"));
4836 		nv_port_state_change(nvp,
4837 		    SATA_EVNT_DEVICE_DETACHED,
4838 		    SATA_ADDR_CPORT, 0);
4839 
4840 		nvp->nvp_state |= NV_PORT_HOTREMOVED;
4841 	} else {
4842 		NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp,
4843 		    "nv_report_add_remove() hot plugged"));
4844 		nv_port_state_change(nvp, SATA_EVNT_DEVICE_ATTACHED,
4845 		    SATA_ADDR_CPORT, 0);
4846 	}
4847 }
4848