xref: /illumos-gate/usr/src/uts/common/io/sata/adapters/nv_sata/nv_sata.c (revision 3e95bd4ab92abca814bd28e854607d1975c7dc88)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24  */
25 
26 /*
27  *
28  * nv_sata is a combo SATA HBA driver for ck804/mcp5x (mcp5x = mcp55/mcp51)
29  * based chipsets.
30  *
31  * NCQ
32  * ---
33  *
34  * A portion of the NCQ is in place, but is incomplete.  NCQ is disabled
35  * and is likely to be revisited in the future.
36  *
37  *
38  * Power Management
39  * ----------------
40  *
41  * Normally power management would be responsible for ensuring the device
42  * is quiescent and then changing power states to the device, such as
43  * powering down parts or all of the device.  mcp5x/ck804 is unique in
44  * that it is only available as part of a larger southbridge chipset, so
45  * removing power to the device isn't possible.  Switches to control
46  * power management states D0/D3 in the PCI configuration space appear to
47  * be supported but changes to these states are apparently are ignored.
48  * The only further PM that the driver _could_ do is shut down the PHY,
49  * but in order to deliver the first rev of the driver sooner than later,
50  * that will be deferred until some future phase.
51  *
52  * Since the driver currently will not directly change any power state to
53  * the device, no power() entry point will be required.  However, it is
54  * possible that in ACPI power state S3, aka suspend to RAM, that power
55  * can be removed to the device, and the driver cannot rely on BIOS to
56  * have reset any state.  For the time being, there is no known
57  * non-default configurations that need to be programmed.  This judgement
58  * is based on the port of the legacy ata driver not having any such
59  * functionality and based on conversations with the PM team.  If such a
60  * restoration is later deemed necessary it can be incorporated into the
61  * DDI_RESUME processing.
62  *
63  */
64 
65 #include <sys/scsi/scsi.h>
66 #include <sys/pci.h>
67 #include <sys/byteorder.h>
68 #include <sys/sunddi.h>
69 #include <sys/sata/sata_hba.h>
70 #ifdef SGPIO_SUPPORT
71 #include <sys/sata/adapters/nv_sata/nv_sgpio.h>
72 #include <sys/devctl.h>
73 #include <sys/sdt.h>
74 #endif
75 #include <sys/sata/adapters/nv_sata/nv_sata.h>
76 #include <sys/disp.h>
77 #include <sys/note.h>
78 #include <sys/promif.h>
79 
80 
81 /*
82  * Function prototypes for driver entry points
83  */
84 static int nv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
85 static int nv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
86 static int nv_quiesce(dev_info_t *dip);
87 static int nv_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd,
88     void *arg, void **result);
89 
90 /*
91  * Function prototypes for entry points from sata service module
92  * These functions are distinguished from other local functions
93  * by the prefix "nv_sata_"
94  */
95 static int nv_sata_start(dev_info_t *dip, sata_pkt_t *spkt);
96 static int nv_sata_abort(dev_info_t *dip, sata_pkt_t *spkt, int);
97 static int nv_sata_reset(dev_info_t *dip, sata_device_t *sd);
98 static int nv_sata_activate(dev_info_t *dip, sata_device_t *sd);
99 static int nv_sata_deactivate(dev_info_t *dip, sata_device_t *sd);
100 
101 /*
102  * Local function prototypes
103  */
104 static uint_t mcp5x_intr(caddr_t arg1, caddr_t arg2);
105 static uint_t ck804_intr(caddr_t arg1, caddr_t arg2);
106 static int nv_add_legacy_intrs(nv_ctl_t *nvc);
107 #ifdef NV_MSI_SUPPORTED
108 static int nv_add_msi_intrs(nv_ctl_t *nvc);
109 #endif
110 static void nv_rem_intrs(nv_ctl_t *nvc);
111 static int nv_start_common(nv_port_t *nvp, sata_pkt_t *spkt);
112 static int nv_start_nodata(nv_port_t *nvp, int slot);
113 static void nv_intr_nodata(nv_port_t *nvp, nv_slot_t *spkt);
114 static int nv_start_pio_in(nv_port_t *nvp, int slot);
115 static int nv_start_pio_out(nv_port_t *nvp, int slot);
116 static void nv_intr_pio_in(nv_port_t *nvp, nv_slot_t *spkt);
117 static void nv_intr_pio_out(nv_port_t *nvp, nv_slot_t *spkt);
118 static int nv_start_pkt_pio(nv_port_t *nvp, int slot);
119 static void nv_intr_pkt_pio(nv_port_t *nvp, nv_slot_t *nv_slotp);
120 static int nv_start_dma(nv_port_t *nvp, int slot);
121 static void nv_intr_dma(nv_port_t *nvp, struct nv_slot *spkt);
122 static void nv_uninit_ctl(nv_ctl_t *nvc);
123 static void mcp5x_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
124 static void ck804_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
125 static void nv_uninit_port(nv_port_t *nvp);
126 static int nv_init_port(nv_port_t *nvp);
127 static int nv_init_ctl(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
128 static int mcp5x_packet_complete_intr(nv_ctl_t *nvc, nv_port_t *nvp);
129 #ifdef NCQ
130 static int mcp5x_dma_setup_intr(nv_ctl_t *nvc, nv_port_t *nvp);
131 #endif
132 static void nv_start_dma_engine(nv_port_t *nvp, int slot);
133 static void nv_port_state_change(nv_port_t *nvp, int event, uint8_t addr_type,
134     int state);
135 static void nv_common_reg_init(nv_ctl_t *nvc);
136 static void ck804_intr_process(nv_ctl_t *nvc, uint8_t intr_status);
137 static void nv_reset(nv_port_t *nvp, char *reason);
138 static void nv_complete_io(nv_port_t *nvp,  sata_pkt_t *spkt, int slot);
139 static void nv_timeout(void *);
140 static int nv_poll_wait(nv_port_t *nvp, sata_pkt_t *spkt);
141 static void nv_cmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...);
142 static void nv_read_signature(nv_port_t *nvp);
143 static void mcp5x_set_intr(nv_port_t *nvp, int flag);
144 static void ck804_set_intr(nv_port_t *nvp, int flag);
145 static void nv_resume(nv_port_t *nvp);
146 static void nv_suspend(nv_port_t *nvp);
147 static int nv_start_sync(nv_port_t *nvp, sata_pkt_t *spkt);
148 static int nv_abort_active(nv_port_t *nvp, sata_pkt_t *spkt, int abort_reason,
149     int flag);
150 static void nv_copy_registers(nv_port_t *nvp, sata_device_t *sd,
151     sata_pkt_t *spkt);
152 static void nv_report_add_remove(nv_port_t *nvp, int flags);
153 static int nv_start_async(nv_port_t *nvp, sata_pkt_t *spkt);
154 static int nv_wait3(nv_port_t *nvp, uchar_t onbits1, uchar_t offbits1,
155     uchar_t failure_onbits2, uchar_t failure_offbits2,
156     uchar_t failure_onbits3, uchar_t failure_offbits3,
157     uint_t timeout_usec, int type_wait);
158 static int nv_wait(nv_port_t *nvp, uchar_t onbits, uchar_t offbits,
159     uint_t timeout_usec, int type_wait);
160 static int nv_start_rqsense_pio(nv_port_t *nvp, nv_slot_t *nv_slotp);
161 static void nv_init_port_link_processing(nv_ctl_t *nvc);
162 static void nv_setup_timeout(nv_port_t *nvp, int time);
163 static void nv_monitor_reset(nv_port_t *nvp);
164 static int nv_bm_status_clear(nv_port_t *nvp);
165 static void nv_log(nv_ctl_t *nvc, nv_port_t *nvp, const char *fmt, ...);
166 
167 #ifdef SGPIO_SUPPORT
168 static int nv_open(dev_t *devp, int flag, int otyp, cred_t *credp);
169 static int nv_close(dev_t dev, int flag, int otyp, cred_t *credp);
170 static int nv_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
171     cred_t *credp, int *rvalp);
172 
173 static void nv_sgp_led_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
174 static int nv_sgp_detect(ddi_acc_handle_t pci_conf_handle, uint16_t *csrpp,
175     uint32_t *cbpp);
176 static int nv_sgp_init(nv_ctl_t *nvc);
177 static int nv_sgp_check_set_cmn(nv_ctl_t *nvc);
178 static int nv_sgp_csr_read(nv_ctl_t *nvc);
179 static void nv_sgp_csr_write(nv_ctl_t *nvc, uint32_t val);
180 static int nv_sgp_write_data(nv_ctl_t *nvc);
181 static void nv_sgp_activity_led_ctl(void *arg);
182 static void nv_sgp_drive_connect(nv_ctl_t *nvc, int drive);
183 static void nv_sgp_drive_disconnect(nv_ctl_t *nvc, int drive);
184 static void nv_sgp_drive_active(nv_ctl_t *nvc, int drive);
185 static void nv_sgp_locate(nv_ctl_t *nvc, int drive, int value);
186 static void nv_sgp_error(nv_ctl_t *nvc, int drive, int value);
187 static void nv_sgp_cleanup(nv_ctl_t *nvc);
188 #endif
189 
190 
191 /*
192  * DMA attributes for the data buffer for x86.  dma_attr_burstsizes is unused.
193  * Verify if needed if ported to other ISA.
194  */
195 static ddi_dma_attr_t buffer_dma_attr = {
196 	DMA_ATTR_V0,		/* dma_attr_version */
197 	0,			/* dma_attr_addr_lo: lowest bus address */
198 	0xffffffffull,		/* dma_attr_addr_hi: */
199 	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max i.e for one cookie */
200 	4,			/* dma_attr_align */
201 	1,			/* dma_attr_burstsizes. */
202 	1,			/* dma_attr_minxfer */
203 	0xffffffffull,		/* dma_attr_maxxfer including all cookies */
204 	0xffffffffull,		/* dma_attr_seg */
205 	NV_DMA_NSEGS,		/* dma_attr_sgllen */
206 	512,			/* dma_attr_granular */
207 	0,			/* dma_attr_flags */
208 };
209 static ddi_dma_attr_t buffer_dma_40bit_attr = {
210 	DMA_ATTR_V0,		/* dma_attr_version */
211 	0,			/* dma_attr_addr_lo: lowest bus address */
212 	0xffffffffffull,	/* dma_attr_addr_hi: */
213 	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max i.e for one cookie */
214 	4,			/* dma_attr_align */
215 	1,			/* dma_attr_burstsizes. */
216 	1,			/* dma_attr_minxfer */
217 	0xffffffffull,		/* dma_attr_maxxfer including all cookies */
218 	0xffffffffull,		/* dma_attr_seg */
219 	NV_DMA_NSEGS,		/* dma_attr_sgllen */
220 	512,			/* dma_attr_granular */
221 	0,			/* dma_attr_flags */
222 };
223 
224 
225 /*
226  * DMA attributes for PRD tables
227  */
228 ddi_dma_attr_t nv_prd_dma_attr = {
229 	DMA_ATTR_V0,		/* dma_attr_version */
230 	0,			/* dma_attr_addr_lo */
231 	0xffffffffull,		/* dma_attr_addr_hi */
232 	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max */
233 	4,			/* dma_attr_align */
234 	1,			/* dma_attr_burstsizes */
235 	1,			/* dma_attr_minxfer */
236 	NV_BM_64K_BOUNDARY,	/* dma_attr_maxxfer */
237 	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_seg */
238 	1,			/* dma_attr_sgllen */
239 	1,			/* dma_attr_granular */
240 	0			/* dma_attr_flags */
241 };
242 
243 /*
244  * Device access attributes
245  */
246 static ddi_device_acc_attr_t accattr = {
247     DDI_DEVICE_ATTR_V0,
248     DDI_STRUCTURE_LE_ACC,
249     DDI_STRICTORDER_ACC
250 };
251 
252 
253 #ifdef SGPIO_SUPPORT
254 static struct cb_ops nv_cb_ops = {
255 	nv_open,		/* open */
256 	nv_close,		/* close */
257 	nodev,			/* strategy (block) */
258 	nodev,			/* print (block) */
259 	nodev,			/* dump (block) */
260 	nodev,			/* read */
261 	nodev,			/* write */
262 	nv_ioctl,		/* ioctl */
263 	nodev,			/* devmap */
264 	nodev,			/* mmap */
265 	nodev,			/* segmap */
266 	nochpoll,		/* chpoll */
267 	ddi_prop_op,		/* prop_op */
268 	NULL,			/* streams */
269 	D_NEW | D_MP |
270 	D_64BIT | D_HOTPLUG,	/* flags */
271 	CB_REV			/* rev */
272 };
273 #endif  /* SGPIO_SUPPORT */
274 
275 
276 static struct dev_ops nv_dev_ops = {
277 	DEVO_REV,		/* devo_rev */
278 	0,			/* refcnt  */
279 	nv_getinfo,		/* info */
280 	nulldev,		/* identify */
281 	nulldev,		/* probe */
282 	nv_attach,		/* attach */
283 	nv_detach,		/* detach */
284 	nodev,			/* no reset */
285 #ifdef SGPIO_SUPPORT
286 	&nv_cb_ops,		/* driver operations */
287 #else
288 	(struct cb_ops *)0,	/* driver operations */
289 #endif
290 	NULL,			/* bus operations */
291 	NULL,			/* power */
292 	nv_quiesce		/* quiesce */
293 };
294 
295 
296 /*
297  * Request Sense CDB for ATAPI
298  */
299 static const uint8_t nv_rqsense_cdb[16] = {
300 	SCMD_REQUEST_SENSE,
301 	0,
302 	0,
303 	0,
304 	SATA_ATAPI_MIN_RQSENSE_LEN,
305 	0,
306 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0	/* pad out to max CDB length */
307 };
308 
309 
310 static sata_tran_hotplug_ops_t nv_hotplug_ops;
311 
312 extern struct mod_ops mod_driverops;
313 
314 static  struct modldrv modldrv = {
315 	&mod_driverops,	/* driverops */
316 	"Nvidia ck804/mcp51/mcp55 HBA",
317 	&nv_dev_ops,	/* driver ops */
318 };
319 
320 static  struct modlinkage modlinkage = {
321 	MODREV_1,
322 	&modldrv,
323 	NULL
324 };
325 
326 
327 /*
328  * Wait for a signature.
329  * If this variable is non-zero, the driver will wait for a device signature
330  * before reporting a device reset to the sata module.
331  * Some (most?) drives will not process commands sent to them before D2H FIS
332  * is sent to a host.
333  */
334 int nv_wait_for_signature = 1;
335 
336 /*
337  * Check for a signature availability.
338  * If this variable is non-zero, the driver will check task file error register
339  * for indication of a signature availability before reading a signature.
340  * Task file error register bit 0 set to 1 indicates that the drive
341  * is ready and it has sent the D2H FIS with a signature.
342  * This behavior of the error register is not reliable in the mcp5x controller.
343  */
344 int nv_check_tfr_error = 0;
345 
346 /*
347  * Max signature acquisition time, in milliseconds.
348  * The driver will try to acquire a device signature within specified time and
349  * quit acquisition operation if signature was not acquired.
350  */
351 long nv_sig_acquisition_time = NV_SIG_ACQUISITION_TIME;
352 
353 /*
354  * If this variable is non-zero, the driver will wait for a signature in the
355  * nv_monitor_reset function without any time limit.
356  * Used for debugging and drive evaluation.
357  */
358 int nv_wait_here_forever = 0;
359 
360 /*
361  * Reset after hotplug.
362  * If this variable is non-zero, driver will reset device after hotplug
363  * (device attached) interrupt.
364  * If the variable is zero, driver will not reset the new device nor will it
365  * try to read device signature.
366  * Chipset is generating a hotplug (device attached) interrupt with a delay, so
367  * the device should have already sent the D2H FIS with the signature.
368  */
369 int nv_reset_after_hotplug = 1;
370 
371 /*
372  * Delay after device hotplug.
373  * It specifies the time between detecting a hotplugged device and sending
374  * a notification to the SATA module.
375  * It is used when device is not reset after hotpugging and acquiring signature
376  * may be unreliable. The delay should be long enough for a device to become
377  * ready to accept commands.
378  */
379 int nv_hotplug_delay = NV_HOTPLUG_DELAY;
380 
381 
382 /*
383  * Maximum number of consecutive interrupts processed in the loop in the
384  * single invocation of the port interrupt routine.
385  */
386 int nv_max_intr_loops = NV_MAX_INTR_PER_DEV;
387 
388 
389 
390 /*
391  * wait between checks of reg status
392  */
393 int nv_usec_delay = NV_WAIT_REG_CHECK;
394 
395 /*
396  * The following is needed for nv_vcmn_err()
397  */
398 static kmutex_t nv_log_mutex; /* protects nv_log_buf */
399 static char nv_log_buf[NV_LOGBUF_LEN];
400 int nv_debug_flags =
401     NVDBG_HOT|NVDBG_RESET|NVDBG_ALWAYS|NVDBG_TIMEOUT|NVDBG_EVENT;
402 int nv_log_to_console = B_FALSE;
403 
404 int nv_prom_print = B_FALSE;
405 
406 /*
407  * for debugging
408  */
409 #ifdef DEBUG
410 int ncq_commands = 0;
411 int non_ncq_commands = 0;
412 #endif
413 
414 /*
415  * Opaque state pointer to be initialized by ddi_soft_state_init()
416  */
417 static void *nv_statep	= NULL;
418 
419 /*
420  * Map from CBP to shared space
421  *
422  * When a MCP55/IO55 parts supports SGPIO, there is a single CBP (SGPIO
423  * Control Block Pointer as well as the corresponding Control Block) that
424  * is shared across all driver instances associated with that part.  The
425  * Control Block is used to update and query the LED state for the devices
426  * on the controllers associated with those instances.  There is also some
427  * driver state (called the 'common' area here) associated with each SGPIO
428  * Control Block.  The nv_sgp_cpb2cmn is used to map a given CBP to its
429  * control area.
430  *
431  * The driver can also use this mapping array to determine whether the
432  * common area for a given CBP has been initialized, and, if it isn't
433  * initialized, initialize it.
434  *
435  * When a driver instance with a CBP value that is already in the array is
436  * initialized, it will use the pointer to the previously initialized common
437  * area associated with that SGPIO CBP value, rather than initialize it
438  * itself.
439  *
440  * nv_sgp_c2c_mutex is used to synchronize access to this mapping array.
441  */
442 #ifdef SGPIO_SUPPORT
443 static kmutex_t nv_sgp_c2c_mutex;
444 static struct nv_sgp_cbp2cmn nv_sgp_cbp2cmn[NV_MAX_CBPS];
445 #endif
446 
447 /* We still have problems in 40-bit DMA support, so disable it by default */
448 int nv_sata_40bit_dma = B_TRUE;
449 
450 static sata_tran_hotplug_ops_t nv_hotplug_ops = {
451 	SATA_TRAN_HOTPLUG_OPS_REV_1,	/* structure version */
452 	nv_sata_activate,	/* activate port. cfgadm -c connect */
453 	nv_sata_deactivate	/* deactivate port. cfgadm -c disconnect */
454 };
455 
456 
457 /*
458  *  nv module initialization
459  */
460 int
461 _init(void)
462 {
463 	int	error;
464 #ifdef SGPIO_SUPPORT
465 	int	i;
466 #endif
467 
468 	error = ddi_soft_state_init(&nv_statep, sizeof (nv_ctl_t), 0);
469 
470 	if (error != 0) {
471 
472 		return (error);
473 	}
474 
475 	mutex_init(&nv_log_mutex, NULL, MUTEX_DRIVER, NULL);
476 #ifdef SGPIO_SUPPORT
477 	mutex_init(&nv_sgp_c2c_mutex, NULL, MUTEX_DRIVER, NULL);
478 
479 	for (i = 0; i < NV_MAX_CBPS; i++) {
480 		nv_sgp_cbp2cmn[i].c2cm_cbp = 0;
481 		nv_sgp_cbp2cmn[i].c2cm_cmn = NULL;
482 	}
483 #endif
484 
485 	if ((error = sata_hba_init(&modlinkage)) != 0) {
486 		ddi_soft_state_fini(&nv_statep);
487 		mutex_destroy(&nv_log_mutex);
488 
489 		return (error);
490 	}
491 
492 	error = mod_install(&modlinkage);
493 	if (error != 0) {
494 		sata_hba_fini(&modlinkage);
495 		ddi_soft_state_fini(&nv_statep);
496 		mutex_destroy(&nv_log_mutex);
497 
498 		return (error);
499 	}
500 
501 	return (error);
502 }
503 
504 
505 /*
506  * nv module uninitialize
507  */
508 int
509 _fini(void)
510 {
511 	int	error;
512 
513 	error = mod_remove(&modlinkage);
514 
515 	if (error != 0) {
516 		return (error);
517 	}
518 
519 	/*
520 	 * remove the resources allocated in _init()
521 	 */
522 	mutex_destroy(&nv_log_mutex);
523 #ifdef SGPIO_SUPPORT
524 	mutex_destroy(&nv_sgp_c2c_mutex);
525 #endif
526 	sata_hba_fini(&modlinkage);
527 	ddi_soft_state_fini(&nv_statep);
528 
529 	return (error);
530 }
531 
532 
533 /*
534  * nv _info entry point
535  */
536 int
537 _info(struct modinfo *modinfop)
538 {
539 	return (mod_info(&modlinkage, modinfop));
540 }
541 
542 
543 /*
544  * these wrappers for ddi_{get,put}8 are for observability
545  * with dtrace
546  */
547 #ifdef DEBUG
548 
549 static void
550 nv_put8(ddi_acc_handle_t handle, uint8_t *dev_addr, uint8_t value)
551 {
552 	ddi_put8(handle, dev_addr, value);
553 }
554 
555 static void
556 nv_put32(ddi_acc_handle_t handle, uint32_t *dev_addr, uint32_t value)
557 {
558 	ddi_put32(handle, dev_addr, value);
559 }
560 
561 static uint32_t
562 nv_get32(ddi_acc_handle_t handle, uint32_t *dev_addr)
563 {
564 	return (ddi_get32(handle, dev_addr));
565 }
566 
567 static void
568 nv_put16(ddi_acc_handle_t handle, uint16_t *dev_addr, uint16_t value)
569 {
570 	ddi_put16(handle, dev_addr, value);
571 }
572 
573 static uint16_t
574 nv_get16(ddi_acc_handle_t handle, uint16_t *dev_addr)
575 {
576 	return (ddi_get16(handle, dev_addr));
577 }
578 
579 static uint8_t
580 nv_get8(ddi_acc_handle_t handle, uint8_t *dev_addr)
581 {
582 	return (ddi_get8(handle, dev_addr));
583 }
584 
585 #else
586 
587 #define	nv_put8 ddi_put8
588 #define	nv_put32 ddi_put32
589 #define	nv_get32 ddi_get32
590 #define	nv_put16 ddi_put16
591 #define	nv_get16 ddi_get16
592 #define	nv_get8 ddi_get8
593 
594 #endif
595 
596 
597 /*
598  * Driver attach
599  */
600 static int
601 nv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
602 {
603 	int status, attach_state, intr_types, bar, i, command;
604 	int inst = ddi_get_instance(dip);
605 	ddi_acc_handle_t pci_conf_handle;
606 	nv_ctl_t *nvc;
607 	uint8_t subclass;
608 	uint32_t reg32;
609 #ifdef SGPIO_SUPPORT
610 	pci_regspec_t *regs;
611 	int rlen;
612 #endif
613 
614 	switch (cmd) {
615 
616 	case DDI_ATTACH:
617 
618 		attach_state = ATTACH_PROGRESS_NONE;
619 
620 		status = ddi_soft_state_zalloc(nv_statep, inst);
621 
622 		if (status != DDI_SUCCESS) {
623 			break;
624 		}
625 
626 		nvc = ddi_get_soft_state(nv_statep, inst);
627 
628 		nvc->nvc_dip = dip;
629 
630 		NVLOG(NVDBG_INIT, nvc, NULL, "nv_attach(): DDI_ATTACH", NULL);
631 
632 		attach_state |= ATTACH_PROGRESS_STATEP_ALLOC;
633 
634 		if (pci_config_setup(dip, &pci_conf_handle) == DDI_SUCCESS) {
635 			nvc->nvc_revid = pci_config_get8(pci_conf_handle,
636 			    PCI_CONF_REVID);
637 			NVLOG(NVDBG_INIT, nvc, NULL,
638 			    "inst %d: silicon revid is %x nv_debug_flags=%x",
639 			    inst, nvc->nvc_revid, nv_debug_flags);
640 		} else {
641 			break;
642 		}
643 
644 		attach_state |= ATTACH_PROGRESS_CONF_HANDLE;
645 
646 		/*
647 		 * Set the PCI command register: enable IO/MEM/Master.
648 		 */
649 		command = pci_config_get16(pci_conf_handle, PCI_CONF_COMM);
650 		pci_config_put16(pci_conf_handle, PCI_CONF_COMM,
651 		    command|PCI_COMM_IO|PCI_COMM_MAE|PCI_COMM_ME);
652 
653 		subclass = pci_config_get8(pci_conf_handle, PCI_CONF_SUBCLASS);
654 
655 		if (subclass & PCI_MASS_RAID) {
656 			cmn_err(CE_WARN,
657 			    "attach failed: RAID mode not supported");
658 
659 			break;
660 		}
661 
662 		/*
663 		 * the 6 bars of the controller are:
664 		 * 0: port 0 task file
665 		 * 1: port 0 status
666 		 * 2: port 1 task file
667 		 * 3: port 1 status
668 		 * 4: bus master for both ports
669 		 * 5: extended registers for SATA features
670 		 */
671 		for (bar = 0; bar < 6; bar++) {
672 			status = ddi_regs_map_setup(dip, bar + 1,
673 			    (caddr_t *)&nvc->nvc_bar_addr[bar], 0, 0, &accattr,
674 			    &nvc->nvc_bar_hdl[bar]);
675 
676 			if (status != DDI_SUCCESS) {
677 				NVLOG(NVDBG_INIT, nvc, NULL,
678 				    "ddi_regs_map_setup failure for bar"
679 				    " %d status = %d", bar, status);
680 				break;
681 			}
682 		}
683 
684 		attach_state |= ATTACH_PROGRESS_BARS;
685 
686 		/*
687 		 * initialize controller structures
688 		 */
689 		status = nv_init_ctl(nvc, pci_conf_handle);
690 
691 		if (status == NV_FAILURE) {
692 			NVLOG(NVDBG_INIT, nvc, NULL, "nv_init_ctl failed",
693 			    NULL);
694 
695 			break;
696 		}
697 
698 		attach_state |= ATTACH_PROGRESS_CTL_SETUP;
699 
700 		/*
701 		 * initialize mutexes
702 		 */
703 		mutex_init(&nvc->nvc_mutex, NULL, MUTEX_DRIVER,
704 		    DDI_INTR_PRI(nvc->nvc_intr_pri));
705 
706 		attach_state |= ATTACH_PROGRESS_MUTEX_INIT;
707 
708 		/*
709 		 * get supported interrupt types
710 		 */
711 		if (ddi_intr_get_supported_types(dip, &intr_types) !=
712 		    DDI_SUCCESS) {
713 			nv_cmn_err(CE_WARN, nvc, NULL,
714 			    "ddi_intr_get_supported_types failed");
715 
716 			break;
717 		}
718 
719 		NVLOG(NVDBG_INIT, nvc, NULL,
720 		    "ddi_intr_get_supported_types() returned: 0x%x",
721 		    intr_types);
722 
723 #ifdef NV_MSI_SUPPORTED
724 		if (intr_types & DDI_INTR_TYPE_MSI) {
725 			NVLOG(NVDBG_INIT, nvc, NULL,
726 			    "using MSI interrupt type", NULL);
727 
728 			/*
729 			 * Try MSI first, but fall back to legacy if MSI
730 			 * attach fails
731 			 */
732 			if (nv_add_msi_intrs(nvc) == DDI_SUCCESS) {
733 				nvc->nvc_intr_type = DDI_INTR_TYPE_MSI;
734 				attach_state |= ATTACH_PROGRESS_INTR_ADDED;
735 				NVLOG(NVDBG_INIT, nvc, NULL,
736 				    "MSI interrupt setup done", NULL);
737 			} else {
738 				nv_cmn_err(CE_CONT, nvc, NULL,
739 				    "MSI registration failed "
740 				    "will try Legacy interrupts");
741 			}
742 		}
743 #endif
744 
745 		/*
746 		 * Either the MSI interrupt setup has failed or only
747 		 * the fixed interrupts are available on the system.
748 		 */
749 		if (!(attach_state & ATTACH_PROGRESS_INTR_ADDED) &&
750 		    (intr_types & DDI_INTR_TYPE_FIXED)) {
751 
752 			NVLOG(NVDBG_INIT, nvc, NULL,
753 			    "using Legacy interrupt type", NULL);
754 
755 			if (nv_add_legacy_intrs(nvc) == DDI_SUCCESS) {
756 				nvc->nvc_intr_type = DDI_INTR_TYPE_FIXED;
757 				attach_state |= ATTACH_PROGRESS_INTR_ADDED;
758 				NVLOG(NVDBG_INIT, nvc, NULL,
759 				    "Legacy interrupt setup done", NULL);
760 			} else {
761 				nv_cmn_err(CE_WARN, nvc, NULL,
762 				    "legacy interrupt setup failed");
763 				NVLOG(NVDBG_INIT, nvc, NULL,
764 				    "legacy interrupt setup failed", NULL);
765 				break;
766 			}
767 		}
768 
769 		if (!(attach_state & ATTACH_PROGRESS_INTR_ADDED)) {
770 			NVLOG(NVDBG_INIT, nvc, NULL,
771 			    "no interrupts registered", NULL);
772 			break;
773 		}
774 
775 #ifdef SGPIO_SUPPORT
776 		/*
777 		 * save off the controller number
778 		 */
779 		(void) ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
780 		    "reg", (caddr_t)&regs, &rlen);
781 		nvc->nvc_ctlr_num = PCI_REG_FUNC_G(regs->pci_phys_hi);
782 		kmem_free(regs, rlen);
783 
784 		/*
785 		 * initialize SGPIO
786 		 */
787 		nv_sgp_led_init(nvc, pci_conf_handle);
788 #endif	/* SGPIO_SUPPORT */
789 
790 		/*
791 		 * Initiate link processing and device identification
792 		 */
793 		nv_init_port_link_processing(nvc);
794 		/*
795 		 * attach to sata module
796 		 */
797 		if (sata_hba_attach(nvc->nvc_dip,
798 		    &nvc->nvc_sata_hba_tran,
799 		    DDI_ATTACH) != DDI_SUCCESS) {
800 			attach_state |= ATTACH_PROGRESS_SATA_MODULE;
801 
802 			break;
803 		}
804 
805 		pci_config_teardown(&pci_conf_handle);
806 
807 		NVLOG(NVDBG_INIT, nvc, NULL, "nv_attach DDI_SUCCESS", NULL);
808 
809 		return (DDI_SUCCESS);
810 
811 	case DDI_RESUME:
812 
813 		nvc = ddi_get_soft_state(nv_statep, inst);
814 
815 		NVLOG(NVDBG_INIT, nvc, NULL,
816 		    "nv_attach(): DDI_RESUME inst %d", inst);
817 
818 		if (pci_config_setup(dip, &pci_conf_handle) != DDI_SUCCESS) {
819 			return (DDI_FAILURE);
820 		}
821 
822 		/*
823 		 * Set the PCI command register: enable IO/MEM/Master.
824 		 */
825 		command = pci_config_get16(pci_conf_handle, PCI_CONF_COMM);
826 		pci_config_put16(pci_conf_handle, PCI_CONF_COMM,
827 		    command|PCI_COMM_IO|PCI_COMM_MAE|PCI_COMM_ME);
828 
829 		/*
830 		 * Need to set bit 2 to 1 at config offset 0x50
831 		 * to enable access to the bar5 registers.
832 		 */
833 		reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
834 
835 		if ((reg32 & NV_BAR5_SPACE_EN) != NV_BAR5_SPACE_EN) {
836 			pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
837 			    reg32 | NV_BAR5_SPACE_EN);
838 		}
839 
840 		nvc->nvc_state &= ~NV_CTRL_SUSPEND;
841 
842 		for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
843 			nv_resume(&(nvc->nvc_port[i]));
844 		}
845 
846 		pci_config_teardown(&pci_conf_handle);
847 
848 		return (DDI_SUCCESS);
849 
850 	default:
851 		return (DDI_FAILURE);
852 	}
853 
854 
855 	/*
856 	 * DDI_ATTACH failure path starts here
857 	 */
858 
859 	if (attach_state & ATTACH_PROGRESS_INTR_ADDED) {
860 		nv_rem_intrs(nvc);
861 	}
862 
863 	if (attach_state & ATTACH_PROGRESS_SATA_MODULE) {
864 		/*
865 		 * Remove timers
866 		 */
867 		int port = 0;
868 		nv_port_t *nvp;
869 
870 		for (; port < NV_MAX_PORTS(nvc); port++) {
871 			nvp = &(nvc->nvc_port[port]);
872 			if (nvp->nvp_timeout_id != 0) {
873 				(void) untimeout(nvp->nvp_timeout_id);
874 			}
875 		}
876 	}
877 
878 	if (attach_state & ATTACH_PROGRESS_MUTEX_INIT) {
879 		mutex_destroy(&nvc->nvc_mutex);
880 	}
881 
882 	if (attach_state & ATTACH_PROGRESS_CTL_SETUP) {
883 		nv_uninit_ctl(nvc);
884 	}
885 
886 	if (attach_state & ATTACH_PROGRESS_BARS) {
887 		while (--bar >= 0) {
888 			ddi_regs_map_free(&nvc->nvc_bar_hdl[bar]);
889 		}
890 	}
891 
892 	if (attach_state & ATTACH_PROGRESS_STATEP_ALLOC) {
893 		ddi_soft_state_free(nv_statep, inst);
894 	}
895 
896 	if (attach_state & ATTACH_PROGRESS_CONF_HANDLE) {
897 		pci_config_teardown(&pci_conf_handle);
898 	}
899 
900 	cmn_err(CE_WARN, "nv_sata%d attach failed", inst);
901 
902 	return (DDI_FAILURE);
903 }
904 
905 
906 static int
907 nv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
908 {
909 	int i, port, inst = ddi_get_instance(dip);
910 	nv_ctl_t *nvc;
911 	nv_port_t *nvp;
912 
913 	nvc = ddi_get_soft_state(nv_statep, inst);
914 
915 	switch (cmd) {
916 
917 	case DDI_DETACH:
918 
919 		NVLOG(NVDBG_INIT, nvc, NULL, "nv_detach: DDI_DETACH", NULL);
920 
921 		/*
922 		 * Remove interrupts
923 		 */
924 		nv_rem_intrs(nvc);
925 
926 		/*
927 		 * Remove timers
928 		 */
929 		for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
930 			nvp = &(nvc->nvc_port[port]);
931 			if (nvp->nvp_timeout_id != 0) {
932 				(void) untimeout(nvp->nvp_timeout_id);
933 			}
934 		}
935 
936 		/*
937 		 * Remove maps
938 		 */
939 		for (i = 0; i < 6; i++) {
940 			ddi_regs_map_free(&nvc->nvc_bar_hdl[i]);
941 		}
942 
943 		/*
944 		 * Destroy mutexes
945 		 */
946 		mutex_destroy(&nvc->nvc_mutex);
947 
948 		/*
949 		 * Uninitialize the controller structures
950 		 */
951 		nv_uninit_ctl(nvc);
952 
953 #ifdef SGPIO_SUPPORT
954 		/*
955 		 * release SGPIO resources
956 		 */
957 		nv_sgp_cleanup(nvc);
958 #endif
959 
960 		/*
961 		 * unregister from the sata module
962 		 */
963 		(void) sata_hba_detach(nvc->nvc_dip, DDI_DETACH);
964 
965 		/*
966 		 * Free soft state
967 		 */
968 		ddi_soft_state_free(nv_statep, inst);
969 
970 		return (DDI_SUCCESS);
971 
972 	case DDI_SUSPEND:
973 
974 		NVLOG(NVDBG_INIT, nvc, NULL, "nv_detach: DDI_SUSPEND", NULL);
975 
976 		for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
977 			nv_suspend(&(nvc->nvc_port[i]));
978 		}
979 
980 		nvc->nvc_state |= NV_CTRL_SUSPEND;
981 
982 		return (DDI_SUCCESS);
983 
984 	default:
985 		return (DDI_FAILURE);
986 	}
987 }
988 
989 
990 /*ARGSUSED*/
991 static int
992 nv_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
993 {
994 	nv_ctl_t *nvc;
995 	int instance;
996 	dev_t dev;
997 
998 	dev = (dev_t)arg;
999 	instance = getminor(dev);
1000 
1001 	switch (infocmd) {
1002 	case DDI_INFO_DEVT2DEVINFO:
1003 		nvc = ddi_get_soft_state(nv_statep,  instance);
1004 		if (nvc != NULL) {
1005 			*result = nvc->nvc_dip;
1006 			return (DDI_SUCCESS);
1007 		} else {
1008 			*result = NULL;
1009 			return (DDI_FAILURE);
1010 		}
1011 	case DDI_INFO_DEVT2INSTANCE:
1012 		*(int *)result = instance;
1013 		break;
1014 	default:
1015 		break;
1016 	}
1017 	return (DDI_SUCCESS);
1018 }
1019 
1020 
1021 #ifdef SGPIO_SUPPORT
1022 /* ARGSUSED */
1023 static int
1024 nv_open(dev_t *devp, int flag, int otyp, cred_t *credp)
1025 {
1026 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, getminor(*devp));
1027 
1028 	if (nvc == NULL) {
1029 		return (ENXIO);
1030 	}
1031 
1032 	return (0);
1033 }
1034 
1035 
1036 /* ARGSUSED */
1037 static int
1038 nv_close(dev_t dev, int flag, int otyp, cred_t *credp)
1039 {
1040 	return (0);
1041 }
1042 
1043 
1044 /* ARGSUSED */
1045 static int
1046 nv_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, int *rvalp)
1047 {
1048 	nv_ctl_t *nvc;
1049 	int inst;
1050 	int status;
1051 	int ctlr, port;
1052 	int drive;
1053 	uint8_t curr_led;
1054 	struct dc_led_ctl led;
1055 
1056 	inst = getminor(dev);
1057 	if (inst == -1) {
1058 		return (EBADF);
1059 	}
1060 
1061 	nvc = ddi_get_soft_state(nv_statep, inst);
1062 	if (nvc == NULL) {
1063 		return (EBADF);
1064 	}
1065 
1066 	if ((nvc->nvc_sgp_cbp == NULL) || (nvc->nvc_sgp_cmn == NULL)) {
1067 		return (EIO);
1068 	}
1069 
1070 	switch (cmd) {
1071 	case DEVCTL_SET_LED:
1072 		status = ddi_copyin((void *)arg, &led,
1073 		    sizeof (struct dc_led_ctl), mode);
1074 		if (status != 0)
1075 			return (EFAULT);
1076 
1077 		/*
1078 		 * Since only the first two controller currently support
1079 		 * SGPIO (as per NVIDIA docs), this code will as well.
1080 		 * Note that this validate the port value within led_state
1081 		 * as well.
1082 		 */
1083 
1084 		ctlr = SGP_DRV_TO_CTLR(led.led_number);
1085 		if ((ctlr != 0) && (ctlr != 1))
1086 			return (ENXIO);
1087 
1088 		if ((led.led_state & DCL_STATE_FAST_BLNK) ||
1089 		    (led.led_state & DCL_STATE_SLOW_BLNK)) {
1090 			return (EINVAL);
1091 		}
1092 
1093 		drive = led.led_number;
1094 
1095 		if ((led.led_ctl_active == DCL_CNTRL_OFF) ||
1096 		    (led.led_state == DCL_STATE_OFF)) {
1097 
1098 			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
1099 				nv_sgp_error(nvc, drive, TR_ERROR_DISABLE);
1100 			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
1101 				nv_sgp_locate(nvc, drive, TR_LOCATE_DISABLE);
1102 			} else {
1103 				return (ENXIO);
1104 			}
1105 
1106 			port = SGP_DRV_TO_PORT(led.led_number);
1107 			nvc->nvc_port[port].nvp_sgp_ioctl_mod |= led.led_type;
1108 		}
1109 
1110 		if (led.led_ctl_active == DCL_CNTRL_ON) {
1111 			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
1112 				nv_sgp_error(nvc, drive, TR_ERROR_ENABLE);
1113 			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
1114 				nv_sgp_locate(nvc, drive, TR_LOCATE_ENABLE);
1115 			} else {
1116 				return (ENXIO);
1117 			}
1118 
1119 			port = SGP_DRV_TO_PORT(led.led_number);
1120 			nvc->nvc_port[port].nvp_sgp_ioctl_mod |= led.led_type;
1121 		}
1122 
1123 		break;
1124 
1125 	case DEVCTL_GET_LED:
1126 		status = ddi_copyin((void *)arg, &led,
1127 		    sizeof (struct dc_led_ctl), mode);
1128 		if (status != 0)
1129 			return (EFAULT);
1130 
1131 		/*
1132 		 * Since only the first two controller currently support
1133 		 * SGPIO (as per NVIDIA docs), this code will as well.
1134 		 * Note that this validate the port value within led_state
1135 		 * as well.
1136 		 */
1137 
1138 		ctlr = SGP_DRV_TO_CTLR(led.led_number);
1139 		if ((ctlr != 0) && (ctlr != 1))
1140 			return (ENXIO);
1141 
1142 		curr_led = SGPIO0_TR_DRV(nvc->nvc_sgp_cbp->sgpio0_tr,
1143 		    led.led_number);
1144 
1145 		port = SGP_DRV_TO_PORT(led.led_number);
1146 		if (nvc->nvc_port[port].nvp_sgp_ioctl_mod & led.led_type) {
1147 			led.led_ctl_active = DCL_CNTRL_ON;
1148 
1149 			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
1150 				if (TR_ERROR(curr_led) == TR_ERROR_DISABLE)
1151 					led.led_state = DCL_STATE_OFF;
1152 				else
1153 					led.led_state = DCL_STATE_ON;
1154 			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
1155 				if (TR_LOCATE(curr_led) == TR_LOCATE_DISABLE)
1156 					led.led_state = DCL_STATE_OFF;
1157 				else
1158 					led.led_state = DCL_STATE_ON;
1159 			} else {
1160 				return (ENXIO);
1161 			}
1162 		} else {
1163 			led.led_ctl_active = DCL_CNTRL_OFF;
1164 			/*
1165 			 * Not really off, but never set and no constant for
1166 			 * tri-state
1167 			 */
1168 			led.led_state = DCL_STATE_OFF;
1169 		}
1170 
1171 		status = ddi_copyout(&led, (void *)arg,
1172 		    sizeof (struct dc_led_ctl), mode);
1173 		if (status != 0)
1174 			return (EFAULT);
1175 
1176 		break;
1177 
1178 	case DEVCTL_NUM_LEDS:
1179 		led.led_number = SGPIO_DRV_CNT_VALUE;
1180 		led.led_ctl_active = 1;
1181 		led.led_type = 3;
1182 
1183 		/*
1184 		 * According to documentation, NVIDIA SGPIO is supposed to
1185 		 * support blinking, but it does not seem to work in practice.
1186 		 */
1187 		led.led_state = DCL_STATE_ON;
1188 
1189 		status = ddi_copyout(&led, (void *)arg,
1190 		    sizeof (struct dc_led_ctl), mode);
1191 		if (status != 0)
1192 			return (EFAULT);
1193 
1194 		break;
1195 
1196 	default:
1197 		return (EINVAL);
1198 	}
1199 
1200 	return (0);
1201 }
1202 #endif	/* SGPIO_SUPPORT */
1203 
1204 
1205 /*
1206  * Called by sata module to probe a port.  Port and device state
1207  * are not changed here... only reported back to the sata module.
1208  *
1209  */
1210 static int
1211 nv_sata_probe(dev_info_t *dip, sata_device_t *sd)
1212 {
1213 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1214 	uint8_t cport = sd->satadev_addr.cport;
1215 	uint8_t pmport = sd->satadev_addr.pmport;
1216 	uint8_t qual = sd->satadev_addr.qual;
1217 	nv_port_t *nvp;
1218 
1219 	if (cport >= NV_MAX_PORTS(nvc)) {
1220 		sd->satadev_type = SATA_DTYPE_NONE;
1221 		sd->satadev_state = SATA_STATE_UNKNOWN;
1222 
1223 		return (SATA_FAILURE);
1224 	}
1225 
1226 	ASSERT(nvc->nvc_port != NULL);
1227 	nvp = &(nvc->nvc_port[cport]);
1228 	ASSERT(nvp != NULL);
1229 
1230 	NVLOG(NVDBG_ENTRY, nvc, nvp,
1231 	    "nv_sata_probe: enter cport: 0x%x, pmport: 0x%x, "
1232 	    "qual: 0x%x", cport, pmport, qual);
1233 
1234 	mutex_enter(&nvp->nvp_mutex);
1235 
1236 	/*
1237 	 * This check seems to be done in the SATA module.
1238 	 * It may not be required here
1239 	 */
1240 	if (nvp->nvp_state & NV_PORT_INACTIVE) {
1241 		nv_cmn_err(CE_WARN, nvc, nvp,
1242 		    "port inactive.  Use cfgadm to activate");
1243 		sd->satadev_type = SATA_DTYPE_UNKNOWN;
1244 		sd->satadev_state = SATA_PSTATE_SHUTDOWN;
1245 		mutex_exit(&nvp->nvp_mutex);
1246 
1247 		return (SATA_SUCCESS);
1248 	}
1249 
1250 	if (nvp->nvp_state & NV_PORT_FAILED) {
1251 		NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
1252 		    "probe: port failed", NULL);
1253 		sd->satadev_type = SATA_DTYPE_NONE;
1254 		sd->satadev_state = SATA_PSTATE_FAILED;
1255 		mutex_exit(&nvp->nvp_mutex);
1256 
1257 		return (SATA_SUCCESS);
1258 	}
1259 
1260 	if (qual == SATA_ADDR_PMPORT) {
1261 		sd->satadev_type = SATA_DTYPE_NONE;
1262 		sd->satadev_state = SATA_STATE_UNKNOWN;
1263 		mutex_exit(&nvp->nvp_mutex);
1264 		nv_cmn_err(CE_WARN, nvc, nvp,
1265 		    "controller does not support port multiplier");
1266 
1267 		return (SATA_SUCCESS);
1268 	}
1269 
1270 	sd->satadev_state = SATA_PSTATE_PWRON;
1271 
1272 	nv_copy_registers(nvp, sd, NULL);
1273 
1274 	if (nvp->nvp_state & (NV_PORT_RESET | NV_PORT_RESET_RETRY)) {
1275 		/*
1276 		 * We are waiting for reset to complete and to fetch
1277 		 * a signature.
1278 		 * Reset will cause the link to go down for a short period of
1279 		 * time.  If reset processing continues for less than
1280 		 * NV_LINK_DOWN_TIMEOUT, fake the status of the link so that
1281 		 * we will not report intermittent link down.
1282 		 * Maybe we should report previous link state?
1283 		 */
1284 		if (TICK_TO_MSEC(ddi_get_lbolt() - nvp->nvp_reset_time) <
1285 		    NV_LINK_DOWN_TIMEOUT) {
1286 			SSTATUS_SET_IPM(sd->satadev_scr.sstatus,
1287 			    SSTATUS_IPM_ACTIVE);
1288 			SSTATUS_SET_DET(sd->satadev_scr.sstatus,
1289 			    SSTATUS_DET_DEVPRE_PHYCOM);
1290 			sd->satadev_type = nvp->nvp_type;
1291 			mutex_exit(&nvp->nvp_mutex);
1292 
1293 			return (SATA_SUCCESS);
1294 		}
1295 	}
1296 	/*
1297 	 * Just report the current port state
1298 	 */
1299 	sd->satadev_type = nvp->nvp_type;
1300 	sd->satadev_state = nvp->nvp_state | SATA_PSTATE_PWRON;
1301 	mutex_exit(&nvp->nvp_mutex);
1302 
1303 #ifdef SGPIO_SUPPORT
1304 	if (nvp->nvp_type == SATA_DTYPE_ATADISK) {
1305 		nv_sgp_drive_connect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
1306 		    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
1307 	} else {
1308 		nv_sgp_drive_disconnect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
1309 		    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
1310 	}
1311 #endif
1312 
1313 	return (SATA_SUCCESS);
1314 }
1315 
1316 
1317 /*
1318  * Called by sata module to start a new command.
1319  */
1320 static int
1321 nv_sata_start(dev_info_t *dip, sata_pkt_t *spkt)
1322 {
1323 	int cport = spkt->satapkt_device.satadev_addr.cport;
1324 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1325 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1326 	int ret;
1327 
1328 	NVLOG(NVDBG_ENTRY, nvc, nvp, "nv_sata_start: opmode: 0x%x cmd=%x",
1329 	    spkt->satapkt_op_mode, spkt->satapkt_cmd.satacmd_cmd_reg);
1330 
1331 	mutex_enter(&nvp->nvp_mutex);
1332 
1333 	/*
1334 	 * record number of commands for debugging
1335 	 */
1336 	nvp->nvp_seq++;
1337 
1338 	if ((nvp->nvp_state & NV_PORT_INIT) == 0) {
1339 		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1340 		NVLOG(NVDBG_ERRS, nvc, nvp,
1341 		    "nv_sata_start: port not yet initialized", NULL);
1342 		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1343 		mutex_exit(&nvp->nvp_mutex);
1344 
1345 		return (SATA_TRAN_PORT_ERROR);
1346 	}
1347 
1348 	if (nvp->nvp_state & NV_PORT_INACTIVE) {
1349 		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1350 		NVLOG(NVDBG_ERRS, nvc, nvp,
1351 		    "nv_sata_start: NV_PORT_INACTIVE", NULL);
1352 		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1353 		mutex_exit(&nvp->nvp_mutex);
1354 
1355 		return (SATA_TRAN_PORT_ERROR);
1356 	}
1357 
1358 	if (nvp->nvp_state & NV_PORT_FAILED) {
1359 		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1360 		NVLOG(NVDBG_ERRS, nvc, nvp,
1361 		    "nv_sata_start: NV_PORT_FAILED state", NULL);
1362 		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1363 		mutex_exit(&nvp->nvp_mutex);
1364 
1365 		return (SATA_TRAN_PORT_ERROR);
1366 	}
1367 
1368 	if (nvp->nvp_state & NV_PORT_RESET) {
1369 		NVLOG(NVDBG_ERRS, nvc, nvp,
1370 		    "still waiting for reset completion", NULL);
1371 		spkt->satapkt_reason = SATA_PKT_BUSY;
1372 		mutex_exit(&nvp->nvp_mutex);
1373 
1374 		/*
1375 		 * If in panic, timeouts do not occur, so fake one
1376 		 * so that the signature can be acquired to complete
1377 		 * the reset handling.
1378 		 */
1379 		if (ddi_in_panic()) {
1380 			nv_timeout(nvp);
1381 		}
1382 
1383 		return (SATA_TRAN_BUSY);
1384 	}
1385 
1386 	if (nvp->nvp_type == SATA_DTYPE_NONE) {
1387 		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1388 		NVLOG(NVDBG_ERRS, nvc, nvp,
1389 		    "nv_sata_start: SATA_DTYPE_NONE", NULL);
1390 		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1391 		mutex_exit(&nvp->nvp_mutex);
1392 
1393 		return (SATA_TRAN_PORT_ERROR);
1394 	}
1395 
1396 	if (spkt->satapkt_device.satadev_type == SATA_DTYPE_PMULT) {
1397 		ASSERT(nvp->nvp_type == SATA_DTYPE_PMULT);
1398 		nv_cmn_err(CE_WARN, nvc, nvp,
1399 		    "port multipliers not supported by controller");
1400 		spkt->satapkt_reason = SATA_PKT_CMD_UNSUPPORTED;
1401 		mutex_exit(&nvp->nvp_mutex);
1402 
1403 		return (SATA_TRAN_CMD_UNSUPPORTED);
1404 	}
1405 
1406 	/*
1407 	 * after a device reset, and then when sata module restore processing
1408 	 * is complete, the sata module will set sata_clear_dev_reset which
1409 	 * indicates that restore processing has completed and normal
1410 	 * non-restore related commands should be processed.
1411 	 */
1412 	if (spkt->satapkt_cmd.satacmd_flags.sata_clear_dev_reset) {
1413 		nvp->nvp_state &= ~NV_PORT_RESTORE;
1414 		NVLOG(NVDBG_RESET, nvc, nvp,
1415 		    "nv_sata_start: clearing NV_PORT_RESTORE", NULL);
1416 	}
1417 
1418 	/*
1419 	 * if the device was recently reset as indicated by NV_PORT_RESTORE,
1420 	 * only allow commands which restore device state.  The sata module
1421 	 * marks such commands with with sata_ignore_dev_reset.
1422 	 *
1423 	 * during coredump, nv_reset is called and but then the restore
1424 	 * doesn't happen.  For now, workaround by ignoring the wait for
1425 	 * restore if the system is panicing.
1426 	 */
1427 	if ((nvp->nvp_state & NV_PORT_RESTORE) &&
1428 	    !(spkt->satapkt_cmd.satacmd_flags.sata_ignore_dev_reset) &&
1429 	    (ddi_in_panic() == 0)) {
1430 		spkt->satapkt_reason = SATA_PKT_BUSY;
1431 		NVLOG(NVDBG_RESET, nvc, nvp,
1432 		    "nv_sata_start: waiting for restore ", NULL);
1433 		mutex_exit(&nvp->nvp_mutex);
1434 
1435 		return (SATA_TRAN_BUSY);
1436 	}
1437 
1438 	if (nvp->nvp_state & NV_PORT_ABORTING) {
1439 		spkt->satapkt_reason = SATA_PKT_BUSY;
1440 		NVLOG(NVDBG_ERRS, nvc, nvp,
1441 		    "nv_sata_start: NV_PORT_ABORTING", NULL);
1442 		mutex_exit(&nvp->nvp_mutex);
1443 
1444 		return (SATA_TRAN_BUSY);
1445 	}
1446 
1447 	/* Clear SError to be able to check errors after the command failure */
1448 	nv_put32(nvp->nvp_ctlp->nvc_bar_hdl[5], nvp->nvp_serror, 0xffffffff);
1449 
1450 	if (spkt->satapkt_op_mode &
1451 	    (SATA_OPMODE_POLLING|SATA_OPMODE_SYNCH)) {
1452 
1453 		ret = nv_start_sync(nvp, spkt);
1454 
1455 		mutex_exit(&nvp->nvp_mutex);
1456 
1457 		return (ret);
1458 	}
1459 
1460 	/*
1461 	 * start command asynchronous command
1462 	 */
1463 	ret = nv_start_async(nvp, spkt);
1464 
1465 	mutex_exit(&nvp->nvp_mutex);
1466 
1467 	return (ret);
1468 }
1469 
1470 
1471 /*
1472  * SATA_OPMODE_POLLING implies the driver is in a
1473  * synchronous mode, and SATA_OPMODE_SYNCH is also set.
1474  * If only SATA_OPMODE_SYNCH is set, the driver can use
1475  * interrupts and sleep wait on a cv.
1476  *
1477  * If SATA_OPMODE_POLLING is set, the driver can't use
1478  * interrupts and must busy wait and simulate the
1479  * interrupts by waiting for BSY to be cleared.
1480  *
1481  * Synchronous mode has to return BUSY if there are
1482  * any other commands already on the drive.
1483  */
1484 static int
1485 nv_start_sync(nv_port_t *nvp, sata_pkt_t *spkt)
1486 {
1487 	nv_ctl_t *nvc = nvp->nvp_ctlp;
1488 	int ret;
1489 
1490 	NVLOG(NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync: entry",
1491 	    NULL);
1492 
1493 	if (nvp->nvp_ncq_run != 0 || nvp->nvp_non_ncq_run != 0) {
1494 		spkt->satapkt_reason = SATA_PKT_BUSY;
1495 		NVLOG(NVDBG_SYNC, nvp->nvp_ctlp, nvp,
1496 		    "nv_sata_satapkt_sync: device is busy, sync cmd rejected"
1497 		    "ncq_run: %d non_ncq_run: %d  spkt: %p",
1498 		    nvp->nvp_ncq_run, nvp->nvp_non_ncq_run,
1499 		    (&(nvp->nvp_slot[0]))->nvslot_spkt);
1500 
1501 		return (SATA_TRAN_BUSY);
1502 	}
1503 
1504 	/*
1505 	 * if SYNC but not POLL, verify that this is not on interrupt thread.
1506 	 */
1507 	if (!(spkt->satapkt_op_mode & SATA_OPMODE_POLLING) &&
1508 	    servicing_interrupt()) {
1509 		spkt->satapkt_reason = SATA_PKT_BUSY;
1510 		NVLOG(NVDBG_SYNC, nvp->nvp_ctlp, nvp,
1511 		    "SYNC mode not allowed during interrupt", NULL);
1512 
1513 		return (SATA_TRAN_BUSY);
1514 
1515 	}
1516 
1517 	/*
1518 	 * disable interrupt generation if in polled mode
1519 	 */
1520 	if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1521 		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
1522 	}
1523 
1524 	if ((ret = nv_start_common(nvp, spkt)) != SATA_TRAN_ACCEPTED) {
1525 		if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1526 			(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1527 		}
1528 
1529 		return (ret);
1530 	}
1531 
1532 	if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1533 		mutex_exit(&nvp->nvp_mutex);
1534 		ret = nv_poll_wait(nvp, spkt);
1535 		mutex_enter(&nvp->nvp_mutex);
1536 
1537 		(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1538 
1539 		NVLOG(NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync:"
1540 		    " done % reason %d", ret);
1541 
1542 		return (ret);
1543 	}
1544 
1545 	/*
1546 	 * non-polling synchronous mode handling.  The interrupt will signal
1547 	 * when the IO is completed.
1548 	 */
1549 	cv_wait(&nvp->nvp_poll_cv, &nvp->nvp_mutex);
1550 
1551 	if (spkt->satapkt_reason != SATA_PKT_COMPLETED) {
1552 
1553 		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
1554 	}
1555 
1556 	NVLOG(NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync:"
1557 	    " done % reason %d", spkt->satapkt_reason);
1558 
1559 	return (SATA_TRAN_ACCEPTED);
1560 }
1561 
1562 
1563 static int
1564 nv_poll_wait(nv_port_t *nvp, sata_pkt_t *spkt)
1565 {
1566 	int ret;
1567 	nv_ctl_t *nvc = nvp->nvp_ctlp;
1568 #if ! defined(__lock_lint)
1569 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[0]); /* not NCQ aware */
1570 #endif
1571 
1572 	NVLOG(NVDBG_SYNC, nvc, nvp, "nv_poll_wait: enter", NULL);
1573 
1574 	for (;;) {
1575 
1576 		NV_DELAY_NSEC(400);
1577 
1578 		NVLOG(NVDBG_SYNC, nvc, nvp, "nv_poll_wait: before nv_wait",
1579 		    NULL);
1580 		if (nv_wait(nvp, 0, SATA_STATUS_BSY,
1581 		    NV_SEC2USEC(spkt->satapkt_time), NV_NOSLEEP) == B_FALSE) {
1582 			mutex_enter(&nvp->nvp_mutex);
1583 			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
1584 			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
1585 			nvp->nvp_state |= NV_PORT_RESET;
1586 			nvp->nvp_state &= ~(NV_PORT_RESTORE |
1587 			    NV_PORT_RESET_RETRY);
1588 			nv_reset(nvp, "poll_wait");
1589 			nv_complete_io(nvp, spkt, 0);
1590 			mutex_exit(&nvp->nvp_mutex);
1591 			NVLOG(NVDBG_SYNC, nvc, nvp, "nv_poll_wait: "
1592 			    "SATA_STATUS_BSY", NULL);
1593 
1594 			return (SATA_TRAN_ACCEPTED);
1595 		}
1596 
1597 		NVLOG(NVDBG_SYNC, nvc, nvp, "nv_poll_wait: before nvc_intr",
1598 		    NULL);
1599 
1600 		/*
1601 		 * Simulate interrupt.
1602 		 */
1603 		ret = (*(nvc->nvc_interrupt))((caddr_t)nvc, NULL);
1604 		NVLOG(NVDBG_SYNC, nvc, nvp, "nv_poll_wait: after nvc_intr",
1605 		    NULL);
1606 
1607 		if (ret != DDI_INTR_CLAIMED) {
1608 			NVLOG(NVDBG_SYNC, nvc, nvp, "nv_poll_wait:"
1609 			    " unclaimed -- resetting", NULL);
1610 			mutex_enter(&nvp->nvp_mutex);
1611 			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
1612 			nvp->nvp_state |= NV_PORT_RESET;
1613 			nvp->nvp_state &= ~(NV_PORT_RESTORE |
1614 			    NV_PORT_RESET_RETRY);
1615 			nv_reset(nvp, "poll_wait intr not claimed");
1616 			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
1617 			nv_complete_io(nvp, spkt, 0);
1618 			mutex_exit(&nvp->nvp_mutex);
1619 
1620 			return (SATA_TRAN_ACCEPTED);
1621 		}
1622 
1623 #if ! defined(__lock_lint)
1624 		if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
1625 			/*
1626 			 * packet is complete
1627 			 */
1628 			return (SATA_TRAN_ACCEPTED);
1629 		}
1630 #endif
1631 	}
1632 	/*NOTREACHED*/
1633 }
1634 
1635 
1636 /*
1637  * Called by sata module to abort outstanding packets.
1638  */
1639 /*ARGSUSED*/
1640 static int
1641 nv_sata_abort(dev_info_t *dip, sata_pkt_t *spkt, int flag)
1642 {
1643 	int cport = spkt->satapkt_device.satadev_addr.cport;
1644 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1645 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1646 	int c_a, ret;
1647 
1648 	ASSERT(cport < NV_MAX_PORTS(nvc));
1649 	NVLOG(NVDBG_ENTRY, nvc, nvp, "nv_sata_abort %d %p", flag, spkt);
1650 
1651 	mutex_enter(&nvp->nvp_mutex);
1652 
1653 	if (nvp->nvp_state & NV_PORT_INACTIVE) {
1654 		mutex_exit(&nvp->nvp_mutex);
1655 		nv_cmn_err(CE_WARN, nvc, nvp,
1656 		    "abort request failed: port inactive");
1657 
1658 		return (SATA_FAILURE);
1659 	}
1660 
1661 	/*
1662 	 * spkt == NULL then abort all commands
1663 	 */
1664 	c_a = nv_abort_active(nvp, spkt, SATA_PKT_ABORTED, B_TRUE);
1665 
1666 	if (c_a) {
1667 		NVLOG(NVDBG_ENTRY, nvc, nvp,
1668 		    "packets aborted running=%d", c_a);
1669 		ret = SATA_SUCCESS;
1670 	} else {
1671 		if (spkt == NULL) {
1672 			NVLOG(NVDBG_ENTRY, nvc, nvp, "no spkts to abort", NULL);
1673 		} else {
1674 			NVLOG(NVDBG_ENTRY, nvc, nvp,
1675 			    "can't find spkt to abort", NULL);
1676 		}
1677 		ret = SATA_FAILURE;
1678 	}
1679 
1680 	mutex_exit(&nvp->nvp_mutex);
1681 
1682 	return (ret);
1683 }
1684 
1685 
1686 /*
1687  * if spkt == NULL abort all pkts running, otherwise
1688  * abort the requested packet.  must be called with nv_mutex
1689  * held and returns with it held.  Not NCQ aware.
1690  */
1691 static int
1692 nv_abort_active(nv_port_t *nvp, sata_pkt_t *spkt, int abort_reason, int flag)
1693 {
1694 	int aborted = 0, i, reset_once = B_FALSE;
1695 	struct nv_slot *nv_slotp;
1696 	sata_pkt_t *spkt_slot;
1697 
1698 	ASSERT(MUTEX_HELD(&nvp->nvp_mutex));
1699 
1700 	/*
1701 	 * return if the port is not configured
1702 	 */
1703 	if (nvp->nvp_slot == NULL) {
1704 		NVLOG(NVDBG_ENTRY, nvp->nvp_ctlp, nvp,
1705 		    "nv_abort_active: not configured so returning", NULL);
1706 
1707 		return (0);
1708 	}
1709 
1710 	NVLOG(NVDBG_ENTRY, nvp->nvp_ctlp, nvp, "nv_abort_active", NULL);
1711 
1712 	nvp->nvp_state |= NV_PORT_ABORTING;
1713 
1714 	for (i = 0; i < nvp->nvp_queue_depth; i++) {
1715 
1716 		nv_slotp = &(nvp->nvp_slot[i]);
1717 		spkt_slot = nv_slotp->nvslot_spkt;
1718 
1719 		/*
1720 		 * skip if not active command in slot
1721 		 */
1722 		if (spkt_slot == NULL) {
1723 			continue;
1724 		}
1725 
1726 		/*
1727 		 * if a specific packet was requested, skip if
1728 		 * this is not a match
1729 		 */
1730 		if ((spkt != NULL) && (spkt != spkt_slot)) {
1731 			continue;
1732 		}
1733 
1734 		/*
1735 		 * stop the hardware.  This could need reworking
1736 		 * when NCQ is enabled in the driver.
1737 		 */
1738 		if (reset_once == B_FALSE) {
1739 			ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
1740 
1741 			/*
1742 			 * stop DMA engine
1743 			 */
1744 			nv_put8(bmhdl, nvp->nvp_bmicx,  0);
1745 
1746 			/*
1747 			 * Reset only if explicitly specified by the arg flag
1748 			 */
1749 			if (flag == B_TRUE) {
1750 				reset_once = B_TRUE;
1751 				nvp->nvp_state |= NV_PORT_RESET;
1752 				nvp->nvp_state &= ~(NV_PORT_RESTORE |
1753 				    NV_PORT_RESET_RETRY);
1754 				nv_reset(nvp, "abort_active");
1755 			}
1756 		}
1757 
1758 		spkt_slot->satapkt_reason = abort_reason;
1759 		nv_complete_io(nvp, spkt_slot, i);
1760 		aborted++;
1761 	}
1762 
1763 	nvp->nvp_state &= ~NV_PORT_ABORTING;
1764 
1765 	return (aborted);
1766 }
1767 
1768 
1769 /*
1770  * Called by sata module to reset a port, device, or the controller.
1771  */
1772 static int
1773 nv_sata_reset(dev_info_t *dip, sata_device_t *sd)
1774 {
1775 	int cport = sd->satadev_addr.cport;
1776 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1777 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1778 	int ret = SATA_SUCCESS;
1779 
1780 	ASSERT(cport < NV_MAX_PORTS(nvc));
1781 
1782 	NVLOG(NVDBG_ENTRY, nvc, nvp, "nv_sata_reset", NULL);
1783 
1784 	mutex_enter(&nvp->nvp_mutex);
1785 
1786 	switch (sd->satadev_addr.qual) {
1787 
1788 	case SATA_ADDR_CPORT:
1789 		/*FALLTHROUGH*/
1790 	case SATA_ADDR_DCPORT:
1791 		nvp->nvp_state |= NV_PORT_RESET;
1792 		nvp->nvp_state &= ~NV_PORT_RESTORE;
1793 		nv_reset(nvp, "sata_reset");
1794 		(void) nv_abort_active(nvp, NULL, SATA_PKT_RESET, B_FALSE);
1795 
1796 		break;
1797 	case SATA_ADDR_CNTRL:
1798 		NVLOG(NVDBG_ENTRY, nvc, nvp,
1799 		    "nv_sata_reset: conroller reset not supported", NULL);
1800 
1801 		break;
1802 	case SATA_ADDR_PMPORT:
1803 	case SATA_ADDR_DPMPORT:
1804 		NVLOG(NVDBG_ENTRY, nvc, nvp,
1805 		    "nv_sata_reset: port multipliers not supported", NULL);
1806 		/*FALLTHROUGH*/
1807 	default:
1808 		/*
1809 		 * unsupported case
1810 		 */
1811 		ret = SATA_FAILURE;
1812 		break;
1813 	}
1814 
1815 	if (ret == SATA_SUCCESS) {
1816 		/*
1817 		 * If the port is inactive, do a quiet reset and don't attempt
1818 		 * to wait for reset completion or do any post reset processing
1819 		 */
1820 		if (nvp->nvp_state & NV_PORT_INACTIVE) {
1821 			nvp->nvp_state &= ~NV_PORT_RESET;
1822 			nvp->nvp_reset_time = 0;
1823 		}
1824 
1825 		/*
1826 		 * clear the port failed flag
1827 		 */
1828 		nvp->nvp_state &= ~NV_PORT_FAILED;
1829 	}
1830 
1831 	mutex_exit(&nvp->nvp_mutex);
1832 
1833 	return (ret);
1834 }
1835 
1836 
1837 /*
1838  * Sata entry point to handle port activation.  cfgadm -c connect
1839  */
1840 static int
1841 nv_sata_activate(dev_info_t *dip, sata_device_t *sd)
1842 {
1843 	int cport = sd->satadev_addr.cport;
1844 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1845 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1846 
1847 	ASSERT(cport < NV_MAX_PORTS(nvc));
1848 	NVLOG(NVDBG_ENTRY, nvc, nvp, "nv_sata_activate", NULL);
1849 
1850 	mutex_enter(&nvp->nvp_mutex);
1851 
1852 	sd->satadev_state = SATA_STATE_READY;
1853 
1854 	nv_copy_registers(nvp, sd, NULL);
1855 
1856 	(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1857 
1858 	nvp->nvp_state &= ~NV_PORT_INACTIVE;
1859 	/* Initiate link probing and device signature acquisition */
1860 	nvp->nvp_type = SATA_DTYPE_NONE;
1861 	nvp->nvp_signature = 0;
1862 	nvp->nvp_state |= NV_PORT_RESET; /* | NV_PORT_PROBE; */
1863 	nvp->nvp_state &= ~(NV_PORT_RESTORE | NV_PORT_RESET_RETRY);
1864 	nv_reset(nvp, "sata_activate");
1865 
1866 	mutex_exit(&nvp->nvp_mutex);
1867 
1868 	return (SATA_SUCCESS);
1869 }
1870 
1871 
1872 /*
1873  * Sata entry point to handle port deactivation.  cfgadm -c disconnect
1874  */
1875 static int
1876 nv_sata_deactivate(dev_info_t *dip, sata_device_t *sd)
1877 {
1878 	int cport = sd->satadev_addr.cport;
1879 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1880 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1881 
1882 	ASSERT(cport < NV_MAX_PORTS(nvc));
1883 	NVLOG(NVDBG_ENTRY, nvc, nvp, "nv_sata_deactivate", NULL);
1884 
1885 	mutex_enter(&nvp->nvp_mutex);
1886 
1887 	(void) nv_abort_active(nvp, NULL, SATA_PKT_ABORTED, B_FALSE);
1888 
1889 	/*
1890 	 * make the device inaccessible
1891 	 */
1892 	nvp->nvp_state |= NV_PORT_INACTIVE;
1893 
1894 	/*
1895 	 * disable the interrupts on port
1896 	 */
1897 	(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
1898 
1899 	sd->satadev_state = SATA_PSTATE_SHUTDOWN;
1900 	nv_copy_registers(nvp, sd, NULL);
1901 
1902 	mutex_exit(&nvp->nvp_mutex);
1903 
1904 	return (SATA_SUCCESS);
1905 }
1906 
1907 
1908 /*
1909  * find an empty slot in the driver's queue, increment counters,
1910  * and then invoke the appropriate PIO or DMA start routine.
1911  */
1912 static int
1913 nv_start_common(nv_port_t *nvp, sata_pkt_t *spkt)
1914 {
1915 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
1916 	int on_bit = 0x01, slot, sactive, ret, ncq = 0;
1917 	uint8_t cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
1918 	int direction = sata_cmdp->satacmd_flags.sata_data_direction;
1919 	nv_ctl_t *nvc = nvp->nvp_ctlp;
1920 	nv_slot_t *nv_slotp;
1921 	boolean_t dma_cmd;
1922 
1923 	NVLOG(NVDBG_DELIVER, nvc, nvp, "nv_start_common  entered: cmd: 0x%x",
1924 	    sata_cmdp->satacmd_cmd_reg);
1925 
1926 	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
1927 	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
1928 		nvp->nvp_ncq_run++;
1929 		/*
1930 		 * search for an empty NCQ slot.  by the time, it's already
1931 		 * been determined by the caller that there is room on the
1932 		 * queue.
1933 		 */
1934 		for (slot = 0; slot < nvp->nvp_queue_depth; slot++,
1935 		    on_bit <<= 1) {
1936 			if ((nvp->nvp_sactive_cache & on_bit) == 0) {
1937 				break;
1938 			}
1939 		}
1940 
1941 		/*
1942 		 * the first empty slot found, should not exceed the queue
1943 		 * depth of the drive.  if it does it's an error.
1944 		 */
1945 		ASSERT(slot != nvp->nvp_queue_depth);
1946 
1947 		sactive = nv_get32(nvc->nvc_bar_hdl[5],
1948 		    nvp->nvp_sactive);
1949 		ASSERT((sactive & on_bit) == 0);
1950 		nv_put32(nvc->nvc_bar_hdl[5], nvp->nvp_sactive, on_bit);
1951 		NVLOG(NVDBG_DELIVER, nvc, nvp, "setting SACTIVE onbit: %X",
1952 		    on_bit);
1953 		nvp->nvp_sactive_cache |= on_bit;
1954 
1955 		ncq = NVSLOT_NCQ;
1956 
1957 	} else {
1958 		nvp->nvp_non_ncq_run++;
1959 		slot = 0;
1960 	}
1961 
1962 	nv_slotp = (nv_slot_t *)&nvp->nvp_slot[slot];
1963 
1964 	ASSERT(nv_slotp->nvslot_spkt == NULL);
1965 
1966 	nv_slotp->nvslot_spkt = spkt;
1967 	nv_slotp->nvslot_flags = ncq;
1968 
1969 	/*
1970 	 * the sata module doesn't indicate which commands utilize the
1971 	 * DMA engine, so find out using this switch table.
1972 	 */
1973 	switch (spkt->satapkt_cmd.satacmd_cmd_reg) {
1974 	case SATAC_READ_DMA_EXT:
1975 	case SATAC_WRITE_DMA_EXT:
1976 	case SATAC_WRITE_DMA:
1977 	case SATAC_READ_DMA:
1978 	case SATAC_READ_DMA_QUEUED:
1979 	case SATAC_READ_DMA_QUEUED_EXT:
1980 	case SATAC_WRITE_DMA_QUEUED:
1981 	case SATAC_WRITE_DMA_QUEUED_EXT:
1982 	case SATAC_READ_FPDMA_QUEUED:
1983 	case SATAC_WRITE_FPDMA_QUEUED:
1984 		dma_cmd = B_TRUE;
1985 		break;
1986 	default:
1987 		dma_cmd = B_FALSE;
1988 	}
1989 
1990 	if (sata_cmdp->satacmd_num_dma_cookies != 0 && dma_cmd == B_TRUE) {
1991 		NVLOG(NVDBG_DELIVER, nvc,  nvp, "DMA command", NULL);
1992 		nv_slotp->nvslot_start = nv_start_dma;
1993 		nv_slotp->nvslot_intr = nv_intr_dma;
1994 	} else if (spkt->satapkt_cmd.satacmd_cmd_reg == SATAC_PACKET) {
1995 		NVLOG(NVDBG_DELIVER, nvc,  nvp, "packet command", NULL);
1996 		nv_slotp->nvslot_start = nv_start_pkt_pio;
1997 		nv_slotp->nvslot_intr = nv_intr_pkt_pio;
1998 		if ((direction == SATA_DIR_READ) ||
1999 		    (direction == SATA_DIR_WRITE)) {
2000 			nv_slotp->nvslot_byte_count =
2001 			    spkt->satapkt_cmd.satacmd_bp->b_bcount;
2002 			nv_slotp->nvslot_v_addr =
2003 			    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
2004 			/*
2005 			 * Freeing DMA resources allocated by the framework
2006 			 * now to avoid buffer overwrite (dma sync) problems
2007 			 * when the buffer is released at command completion.
2008 			 * Primarily an issue on systems with more than
2009 			 * 4GB of memory.
2010 			 */
2011 			sata_free_dma_resources(spkt);
2012 		}
2013 	} else if (direction == SATA_DIR_NODATA_XFER) {
2014 		NVLOG(NVDBG_DELIVER, nvc, nvp, "non-data command", NULL);
2015 		nv_slotp->nvslot_start = nv_start_nodata;
2016 		nv_slotp->nvslot_intr = nv_intr_nodata;
2017 	} else if (direction == SATA_DIR_READ) {
2018 		NVLOG(NVDBG_DELIVER, nvc, nvp, "pio in command", NULL);
2019 		nv_slotp->nvslot_start = nv_start_pio_in;
2020 		nv_slotp->nvslot_intr = nv_intr_pio_in;
2021 		nv_slotp->nvslot_byte_count =
2022 		    spkt->satapkt_cmd.satacmd_bp->b_bcount;
2023 		nv_slotp->nvslot_v_addr =
2024 		    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
2025 		/*
2026 		 * Freeing DMA resources allocated by the framework now to
2027 		 * avoid buffer overwrite (dma sync) problems when the buffer
2028 		 * is released at command completion.  This is not an issue
2029 		 * for write because write does not update the buffer.
2030 		 * Primarily an issue on systems with more than 4GB of memory.
2031 		 */
2032 		sata_free_dma_resources(spkt);
2033 	} else if (direction == SATA_DIR_WRITE) {
2034 		NVLOG(NVDBG_DELIVER, nvc, nvp, "pio out command", NULL);
2035 		nv_slotp->nvslot_start = nv_start_pio_out;
2036 		nv_slotp->nvslot_intr = nv_intr_pio_out;
2037 		nv_slotp->nvslot_byte_count =
2038 		    spkt->satapkt_cmd.satacmd_bp->b_bcount;
2039 		nv_slotp->nvslot_v_addr =
2040 		    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
2041 	} else {
2042 		nv_cmn_err(CE_WARN, nvc, nvp, "malformed command: direction"
2043 		    " %d cookies %d cmd %x",
2044 		    sata_cmdp->satacmd_flags.sata_data_direction,
2045 		    sata_cmdp->satacmd_num_dma_cookies,  cmd);
2046 		spkt->satapkt_reason = SATA_PKT_CMD_UNSUPPORTED;
2047 		ret = SATA_TRAN_CMD_UNSUPPORTED;
2048 
2049 		goto fail;
2050 	}
2051 
2052 	if ((ret = (*nv_slotp->nvslot_start)(nvp, slot)) ==
2053 	    SATA_TRAN_ACCEPTED) {
2054 #ifdef SGPIO_SUPPORT
2055 		nv_sgp_drive_active(nvp->nvp_ctlp,
2056 		    (nvp->nvp_ctlp->nvc_ctlr_num * 2) + nvp->nvp_port_num);
2057 #endif
2058 		nv_slotp->nvslot_stime = ddi_get_lbolt();
2059 
2060 		/*
2061 		 * start timer if it's not already running and this packet
2062 		 * is not requesting polled mode.
2063 		 */
2064 		if ((nvp->nvp_timeout_id == 0) &&
2065 		    ((spkt->satapkt_op_mode & SATA_OPMODE_POLLING) == 0)) {
2066 			nv_setup_timeout(nvp, NV_ONE_SEC);
2067 		}
2068 
2069 		nvp->nvp_previous_cmd = nvp->nvp_last_cmd;
2070 		nvp->nvp_last_cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
2071 
2072 		return (SATA_TRAN_ACCEPTED);
2073 	}
2074 
2075 	fail:
2076 
2077 	spkt->satapkt_reason = SATA_TRAN_PORT_ERROR;
2078 
2079 	if (ncq == NVSLOT_NCQ) {
2080 		nvp->nvp_ncq_run--;
2081 		nvp->nvp_sactive_cache &= ~on_bit;
2082 	} else {
2083 		nvp->nvp_non_ncq_run--;
2084 	}
2085 	nv_slotp->nvslot_spkt = NULL;
2086 	nv_slotp->nvslot_flags = 0;
2087 
2088 	return (ret);
2089 }
2090 
2091 
2092 /*
2093  * Check if the signature is ready and if non-zero translate
2094  * it into a solaris sata defined type.
2095  */
2096 static void
2097 nv_read_signature(nv_port_t *nvp)
2098 {
2099 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
2100 	int retry_once = 0;
2101 
2102 	retry:
2103 	/*
2104 	 * Task file error register bit 0 set to 1 indicate that drive
2105 	 * is ready and have sent D2H FIS with a signature.
2106 	 */
2107 	if (nv_check_tfr_error != 0) {
2108 		uint8_t tfr_error = nv_get8(cmdhdl, nvp->nvp_error);
2109 		if (!(tfr_error & SATA_ERROR_ILI)) {
2110 			NVLOG(NVDBG_VERBOSE, nvp->nvp_ctlp, nvp,
2111 			    "nv_read_signature: signature not ready", NULL);
2112 
2113 			return;
2114 		}
2115 	}
2116 
2117 	nvp->nvp_signature = nv_get8(cmdhdl, nvp->nvp_count);
2118 	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_sect) << 8);
2119 	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_lcyl) << 16);
2120 	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_hcyl) << 24);
2121 
2122 	NVLOG(NVDBG_VERBOSE, nvp->nvp_ctlp, nvp,
2123 	    "nv_read_signature: 0x%x ", nvp->nvp_signature);
2124 
2125 	switch (nvp->nvp_signature) {
2126 
2127 	case NV_SIG_DISK:
2128 		NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp, "drive is a disk", NULL);
2129 		nvp->nvp_type = SATA_DTYPE_ATADISK;
2130 		break;
2131 	case NV_SIG_ATAPI:
2132 		NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
2133 		    "drive is an optical device", NULL);
2134 		nvp->nvp_type = SATA_DTYPE_ATAPICD;
2135 		break;
2136 	case NV_SIG_PM:
2137 		NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
2138 		    "device is a port multiplier", NULL);
2139 		nvp->nvp_type = SATA_DTYPE_PMULT;
2140 		break;
2141 	case NV_SIG_NOTREADY:
2142 		NVLOG(NVDBG_VERBOSE, nvp->nvp_ctlp, nvp,
2143 		    "signature not ready", NULL);
2144 		nvp->nvp_type = SATA_DTYPE_UNKNOWN;
2145 		break;
2146 	default:
2147 		if (retry_once++ == 0) {
2148 			/*
2149 			 * this is a rare corner case where the controller
2150 			 * was in the middle of updating the registers as the
2151 			 * driver is reading them.  If this happens, wait a
2152 			 * bit and retry, but just once.
2153 			 */
2154 			NV_DELAY_NSEC(1000000);
2155 
2156 			goto retry;
2157 		}
2158 
2159 		nv_cmn_err(CE_WARN, nvp->nvp_ctlp, nvp, "signature %X not"
2160 		    " recognized", nvp->nvp_signature);
2161 		nvp->nvp_type = SATA_DTYPE_UNKNOWN;
2162 		break;
2163 	}
2164 
2165 	if (nvp->nvp_signature) {
2166 		nvp->nvp_state &= ~(NV_PORT_RESET_RETRY | NV_PORT_RESET);
2167 	}
2168 
2169 #ifdef SGPIO_SUPPORT
2170 	if (nvp->nvp_signature == NV_SIG_DISK) {
2171 		nv_sgp_drive_connect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
2172 		    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
2173 	} else {
2174 		nv_sgp_drive_disconnect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
2175 		    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
2176 	}
2177 #endif
2178 }
2179 
2180 
2181 /*
2182  * Set up a new timeout or complete a timeout.
2183  * Timeout value has to be specified in microseconds. If time is zero, no new
2184  * timeout is scheduled.
2185  * Must be called at the end of the timeout routine.
2186  */
2187 static void
2188 nv_setup_timeout(nv_port_t *nvp, int time)
2189 {
2190 	clock_t old_duration = nvp->nvp_timeout_duration;
2191 
2192 	ASSERT(time != 0);
2193 
2194 	if (nvp->nvp_timeout_id != 0 && nvp->nvp_timeout_duration == 0) {
2195 		/*
2196 		 * Since we are dropping the mutex for untimeout,
2197 		 * the timeout may be executed while we are trying to
2198 		 * untimeout and setting up a new timeout.
2199 		 * If nvp_timeout_duration is 0, then this function
2200 		 * was re-entered. Just exit.
2201 		 */
2202 	cmn_err(CE_WARN, "nv_setup_timeout re-entered");
2203 		return;
2204 	}
2205 	nvp->nvp_timeout_duration = 0;
2206 	if (nvp->nvp_timeout_id == 0) {
2207 		/* Start new timer */
2208 		nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
2209 		    drv_usectohz(time));
2210 	} else {
2211 		/*
2212 		 * If the currently running timeout is due later than the
2213 		 * requested one, restart it with a new expiration.
2214 		 * Our timeouts do not need to be accurate - we would be just
2215 		 * checking that the specified time was exceeded.
2216 		 */
2217 		if (old_duration > time) {
2218 			mutex_exit(&nvp->nvp_mutex);
2219 			(void) untimeout(nvp->nvp_timeout_id);
2220 			mutex_enter(&nvp->nvp_mutex);
2221 			nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
2222 			    drv_usectohz(time));
2223 		}
2224 	}
2225 	nvp->nvp_timeout_duration = time;
2226 }
2227 
2228 
2229 
2230 int nv_reset_length = NV_RESET_LENGTH;
2231 
2232 /*
2233  * Reset the port
2234  *
2235  * Entered with nvp mutex held
2236  */
2237 static void
2238 nv_reset(nv_port_t *nvp, char *reason)
2239 {
2240 	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
2241 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
2242 	nv_ctl_t *nvc = nvp->nvp_ctlp;
2243 	uint32_t sctrl, serr, sstatus;
2244 	uint8_t bmicx;
2245 	int i, j, reset = 0;
2246 
2247 	ASSERT(mutex_owned(&nvp->nvp_mutex));
2248 
2249 	serr = nv_get32(bar5_hdl, nvp->nvp_serror);
2250 
2251 	/*
2252 	 * stop DMA engine.
2253 	 */
2254 	bmicx = nv_get8(nvp->nvp_bm_hdl, nvp->nvp_bmicx);
2255 	nv_put8(nvp->nvp_bm_hdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
2256 
2257 	nvp->nvp_state |= NV_PORT_RESET;
2258 	nvp->nvp_reset_time = ddi_get_lbolt();
2259 	nvp->nvp_reset_count++;
2260 
2261 	if (strcmp(reason, "attach") != 0) {
2262 		nv_cmn_err(CE_NOTE, nvc, nvp, "nv_reset: reason: %s serr 0x%x",
2263 		    reason, serr);
2264 		/*
2265 		 * keep a record of why the first reset occurred, for debugging
2266 		 */
2267 		if (nvp->nvp_first_reset_reason[0] == '\0') {
2268 			(void) strncpy(nvp->nvp_first_reset_reason,
2269 			    reason, NV_REASON_LEN);
2270 			nvp->nvp_first_reset_reason[NV_REASON_LEN - 1] = '\0';
2271 		}
2272 	}
2273 
2274 	NVLOG(NVDBG_RESET, nvc, nvp, "nv_reset_count: %d",
2275 	    nvp->nvp_reset_count);
2276 
2277 	(void) strncpy(nvp->nvp_reset_reason, reason, NV_REASON_LEN);
2278 
2279 	/*
2280 	 * ensure there is terminating NULL
2281 	 */
2282 	nvp->nvp_reset_reason[NV_REASON_LEN - 1] = '\0';
2283 
2284 	/*
2285 	 * Issue hardware reset; retry if necessary.
2286 	 */
2287 	for (i = 0; i < NV_RESET_ATTEMPTS; i++) {
2288 		/*
2289 		 * Clear signature registers
2290 		 */
2291 		nv_put8(cmdhdl, nvp->nvp_sect, 0);
2292 		nv_put8(cmdhdl, nvp->nvp_lcyl, 0);
2293 		nv_put8(cmdhdl, nvp->nvp_hcyl, 0);
2294 		nv_put8(cmdhdl, nvp->nvp_count, 0);
2295 
2296 		/* Clear task file error register */
2297 		nv_put8(nvp->nvp_cmd_hdl, nvp->nvp_error, 0);
2298 
2299 		/*
2300 		 * assert reset in PHY by writing a 1 to bit 0 scontrol
2301 		 */
2302 		sctrl = nv_get32(bar5_hdl, nvp->nvp_sctrl);
2303 		nv_put32(bar5_hdl, nvp->nvp_sctrl,
2304 		    sctrl | SCONTROL_DET_COMRESET);
2305 
2306 		/* Wait at least 1ms, as required by the spec */
2307 		drv_usecwait(nv_reset_length);
2308 
2309 		/* Reset all accumulated error bits */
2310 		nv_put32(bar5_hdl, nvp->nvp_serror, 0xffffffff);
2311 
2312 		sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
2313 		sctrl = nv_get32(bar5_hdl, nvp->nvp_sctrl);
2314 		NVLOG(NVDBG_RESET, nvc, nvp, "nv_reset: applied (%d); "
2315 		    "sctrl 0x%x, sstatus 0x%x", i, sctrl, sstatus);
2316 
2317 		/* de-assert reset in PHY */
2318 		nv_put32(bar5_hdl, nvp->nvp_sctrl,
2319 		    sctrl & ~SCONTROL_DET_COMRESET);
2320 
2321 		/*
2322 		 * Wait up to 10ms for COMINIT to arrive, indicating that
2323 		 * the device recognized COMRESET.
2324 		 */
2325 		for (j = 0; j < 10; j++) {
2326 			drv_usecwait(NV_ONE_MSEC);
2327 			sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
2328 			if ((SSTATUS_GET_IPM(sstatus) == SSTATUS_IPM_ACTIVE) &&
2329 			    (SSTATUS_GET_DET(sstatus) ==
2330 			    SSTATUS_DET_DEVPRE_PHYCOM)) {
2331 				reset = 1;
2332 				break;
2333 			}
2334 		}
2335 		if (reset == 1)
2336 			break;
2337 	}
2338 	serr = nv_get32(bar5_hdl, nvp->nvp_serror);
2339 	if (reset == 0) {
2340 		NVLOG(NVDBG_RESET, nvc, nvp, "nv_reset not succeeded "
2341 		    "(serr 0x%x) after %d attempts", serr, i);
2342 	} else {
2343 		NVLOG(NVDBG_RESET, nvc, nvp, "nv_reset succeeded (serr 0x%x)"
2344 		    "after %dms", serr, TICK_TO_MSEC(ddi_get_lbolt() -
2345 		    nvp->nvp_reset_time));
2346 	}
2347 	nvp->nvp_reset_time = ddi_get_lbolt();
2348 
2349 	if (servicing_interrupt()) {
2350 		nv_setup_timeout(nvp, NV_ONE_MSEC);
2351 	} else if (!(nvp->nvp_state & NV_PORT_RESET_RETRY)) {
2352 		nv_monitor_reset(nvp);
2353 	}
2354 }
2355 
2356 
2357 /*
2358  * Initialize register handling specific to mcp51/mcp55
2359  */
2360 /* ARGSUSED */
2361 static void
2362 mcp5x_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2363 {
2364 	nv_port_t *nvp;
2365 	uchar_t *bar5  = nvc->nvc_bar_addr[5];
2366 	uint8_t off, port;
2367 
2368 	nvc->nvc_mcp5x_ctl = (uint32_t *)(bar5 + MCP5X_CTL);
2369 	nvc->nvc_mcp5x_ncq = (uint32_t *)(bar5 + MCP5X_NCQ);
2370 
2371 	for (port = 0, off = 0; port < NV_MAX_PORTS(nvc); port++, off += 2) {
2372 		nvp = &(nvc->nvc_port[port]);
2373 		nvp->nvp_mcp5x_int_status =
2374 		    (uint16_t *)(bar5 + MCP5X_INT_STATUS + off);
2375 		nvp->nvp_mcp5x_int_ctl =
2376 		    (uint16_t *)(bar5 + MCP5X_INT_CTL + off);
2377 
2378 		/*
2379 		 * clear any previous interrupts asserted
2380 		 */
2381 		nv_put16(nvc->nvc_bar_hdl[5], nvp->nvp_mcp5x_int_status,
2382 		    MCP5X_INT_CLEAR);
2383 
2384 		/*
2385 		 * These are the interrupts to accept for now.  The spec
2386 		 * says these are enable bits, but nvidia has indicated
2387 		 * these are masking bits.  Even though they may be masked
2388 		 * out to prevent asserting the main interrupt, they can
2389 		 * still be asserted while reading the interrupt status
2390 		 * register, so that needs to be considered in the interrupt
2391 		 * handler.
2392 		 */
2393 		nv_put16(nvc->nvc_bar_hdl[5], nvp->nvp_mcp5x_int_ctl,
2394 		    ~(MCP5X_INT_IGNORE));
2395 	}
2396 
2397 	/*
2398 	 * Allow the driver to program the BM on the first command instead
2399 	 * of waiting for an interrupt.
2400 	 */
2401 #ifdef NCQ
2402 	flags = MCP_SATA_AE_NCQ_PDEV_FIRST_CMD | MCP_SATA_AE_NCQ_SDEV_FIRST_CMD;
2403 	nv_put32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp5x_ncq, flags);
2404 	flags = MCP_SATA_AE_CTL_PRI_SWNCQ | MCP_SATA_AE_CTL_SEC_SWNCQ;
2405 	nv_put32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp5x_ctl, flags);
2406 #endif
2407 
2408 	/*
2409 	 * mcp55 rev A03 and above supports 40-bit physical addressing.
2410 	 * Enable DMA to take advantage of that.
2411 	 *
2412 	 */
2413 	if (nvc->nvc_revid >= 0xa3) {
2414 		if (nv_sata_40bit_dma == B_TRUE) {
2415 			uint32_t reg32;
2416 			NVLOG(NVDBG_INIT, nvp->nvp_ctlp, nvp,
2417 			    "rev id is %X.  40-bit DMA addressing"
2418 			    " enabled", nvc->nvc_revid);
2419 			nvc->dma_40bit = B_TRUE;
2420 
2421 			reg32 = pci_config_get32(pci_conf_handle,
2422 			    NV_SATA_CFG_20);
2423 			pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
2424 			    reg32 | NV_40BIT_PRD);
2425 
2426 			/*
2427 			 * CFG_23 bits 0-7 contain the top 8 bits (of 40
2428 			 * bits) for the primary PRD table, and bits 8-15
2429 			 * contain the top 8 bits for the secondary.  Set
2430 			 * to zero because the DMA attribute table for PRD
2431 			 * allocation forces it into 32 bit address space
2432 			 * anyway.
2433 			 */
2434 			reg32 = pci_config_get32(pci_conf_handle,
2435 			    NV_SATA_CFG_23);
2436 			pci_config_put32(pci_conf_handle, NV_SATA_CFG_23,
2437 			    reg32 & 0xffff0000);
2438 		} else {
2439 			NVLOG(NVDBG_INIT, nvp->nvp_ctlp, nvp,
2440 			    "40-bit DMA disabled by nv_sata_40bit_dma", NULL);
2441 		}
2442 	} else {
2443 		nv_cmn_err(CE_NOTE, nvp->nvp_ctlp, nvp, "rev id is %X and is "
2444 		    "not capable of 40-bit DMA addressing", nvc->nvc_revid);
2445 	}
2446 }
2447 
2448 
2449 /*
2450  * Initialize register handling specific to ck804
2451  */
2452 static void
2453 ck804_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2454 {
2455 	uchar_t *bar5  = nvc->nvc_bar_addr[5];
2456 	uint32_t reg32;
2457 	uint16_t reg16;
2458 	nv_port_t *nvp;
2459 	int j;
2460 
2461 	/*
2462 	 * delay hotplug interrupts until PHYRDY.
2463 	 */
2464 	reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_42);
2465 	pci_config_put32(pci_conf_handle, NV_SATA_CFG_42,
2466 	    reg32 | CK804_CFG_DELAY_HOTPLUG_INTR);
2467 
2468 	/*
2469 	 * enable hot plug interrupts for channel x and y
2470 	 */
2471 	reg16 = nv_get16(nvc->nvc_bar_hdl[5],
2472 	    (uint16_t *)(bar5 + NV_ADMACTL_X));
2473 	nv_put16(nvc->nvc_bar_hdl[5], (uint16_t *)(bar5 + NV_ADMACTL_X),
2474 	    NV_HIRQ_EN | reg16);
2475 
2476 
2477 	reg16 = nv_get16(nvc->nvc_bar_hdl[5],
2478 	    (uint16_t *)(bar5 + NV_ADMACTL_Y));
2479 	nv_put16(nvc->nvc_bar_hdl[5], (uint16_t *)(bar5 + NV_ADMACTL_Y),
2480 	    NV_HIRQ_EN | reg16);
2481 
2482 	nvc->nvc_ck804_int_status = (uint8_t *)(bar5 + CK804_SATA_INT_STATUS);
2483 
2484 	/*
2485 	 * clear any existing interrupt pending then enable
2486 	 */
2487 	for (j = 0; j < NV_MAX_PORTS(nvc); j++) {
2488 		nvp = &(nvc->nvc_port[j]);
2489 		mutex_enter(&nvp->nvp_mutex);
2490 		(*(nvp->nvp_ctlp->nvc_set_intr))(nvp,
2491 		    NV_INTR_CLEAR_ALL|NV_INTR_ENABLE);
2492 		mutex_exit(&nvp->nvp_mutex);
2493 	}
2494 }
2495 
2496 
2497 /*
2498  * Initialize the controller and set up driver data structures.
2499  * determine if ck804 or mcp5x class.
2500  */
2501 static int
2502 nv_init_ctl(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2503 {
2504 	struct sata_hba_tran stran;
2505 	nv_port_t *nvp;
2506 	int j, ck804;
2507 	uchar_t *cmd_addr, *ctl_addr, *bm_addr;
2508 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2509 	uchar_t *bar5  = nvc->nvc_bar_addr[5];
2510 	uint32_t reg32;
2511 	uint8_t reg8, reg8_save;
2512 
2513 	NVLOG(NVDBG_INIT, nvc, NULL, "nv_init_ctl entered", NULL);
2514 
2515 	ck804 = B_TRUE;
2516 #ifdef SGPIO_SUPPORT
2517 	nvc->nvc_mcp5x_flag = B_FALSE;
2518 #endif
2519 
2520 	/*
2521 	 * Need to set bit 2 to 1 at config offset 0x50
2522 	 * to enable access to the bar5 registers.
2523 	 */
2524 	reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
2525 	if (!(reg32 & NV_BAR5_SPACE_EN)) {
2526 		pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
2527 		    reg32 | NV_BAR5_SPACE_EN);
2528 	}
2529 
2530 	/*
2531 	 * Determine if this is ck804 or mcp5x.  ck804 will map in the
2532 	 * task file registers into bar5 while mcp5x won't.  The offset of
2533 	 * the task file registers in mcp5x's space is unused, so it will
2534 	 * return zero.  So check one of the task file registers to see if it is
2535 	 * writable and reads back what was written.  If it's mcp5x it will
2536 	 * return back 0xff whereas ck804 will return the value written.
2537 	 */
2538 	reg8_save = nv_get8(bar5_hdl,
2539 	    (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X));
2540 
2541 
2542 	for (j = 1; j < 3; j++) {
2543 
2544 		nv_put8(bar5_hdl, (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X), j);
2545 		reg8 = nv_get8(bar5_hdl,
2546 		    (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X));
2547 
2548 		if (reg8 != j) {
2549 			ck804 = B_FALSE;
2550 			nvc->nvc_mcp5x_flag = B_TRUE;
2551 			break;
2552 		}
2553 	}
2554 
2555 	nv_put8(bar5_hdl, (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X), reg8_save);
2556 
2557 	if (ck804 == B_TRUE) {
2558 		NVLOG(NVDBG_INIT, nvc, NULL, "controller is CK804", NULL);
2559 		nvc->nvc_interrupt = ck804_intr;
2560 		nvc->nvc_reg_init = ck804_reg_init;
2561 		nvc->nvc_set_intr = ck804_set_intr;
2562 	} else {
2563 		NVLOG(NVDBG_INIT, nvc, NULL, "controller is MCP51/MCP55", NULL);
2564 		nvc->nvc_interrupt = mcp5x_intr;
2565 		nvc->nvc_reg_init = mcp5x_reg_init;
2566 		nvc->nvc_set_intr = mcp5x_set_intr;
2567 	}
2568 
2569 
2570 	stran.sata_tran_hba_rev = SATA_TRAN_HBA_REV;
2571 	stran.sata_tran_hba_dip = nvc->nvc_dip;
2572 	stran.sata_tran_hba_num_cports = NV_NUM_CPORTS;
2573 	stran.sata_tran_hba_features_support =
2574 	    SATA_CTLF_HOTPLUG | SATA_CTLF_ASN | SATA_CTLF_ATAPI;
2575 	stran.sata_tran_hba_qdepth = NV_QUEUE_SLOTS;
2576 	stran.sata_tran_probe_port = nv_sata_probe;
2577 	stran.sata_tran_start = nv_sata_start;
2578 	stran.sata_tran_abort = nv_sata_abort;
2579 	stran.sata_tran_reset_dport = nv_sata_reset;
2580 	stran.sata_tran_selftest = NULL;
2581 	stran.sata_tran_hotplug_ops = &nv_hotplug_ops;
2582 	stran.sata_tran_pwrmgt_ops = NULL;
2583 	stran.sata_tran_ioctl = NULL;
2584 	nvc->nvc_sata_hba_tran = stran;
2585 
2586 	nvc->nvc_port = kmem_zalloc(sizeof (nv_port_t) * NV_MAX_PORTS(nvc),
2587 	    KM_SLEEP);
2588 
2589 	/*
2590 	 * initialize registers common to all chipsets
2591 	 */
2592 	nv_common_reg_init(nvc);
2593 
2594 	for (j = 0; j < NV_MAX_PORTS(nvc); j++) {
2595 		nvp = &(nvc->nvc_port[j]);
2596 
2597 		cmd_addr = nvp->nvp_cmd_addr;
2598 		ctl_addr = nvp->nvp_ctl_addr;
2599 		bm_addr = nvp->nvp_bm_addr;
2600 
2601 		mutex_init(&nvp->nvp_mutex, NULL, MUTEX_DRIVER,
2602 		    DDI_INTR_PRI(nvc->nvc_intr_pri));
2603 
2604 		cv_init(&nvp->nvp_poll_cv, NULL, CV_DRIVER, NULL);
2605 
2606 		nvp->nvp_data	= cmd_addr + NV_DATA;
2607 		nvp->nvp_error	= cmd_addr + NV_ERROR;
2608 		nvp->nvp_feature = cmd_addr + NV_FEATURE;
2609 		nvp->nvp_count	= cmd_addr + NV_COUNT;
2610 		nvp->nvp_sect	= cmd_addr + NV_SECT;
2611 		nvp->nvp_lcyl	= cmd_addr + NV_LCYL;
2612 		nvp->nvp_hcyl	= cmd_addr + NV_HCYL;
2613 		nvp->nvp_drvhd	= cmd_addr + NV_DRVHD;
2614 		nvp->nvp_status	= cmd_addr + NV_STATUS;
2615 		nvp->nvp_cmd	= cmd_addr + NV_CMD;
2616 		nvp->nvp_altstatus = ctl_addr + NV_ALTSTATUS;
2617 		nvp->nvp_devctl	= ctl_addr + NV_DEVCTL;
2618 
2619 		nvp->nvp_bmicx	= bm_addr + BMICX_REG;
2620 		nvp->nvp_bmisx	= bm_addr + BMISX_REG;
2621 		nvp->nvp_bmidtpx = (uint32_t *)(bm_addr + BMIDTPX_REG);
2622 
2623 		nvp->nvp_state = 0;
2624 
2625 		/*
2626 		 * Initialize dma handles, etc.
2627 		 * If it fails, the port is in inactive state.
2628 		 */
2629 		(void) nv_init_port(nvp);
2630 	}
2631 
2632 	/*
2633 	 * initialize register by calling chip specific reg initialization
2634 	 */
2635 	(*(nvc->nvc_reg_init))(nvc, pci_conf_handle);
2636 
2637 	/* initialize the hba dma attribute */
2638 	if (nvc->dma_40bit == B_TRUE)
2639 		nvc->nvc_sata_hba_tran.sata_tran_hba_dma_attr =
2640 		    &buffer_dma_40bit_attr;
2641 	else
2642 		nvc->nvc_sata_hba_tran.sata_tran_hba_dma_attr =
2643 		    &buffer_dma_attr;
2644 
2645 	return (NV_SUCCESS);
2646 }
2647 
2648 
2649 /*
2650  * Initialize data structures with enough slots to handle queuing, if
2651  * enabled.  NV_QUEUE_SLOTS will be set to 1 or 32, depending on whether
2652  * NCQ support is built into the driver and enabled.  It might have been
2653  * better to derive the true size from the drive itself, but the sata
2654  * module only sends down that information on the first NCQ command,
2655  * which means possibly re-sizing the structures on an interrupt stack,
2656  * making error handling more messy.  The easy way is to just allocate
2657  * all 32 slots, which is what most drives support anyway.
2658  */
2659 static int
2660 nv_init_port(nv_port_t *nvp)
2661 {
2662 	nv_ctl_t *nvc = nvp->nvp_ctlp;
2663 	size_t	prd_size = sizeof (prde_t) * NV_DMA_NSEGS;
2664 	dev_info_t *dip = nvc->nvc_dip;
2665 	ddi_device_acc_attr_t dev_attr;
2666 	size_t buf_size;
2667 	ddi_dma_cookie_t cookie;
2668 	uint_t count;
2669 	int rc, i;
2670 
2671 	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
2672 	dev_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
2673 	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
2674 
2675 	if (nvp->nvp_state & NV_PORT_INIT) {
2676 		NVLOG(NVDBG_INIT, nvc, nvp,
2677 		    "nv_init_port previously initialized", NULL);
2678 
2679 		return (NV_SUCCESS);
2680 	} else {
2681 		NVLOG(NVDBG_INIT, nvc, nvp, "nv_init_port initializing", NULL);
2682 	}
2683 
2684 	nvp->nvp_sg_dma_hdl = kmem_zalloc(sizeof (ddi_dma_handle_t) *
2685 	    NV_QUEUE_SLOTS, KM_SLEEP);
2686 
2687 	nvp->nvp_sg_acc_hdl = kmem_zalloc(sizeof (ddi_acc_handle_t) *
2688 	    NV_QUEUE_SLOTS, KM_SLEEP);
2689 
2690 	nvp->nvp_sg_addr = kmem_zalloc(sizeof (caddr_t) *
2691 	    NV_QUEUE_SLOTS, KM_SLEEP);
2692 
2693 	nvp->nvp_sg_paddr = kmem_zalloc(sizeof (uint32_t) *
2694 	    NV_QUEUE_SLOTS, KM_SLEEP);
2695 
2696 	nvp->nvp_slot = kmem_zalloc(sizeof (nv_slot_t) * NV_QUEUE_SLOTS,
2697 	    KM_SLEEP);
2698 
2699 	for (i = 0; i < NV_QUEUE_SLOTS; i++) {
2700 
2701 		rc = ddi_dma_alloc_handle(dip, &nv_prd_dma_attr,
2702 		    DDI_DMA_SLEEP, NULL, &(nvp->nvp_sg_dma_hdl[i]));
2703 
2704 		if (rc != DDI_SUCCESS) {
2705 			nv_uninit_port(nvp);
2706 
2707 			return (NV_FAILURE);
2708 		}
2709 
2710 		rc = ddi_dma_mem_alloc(nvp->nvp_sg_dma_hdl[i], prd_size,
2711 		    &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
2712 		    NULL, &(nvp->nvp_sg_addr[i]), &buf_size,
2713 		    &(nvp->nvp_sg_acc_hdl[i]));
2714 
2715 		if (rc != DDI_SUCCESS) {
2716 			nv_uninit_port(nvp);
2717 
2718 			return (NV_FAILURE);
2719 		}
2720 
2721 		rc = ddi_dma_addr_bind_handle(nvp->nvp_sg_dma_hdl[i], NULL,
2722 		    nvp->nvp_sg_addr[i], buf_size,
2723 		    DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
2724 		    DDI_DMA_SLEEP, NULL, &cookie, &count);
2725 
2726 		if (rc != DDI_DMA_MAPPED) {
2727 			nv_uninit_port(nvp);
2728 
2729 			return (NV_FAILURE);
2730 		}
2731 
2732 		ASSERT(count == 1);
2733 		ASSERT((cookie.dmac_address & (sizeof (int) - 1)) == 0);
2734 
2735 		ASSERT(cookie.dmac_laddress <= UINT32_MAX);
2736 
2737 		nvp->nvp_sg_paddr[i] = cookie.dmac_address;
2738 	}
2739 
2740 	/*
2741 	 * nvp_queue_depth represents the actual drive queue depth, not the
2742 	 * number of slots allocated in the structures (which may be more).
2743 	 * Actual queue depth is only learned after the first NCQ command, so
2744 	 * initialize it to 1 for now.
2745 	 */
2746 	nvp->nvp_queue_depth = 1;
2747 
2748 	/*
2749 	 * Port is initialized whether the device is attached or not.
2750 	 * Link processing and device identification will be started later,
2751 	 * after interrupts are initialized.
2752 	 */
2753 	nvp->nvp_type = SATA_DTYPE_NONE;
2754 	nvp->nvp_signature = 0;
2755 
2756 	nvp->nvp_state |= NV_PORT_INIT;
2757 
2758 	return (NV_SUCCESS);
2759 }
2760 
2761 
2762 /*
2763  * Establish initial link & device type
2764  * Called only from nv_attach
2765  * Loops up to approximately 210ms; can exit earlier.
2766  * The time includes wait for the link up and completion of the initial
2767  * signature gathering operation.
2768  */
2769 static void
2770 nv_init_port_link_processing(nv_ctl_t *nvc)
2771 {
2772 	ddi_acc_handle_t bar5_hdl;
2773 	nv_port_t *nvp;
2774 	volatile uint32_t sstatus;
2775 	int port, links_up, ready_ports, i;
2776 
2777 
2778 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2779 		nvp = &(nvc->nvc_port[port]);
2780 		if (nvp != NULL && (nvp->nvp_state & NV_PORT_INIT)) {
2781 			/*
2782 			 * Initiate device identification, if any is attached
2783 			 * and reset was not already applied by hot-plug
2784 			 * event processing.
2785 			 */
2786 			mutex_enter(&nvp->nvp_mutex);
2787 			if (!(nvp->nvp_state & NV_PORT_RESET)) {
2788 				nvp->nvp_state |= NV_PORT_RESET | NV_PORT_PROBE;
2789 				nv_reset(nvp, "attach");
2790 			}
2791 			mutex_exit(&nvp->nvp_mutex);
2792 		}
2793 	}
2794 	/*
2795 	 * Wait up to 10ms for links up.
2796 	 * Spec says that link should be up in 1ms.
2797 	 */
2798 	for (i = 0; i < 10; i++) {
2799 		drv_usecwait(NV_ONE_MSEC);
2800 		links_up = 0;
2801 		for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2802 			nvp = &(nvc->nvc_port[port]);
2803 			mutex_enter(&nvp->nvp_mutex);
2804 			bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
2805 			sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
2806 			if ((SSTATUS_GET_IPM(sstatus) == SSTATUS_IPM_ACTIVE) &&
2807 			    (SSTATUS_GET_DET(sstatus) ==
2808 			    SSTATUS_DET_DEVPRE_PHYCOM)) {
2809 				if ((nvp->nvp_state & NV_PORT_RESET) &&
2810 				    nvp->nvp_type == SATA_DTYPE_NONE) {
2811 					nvp->nvp_type = SATA_DTYPE_UNKNOWN;
2812 				}
2813 				NVLOG(NVDBG_INIT, nvc, nvp,
2814 				    "nv_init_port_link_processing()"
2815 				    "link up; time from reset %dms",
2816 				    TICK_TO_MSEC(ddi_get_lbolt() -
2817 				    nvp->nvp_reset_time));
2818 				links_up++;
2819 			}
2820 			mutex_exit(&nvp->nvp_mutex);
2821 		}
2822 		if (links_up == NV_MAX_PORTS(nvc)) {
2823 			break;
2824 		}
2825 	}
2826 	NVLOG(NVDBG_RESET, nvc, nvp, "nv_init_port_link_processing():"
2827 	    "%d links up", links_up);
2828 	/*
2829 	 * At this point, if any device is attached, the link is established.
2830 	 * Wait till devices are ready to be accessed, no more than 200ms.
2831 	 * 200ms is empirical time in which a signature should be available.
2832 	 */
2833 	for (i = 0; i < 200; i++) {
2834 		ready_ports = 0;
2835 		for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2836 			nvp = &(nvc->nvc_port[port]);
2837 			mutex_enter(&nvp->nvp_mutex);
2838 			bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
2839 			sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
2840 			if ((SSTATUS_GET_IPM(sstatus) == SSTATUS_IPM_ACTIVE) &&
2841 			    (SSTATUS_GET_DET(sstatus) ==
2842 			    SSTATUS_DET_DEVPRE_PHYCOM) &&
2843 			    !(nvp->nvp_state & (NV_PORT_RESET |
2844 			    NV_PORT_RESET_RETRY))) {
2845 				/*
2846 				 * Reset already processed
2847 				 */
2848 				NVLOG(NVDBG_RESET, nvc, nvp,
2849 				    "nv_init_port_link_processing()"
2850 				    "device ready; port state %x; "
2851 				    "time from reset %dms", nvp->nvp_state,
2852 				    TICK_TO_MSEC(ddi_get_lbolt() -
2853 				    nvp->nvp_reset_time));
2854 
2855 				ready_ports++;
2856 			}
2857 			mutex_exit(&nvp->nvp_mutex);
2858 		}
2859 		if (ready_ports == links_up) {
2860 			break;
2861 		}
2862 		drv_usecwait(NV_ONE_MSEC);
2863 	}
2864 	NVLOG(NVDBG_RESET, nvc, nvp, "nv_init_port_link_processing():"
2865 	    "%d devices ready", ready_ports);
2866 }
2867 
2868 /*
2869  * Free dynamically allocated structures for port.
2870  */
2871 static void
2872 nv_uninit_port(nv_port_t *nvp)
2873 {
2874 	int i;
2875 
2876 	/*
2877 	 * It is possible to reach here before a port has been initialized or
2878 	 * after it has already been uninitialized.  Just return in that case.
2879 	 */
2880 	if (nvp->nvp_slot == NULL) {
2881 
2882 		return;
2883 	}
2884 	/*
2885 	 * Mark port unusable now.
2886 	 */
2887 	nvp->nvp_state &= ~NV_PORT_INIT;
2888 
2889 	NVLOG(NVDBG_INIT, nvp->nvp_ctlp, nvp,
2890 	    "nv_uninit_port uninitializing", NULL);
2891 
2892 #ifdef SGPIO_SUPPORT
2893 	if (nvp->nvp_type == SATA_DTYPE_ATADISK) {
2894 		nv_sgp_drive_disconnect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
2895 		    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
2896 	}
2897 #endif
2898 
2899 	nvp->nvp_type = SATA_DTYPE_NONE;
2900 
2901 	for (i = 0; i < NV_QUEUE_SLOTS; i++) {
2902 		if (nvp->nvp_sg_paddr[i]) {
2903 			(void) ddi_dma_unbind_handle(nvp->nvp_sg_dma_hdl[i]);
2904 		}
2905 
2906 		if (nvp->nvp_sg_acc_hdl[i] != NULL) {
2907 			ddi_dma_mem_free(&(nvp->nvp_sg_acc_hdl[i]));
2908 		}
2909 
2910 		if (nvp->nvp_sg_dma_hdl[i] != NULL) {
2911 			ddi_dma_free_handle(&(nvp->nvp_sg_dma_hdl[i]));
2912 		}
2913 	}
2914 
2915 	kmem_free(nvp->nvp_slot, sizeof (nv_slot_t) * NV_QUEUE_SLOTS);
2916 	nvp->nvp_slot = NULL;
2917 
2918 	kmem_free(nvp->nvp_sg_dma_hdl,
2919 	    sizeof (ddi_dma_handle_t) * NV_QUEUE_SLOTS);
2920 	nvp->nvp_sg_dma_hdl = NULL;
2921 
2922 	kmem_free(nvp->nvp_sg_acc_hdl,
2923 	    sizeof (ddi_acc_handle_t) * NV_QUEUE_SLOTS);
2924 	nvp->nvp_sg_acc_hdl = NULL;
2925 
2926 	kmem_free(nvp->nvp_sg_addr, sizeof (caddr_t) * NV_QUEUE_SLOTS);
2927 	nvp->nvp_sg_addr = NULL;
2928 
2929 	kmem_free(nvp->nvp_sg_paddr, sizeof (uint32_t) * NV_QUEUE_SLOTS);
2930 	nvp->nvp_sg_paddr = NULL;
2931 }
2932 
2933 
2934 /*
2935  * Cache register offsets and access handles to frequently accessed registers
2936  * which are common to either chipset.
2937  */
2938 static void
2939 nv_common_reg_init(nv_ctl_t *nvc)
2940 {
2941 	uchar_t *bar5_addr = nvc->nvc_bar_addr[5];
2942 	uchar_t *bm_addr_offset, *sreg_offset;
2943 	uint8_t bar, port;
2944 	nv_port_t *nvp;
2945 
2946 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2947 		if (port == 0) {
2948 			bar = NV_BAR_0;
2949 			bm_addr_offset = 0;
2950 			sreg_offset = (uchar_t *)(CH0_SREG_OFFSET + bar5_addr);
2951 		} else {
2952 			bar = NV_BAR_2;
2953 			bm_addr_offset = (uchar_t *)8;
2954 			sreg_offset = (uchar_t *)(CH1_SREG_OFFSET + bar5_addr);
2955 		}
2956 
2957 		nvp = &(nvc->nvc_port[port]);
2958 		nvp->nvp_ctlp = nvc;
2959 		nvp->nvp_port_num = port;
2960 		NVLOG(NVDBG_INIT, nvc, nvp, "setting up port mappings", NULL);
2961 
2962 		nvp->nvp_cmd_hdl = nvc->nvc_bar_hdl[bar];
2963 		nvp->nvp_cmd_addr = nvc->nvc_bar_addr[bar];
2964 		nvp->nvp_ctl_hdl = nvc->nvc_bar_hdl[bar + 1];
2965 		nvp->nvp_ctl_addr = nvc->nvc_bar_addr[bar + 1];
2966 		nvp->nvp_bm_hdl = nvc->nvc_bar_hdl[NV_BAR_4];
2967 		nvp->nvp_bm_addr = nvc->nvc_bar_addr[NV_BAR_4] +
2968 		    (long)bm_addr_offset;
2969 
2970 		nvp->nvp_sstatus = (uint32_t *)(sreg_offset + NV_SSTATUS);
2971 		nvp->nvp_serror = (uint32_t *)(sreg_offset + NV_SERROR);
2972 		nvp->nvp_sactive = (uint32_t *)(sreg_offset + NV_SACTIVE);
2973 		nvp->nvp_sctrl = (uint32_t *)(sreg_offset + NV_SCTRL);
2974 	}
2975 }
2976 
2977 
2978 static void
2979 nv_uninit_ctl(nv_ctl_t *nvc)
2980 {
2981 	int port;
2982 	nv_port_t *nvp;
2983 
2984 	NVLOG(NVDBG_INIT, nvc, NULL, "nv_uninit_ctl entered", NULL);
2985 
2986 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2987 		nvp = &(nvc->nvc_port[port]);
2988 		mutex_enter(&nvp->nvp_mutex);
2989 		NVLOG(NVDBG_INIT, nvc, nvp, "uninitializing port", NULL);
2990 		nv_uninit_port(nvp);
2991 		mutex_exit(&nvp->nvp_mutex);
2992 		mutex_destroy(&nvp->nvp_mutex);
2993 		cv_destroy(&nvp->nvp_poll_cv);
2994 	}
2995 
2996 	kmem_free(nvc->nvc_port, NV_MAX_PORTS(nvc) * sizeof (nv_port_t));
2997 	nvc->nvc_port = NULL;
2998 }
2999 
3000 
3001 /*
3002  * ck804 interrupt.  This is a wrapper around ck804_intr_process so
3003  * that interrupts from other devices can be disregarded while dtracing.
3004  */
3005 /* ARGSUSED */
3006 static uint_t
3007 ck804_intr(caddr_t arg1, caddr_t arg2)
3008 {
3009 	nv_ctl_t *nvc = (nv_ctl_t *)arg1;
3010 	uint8_t intr_status;
3011 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
3012 
3013 	if (nvc->nvc_state & NV_CTRL_SUSPEND)
3014 		return (DDI_INTR_UNCLAIMED);
3015 
3016 	intr_status = ddi_get8(bar5_hdl, nvc->nvc_ck804_int_status);
3017 
3018 	if (intr_status == 0) {
3019 
3020 		return (DDI_INTR_UNCLAIMED);
3021 	}
3022 
3023 	ck804_intr_process(nvc, intr_status);
3024 
3025 	return (DDI_INTR_CLAIMED);
3026 }
3027 
3028 
3029 /*
3030  * Main interrupt handler for ck804.  handles normal device
3031  * interrupts as well as port hot plug and remove interrupts.
3032  *
3033  */
3034 static void
3035 ck804_intr_process(nv_ctl_t *nvc, uint8_t intr_status)
3036 {
3037 
3038 	int port, i;
3039 	nv_port_t *nvp;
3040 	nv_slot_t *nv_slotp;
3041 	uchar_t	status;
3042 	sata_pkt_t *spkt;
3043 	uint8_t bmstatus, clear_bits;
3044 	ddi_acc_handle_t bmhdl;
3045 	int nvcleared = 0;
3046 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
3047 	uint32_t sstatus;
3048 	int port_mask_hot[] = {
3049 		CK804_INT_PDEV_HOT, CK804_INT_SDEV_HOT,
3050 	};
3051 	int port_mask_pm[] = {
3052 		CK804_INT_PDEV_PM, CK804_INT_SDEV_PM,
3053 	};
3054 
3055 	NVLOG(NVDBG_INTR, nvc, NULL,
3056 	    "ck804_intr_process entered intr_status=%x", intr_status);
3057 
3058 	/*
3059 	 * For command completion interrupt, explicit clear is not required.
3060 	 * however, for the error cases explicit clear is performed.
3061 	 */
3062 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
3063 
3064 		int port_mask[] = {CK804_INT_PDEV_INT, CK804_INT_SDEV_INT};
3065 
3066 		if ((port_mask[port] & intr_status) == 0) {
3067 			continue;
3068 		}
3069 
3070 		NVLOG(NVDBG_INTR, nvc, NULL,
3071 		    "ck804_intr_process interrupt on port %d", port);
3072 
3073 		nvp = &(nvc->nvc_port[port]);
3074 
3075 		mutex_enter(&nvp->nvp_mutex);
3076 
3077 		/*
3078 		 * there was a corner case found where an interrupt
3079 		 * arrived before nvp_slot was set.  Should
3080 		 * probably should track down why that happens and try
3081 		 * to eliminate that source and then get rid of this
3082 		 * check.
3083 		 */
3084 		if (nvp->nvp_slot == NULL) {
3085 			status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_status);
3086 			NVLOG(NVDBG_ALWAYS, nvc, nvp, "spurious interrupt "
3087 			    "received before initialization "
3088 			    "completed status=%x", status);
3089 			mutex_exit(&nvp->nvp_mutex);
3090 
3091 			/*
3092 			 * clear interrupt bits
3093 			 */
3094 			nv_put8(bar5_hdl, nvc->nvc_ck804_int_status,
3095 			    port_mask[port]);
3096 
3097 			continue;
3098 		}
3099 
3100 		if ((&(nvp->nvp_slot[0]))->nvslot_spkt == NULL)  {
3101 			status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_status);
3102 			NVLOG(NVDBG_ALWAYS, nvc, nvp, "spurious interrupt "
3103 			    " no command in progress status=%x", status);
3104 			mutex_exit(&nvp->nvp_mutex);
3105 
3106 			/*
3107 			 * clear interrupt bits
3108 			 */
3109 			nv_put8(bar5_hdl, nvc->nvc_ck804_int_status,
3110 			    port_mask[port]);
3111 
3112 			continue;
3113 		}
3114 
3115 		bmhdl = nvp->nvp_bm_hdl;
3116 		bmstatus = nv_get8(bmhdl, nvp->nvp_bmisx);
3117 
3118 		if (!(bmstatus & BMISX_IDEINTS)) {
3119 			mutex_exit(&nvp->nvp_mutex);
3120 
3121 			continue;
3122 		}
3123 
3124 		status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
3125 
3126 		if (status & SATA_STATUS_BSY) {
3127 			mutex_exit(&nvp->nvp_mutex);
3128 
3129 			continue;
3130 		}
3131 
3132 		nv_slotp = &(nvp->nvp_slot[0]);
3133 
3134 		ASSERT(nv_slotp);
3135 
3136 		spkt = nv_slotp->nvslot_spkt;
3137 
3138 		if (spkt == NULL) {
3139 			mutex_exit(&nvp->nvp_mutex);
3140 
3141 			continue;
3142 		}
3143 
3144 		(*nv_slotp->nvslot_intr)(nvp, nv_slotp);
3145 
3146 		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
3147 
3148 		if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
3149 
3150 			nv_complete_io(nvp, spkt, 0);
3151 		}
3152 
3153 		mutex_exit(&nvp->nvp_mutex);
3154 	}
3155 
3156 	/*
3157 	 * ck804 often doesn't correctly distinguish hot add/remove
3158 	 * interrupts.  Frequently both the ADD and the REMOVE bits
3159 	 * are asserted, whether it was a remove or add.  Use sstatus
3160 	 * to distinguish hot add from hot remove.
3161 	 */
3162 
3163 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
3164 		clear_bits = 0;
3165 
3166 		nvp = &(nvc->nvc_port[port]);
3167 		mutex_enter(&nvp->nvp_mutex);
3168 
3169 		if ((port_mask_pm[port] & intr_status) != 0) {
3170 			clear_bits = port_mask_pm[port];
3171 			NVLOG(NVDBG_HOT, nvc, nvp,
3172 			    "clearing PM interrupt bit: %x",
3173 			    intr_status & port_mask_pm[port]);
3174 		}
3175 
3176 		if ((port_mask_hot[port] & intr_status) == 0) {
3177 			if (clear_bits != 0) {
3178 				goto clear;
3179 			} else {
3180 				mutex_exit(&nvp->nvp_mutex);
3181 				continue;
3182 			}
3183 		}
3184 
3185 		/*
3186 		 * reaching here means there was a hot add or remove.
3187 		 */
3188 		clear_bits |= port_mask_hot[port];
3189 
3190 		ASSERT(nvc->nvc_port[port].nvp_sstatus);
3191 
3192 		sstatus = nv_get32(bar5_hdl,
3193 		    nvc->nvc_port[port].nvp_sstatus);
3194 
3195 		if ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) ==
3196 		    SSTATUS_DET_DEVPRE_PHYCOM) {
3197 			nv_report_add_remove(nvp, 0);
3198 		} else {
3199 			nv_report_add_remove(nvp, NV_PORT_HOTREMOVED);
3200 		}
3201 	clear:
3202 		/*
3203 		 * clear interrupt bits.  explicit interrupt clear is
3204 		 * required for hotplug interrupts.
3205 		 */
3206 		nv_put8(bar5_hdl, nvc->nvc_ck804_int_status, clear_bits);
3207 
3208 		/*
3209 		 * make sure it's flushed and cleared.  If not try
3210 		 * again.  Sometimes it has been observed to not clear
3211 		 * on the first try.
3212 		 */
3213 		intr_status = nv_get8(bar5_hdl, nvc->nvc_ck804_int_status);
3214 
3215 		/*
3216 		 * make 10 additional attempts to clear the interrupt
3217 		 */
3218 		for (i = 0; (intr_status & clear_bits) && (i < 10); i++) {
3219 			NVLOG(NVDBG_ALWAYS, nvc, nvp, "inst_status=%x "
3220 			    "still not clear try=%d", intr_status,
3221 			    ++nvcleared);
3222 			nv_put8(bar5_hdl, nvc->nvc_ck804_int_status,
3223 			    clear_bits);
3224 			intr_status = nv_get8(bar5_hdl,
3225 			    nvc->nvc_ck804_int_status);
3226 		}
3227 
3228 		/*
3229 		 * if still not clear, log a message and disable the
3230 		 * port. highly unlikely that this path is taken, but it
3231 		 * gives protection against a wedged interrupt.
3232 		 */
3233 		if (intr_status & clear_bits) {
3234 			(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
3235 			nv_port_state_change(nvp, SATA_EVNT_PORT_FAILED,
3236 			    SATA_ADDR_CPORT, SATA_PSTATE_FAILED);
3237 			nvp->nvp_state |= NV_PORT_FAILED;
3238 			(void) nv_abort_active(nvp, NULL, SATA_PKT_DEV_ERROR,
3239 			    B_TRUE);
3240 			nv_cmn_err(CE_WARN, nvc, nvp, "unable to clear "
3241 			    "interrupt.  disabling port intr_status=%X",
3242 			    intr_status);
3243 		}
3244 
3245 		mutex_exit(&nvp->nvp_mutex);
3246 	}
3247 }
3248 
3249 
3250 /*
3251  * Interrupt handler for mcp5x.  It is invoked by the wrapper for each port
3252  * on the controller, to handle completion and hot plug and remove events.
3253  *
3254  */
3255 static uint_t
3256 mcp5x_intr_port(nv_port_t *nvp)
3257 {
3258 	nv_ctl_t *nvc = nvp->nvp_ctlp;
3259 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
3260 	uint8_t clear = 0, intr_cycles = 0;
3261 	int ret = DDI_INTR_UNCLAIMED;
3262 	uint16_t int_status;
3263 	clock_t intr_time;
3264 	int loop_cnt = 0;
3265 
3266 	nvp->intr_start_time = ddi_get_lbolt();
3267 
3268 	NVLOG(NVDBG_INTR, nvc, nvp, "mcp55_intr_port entered", NULL);
3269 
3270 	do {
3271 		/*
3272 		 * read current interrupt status
3273 		 */
3274 		int_status = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_status);
3275 
3276 		NVLOG(NVDBG_INTR, nvc, nvp, "int_status = %x", int_status);
3277 
3278 		/*
3279 		 * MCP5X_INT_IGNORE interrupts will show up in the status,
3280 		 * but are masked out from causing an interrupt to be generated
3281 		 * to the processor.  Ignore them here by masking them out.
3282 		 */
3283 		int_status &= ~(MCP5X_INT_IGNORE);
3284 
3285 		/*
3286 		 * exit the loop when no more interrupts to process
3287 		 */
3288 		if (int_status == 0) {
3289 
3290 			break;
3291 		}
3292 
3293 		if (int_status & MCP5X_INT_COMPLETE) {
3294 			NVLOG(NVDBG_INTR, nvc, nvp,
3295 			    "mcp5x_packet_complete_intr", NULL);
3296 			/*
3297 			 * since int_status was set, return DDI_INTR_CLAIMED
3298 			 * from the DDI's perspective even though the packet
3299 			 * completion may not have succeeded.  If it fails,
3300 			 * need to manually clear the interrupt, otherwise
3301 			 * clearing is implicit.
3302 			 */
3303 			ret = DDI_INTR_CLAIMED;
3304 			if (mcp5x_packet_complete_intr(nvc, nvp) ==
3305 			    NV_FAILURE) {
3306 				clear |= MCP5X_INT_COMPLETE;
3307 			} else {
3308 				intr_cycles = 0;
3309 			}
3310 		}
3311 
3312 		if (int_status & MCP5X_INT_DMA_SETUP) {
3313 			NVLOG(NVDBG_INTR, nvc, nvp, "mcp5x_dma_setup_intr",
3314 			    NULL);
3315 
3316 			/*
3317 			 * Needs to be cleared before starting the BM, so do it
3318 			 * now.  make sure this is still working.
3319 			 */
3320 			nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status,
3321 			    MCP5X_INT_DMA_SETUP);
3322 #ifdef NCQ
3323 			ret = mcp5x_dma_setup_intr(nvc, nvp);
3324 #endif
3325 		}
3326 
3327 		if (int_status & MCP5X_INT_REM) {
3328 			clear |= MCP5X_INT_REM;
3329 			ret = DDI_INTR_CLAIMED;
3330 
3331 			mutex_enter(&nvp->nvp_mutex);
3332 			nv_report_add_remove(nvp, NV_PORT_HOTREMOVED);
3333 			mutex_exit(&nvp->nvp_mutex);
3334 
3335 		} else if (int_status & MCP5X_INT_ADD) {
3336 			clear |= MCP5X_INT_ADD;
3337 			ret = DDI_INTR_CLAIMED;
3338 
3339 			mutex_enter(&nvp->nvp_mutex);
3340 			nv_report_add_remove(nvp, 0);
3341 			mutex_exit(&nvp->nvp_mutex);
3342 		}
3343 		if (clear) {
3344 			nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status, clear);
3345 			clear = 0;
3346 		}
3347 		/* Protect against a stuck interrupt */
3348 		if (intr_cycles++ == NV_MAX_INTR_LOOP) {
3349 			nv_cmn_err(CE_WARN, nvc, nvp, "excessive interrupt "
3350 			    "processing.  Disabling port int_status=%X"
3351 			    " clear=%X", int_status, clear);
3352 			mutex_enter(&nvp->nvp_mutex);
3353 			(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
3354 			nv_port_state_change(nvp, SATA_EVNT_PORT_FAILED,
3355 			    SATA_ADDR_CPORT, SATA_PSTATE_FAILED);
3356 			nvp->nvp_state |= NV_PORT_FAILED;
3357 			(void) nv_abort_active(nvp, NULL, SATA_PKT_DEV_ERROR,
3358 			    B_TRUE);
3359 			mutex_exit(&nvp->nvp_mutex);
3360 		}
3361 
3362 	} while (loop_cnt++ < nv_max_intr_loops);
3363 
3364 	if (loop_cnt > nvp->intr_loop_cnt) {
3365 		NVLOG(NVDBG_INTR, nvp->nvp_ctlp, nvp,
3366 		    "Exiting with multiple intr loop count %d", loop_cnt);
3367 		nvp->intr_loop_cnt = loop_cnt;
3368 	}
3369 
3370 	if ((nv_debug_flags & (NVDBG_INTR | NVDBG_VERBOSE)) ==
3371 	    (NVDBG_INTR | NVDBG_VERBOSE)) {
3372 		uint8_t status, bmstatus;
3373 		uint16_t int_status2;
3374 
3375 		if (int_status & MCP5X_INT_COMPLETE) {
3376 			status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
3377 			bmstatus = nv_get8(nvp->nvp_bm_hdl, nvp->nvp_bmisx);
3378 			int_status2 = nv_get16(nvp->nvp_ctlp->nvc_bar_hdl[5],
3379 			    nvp->nvp_mcp5x_int_status);
3380 			NVLOG(NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
3381 			    "mcp55_intr_port: Exiting with altstatus %x, "
3382 			    "bmicx %x, int_status2 %X, int_status %X, ret %x,"
3383 			    " loop_cnt %d ", status, bmstatus, int_status2,
3384 			    int_status, ret, loop_cnt);
3385 		}
3386 	}
3387 
3388 	NVLOG(NVDBG_INTR, nvc, nvp, "mcp55_intr_port: finished ret=%d", ret);
3389 
3390 	/*
3391 	 * To facilitate debugging, keep track of the length of time spent in
3392 	 * the port interrupt routine.
3393 	 */
3394 	intr_time = ddi_get_lbolt() - nvp->intr_start_time;
3395 	if (intr_time > nvp->intr_duration)
3396 		nvp->intr_duration = intr_time;
3397 
3398 	return (ret);
3399 }
3400 
3401 
3402 /* ARGSUSED */
3403 static uint_t
3404 mcp5x_intr(caddr_t arg1, caddr_t arg2)
3405 {
3406 	nv_ctl_t *nvc = (nv_ctl_t *)arg1;
3407 	int ret;
3408 
3409 	if (nvc->nvc_state & NV_CTRL_SUSPEND)
3410 		return (DDI_INTR_UNCLAIMED);
3411 
3412 	ret = mcp5x_intr_port(&(nvc->nvc_port[0]));
3413 	ret |= mcp5x_intr_port(&(nvc->nvc_port[1]));
3414 
3415 	return (ret);
3416 }
3417 
3418 
3419 #ifdef NCQ
3420 /*
3421  * with software driven NCQ on mcp5x, an interrupt occurs right
3422  * before the drive is ready to do a DMA transfer.  At this point,
3423  * the PRD table needs to be programmed and the DMA engine enabled
3424  * and ready to go.
3425  *
3426  * -- MCP_SATA_AE_INT_STATUS_SDEV_DMA_SETUP indicates the interrupt
3427  * -- MCP_SATA_AE_NCQ_PDEV_DMA_SETUP_TAG shows which command is ready
3428  * -- clear bit 0 of master command reg
3429  * -- program PRD
3430  * -- clear the interrupt status bit for the DMA Setup FIS
3431  * -- set bit 0 of the bus master command register
3432  */
3433 static int
3434 mcp5x_dma_setup_intr(nv_ctl_t *nvc, nv_port_t *nvp)
3435 {
3436 	int slot;
3437 	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3438 	uint8_t bmicx;
3439 	int port = nvp->nvp_port_num;
3440 	uint8_t tag_shift[] = {MCP_SATA_AE_NCQ_PDEV_DMA_SETUP_TAG_SHIFT,
3441 	    MCP_SATA_AE_NCQ_SDEV_DMA_SETUP_TAG_SHIFT};
3442 
3443 	nv_cmn_err(CE_PANIC, nvc, nvp,
3444 	    "this is should not be executed at all until NCQ");
3445 
3446 	mutex_enter(&nvp->nvp_mutex);
3447 
3448 	slot = nv_get32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp5x_ncq);
3449 
3450 	slot = (slot >> tag_shift[port]) & MCP_SATA_AE_NCQ_DMA_SETUP_TAG_MASK;
3451 
3452 	NVLOG(NVDBG_INTR, nvc, nvp, "mcp5x_dma_setup_intr slot %d"
3453 	    " nvp_slot_sactive %X", slot, nvp->nvp_sactive_cache);
3454 
3455 	/*
3456 	 * halt the DMA engine.  This step is necessary according to
3457 	 * the mcp5x spec, probably since there may have been a "first" packet
3458 	 * that already programmed the DMA engine, but may not turn out to
3459 	 * be the first one processed.
3460 	 */
3461 	bmicx = nv_get8(bmhdl, nvp->nvp_bmicx);
3462 
3463 	if (bmicx & BMICX_SSBM) {
3464 		NVLOG(NVDBG_INTR, nvc, nvp, "BM was already enabled for "
3465 		    "another packet.  Cancelling and reprogramming", NULL);
3466 		nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
3467 	}
3468 	nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
3469 
3470 	nv_start_dma_engine(nvp, slot);
3471 
3472 	mutex_exit(&nvp->nvp_mutex);
3473 
3474 	return (DDI_INTR_CLAIMED);
3475 }
3476 #endif /* NCQ */
3477 
3478 
3479 /*
3480  * packet completion interrupt.  If the packet is complete, invoke
3481  * the packet completion callback.
3482  */
3483 static int
3484 mcp5x_packet_complete_intr(nv_ctl_t *nvc, nv_port_t *nvp)
3485 {
3486 	uint8_t status, bmstatus;
3487 	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3488 	int sactive;
3489 	int active_pkt_bit = 0, active_pkt = 0, ncq_command = B_FALSE;
3490 	sata_pkt_t *spkt;
3491 	nv_slot_t *nv_slotp;
3492 
3493 	mutex_enter(&nvp->nvp_mutex);
3494 
3495 	bmstatus = nv_get8(bmhdl, nvp->nvp_bmisx);
3496 
3497 	if (!(bmstatus & (BMISX_IDEINTS | BMISX_IDERR))) {
3498 		NVLOG(NVDBG_INTR, nvc, nvp, "BMISX_IDEINTS not set", NULL);
3499 		mutex_exit(&nvp->nvp_mutex);
3500 
3501 		return (NV_FAILURE);
3502 	}
3503 
3504 	/*
3505 	 * Commands may have been processed by abort or timeout before
3506 	 * interrupt processing acquired the mutex. So we may be processing
3507 	 * an interrupt for packets that were already removed.
3508 	 * For functionning NCQ processing all slots may be checked, but
3509 	 * with NCQ disabled (current code), relying on *_run flags is OK.
3510 	 */
3511 	if (nvp->nvp_non_ncq_run) {
3512 		/*
3513 		 * If the just completed item is a non-ncq command, the busy
3514 		 * bit should not be set
3515 		 */
3516 		status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
3517 		if (status & SATA_STATUS_BSY) {
3518 			nv_cmn_err(CE_WARN, nvc, nvp,
3519 			    "unexpected SATA_STATUS_BSY set");
3520 			mutex_exit(&nvp->nvp_mutex);
3521 			/*
3522 			 * calling function will clear interrupt.  then
3523 			 * the real interrupt will either arrive or the
3524 			 * packet timeout handling will take over and
3525 			 * reset.
3526 			 */
3527 			return (NV_FAILURE);
3528 		}
3529 		ASSERT(nvp->nvp_ncq_run == 0);
3530 	} else {
3531 		ASSERT(nvp->nvp_non_ncq_run == 0);
3532 		/*
3533 		 * Pre-NCQ code!
3534 		 * Nothing to do. The packet for the command that just
3535 		 * completed is already gone. Just clear the interrupt.
3536 		 */
3537 		(void) nv_bm_status_clear(nvp);
3538 		(void) nv_get8(nvp->nvp_cmd_hdl, nvp->nvp_status);
3539 		mutex_exit(&nvp->nvp_mutex);
3540 		return (NV_SUCCESS);
3541 
3542 		/*
3543 		 * NCQ check for BSY here and wait if still bsy before
3544 		 * continuing. Rather than wait for it to be cleared
3545 		 * when starting a packet and wasting CPU time, the starting
3546 		 * thread can exit immediate, but might have to spin here
3547 		 * for a bit possibly.  Needs more work and experimentation.
3548 		 *
3549 		 */
3550 	}
3551 
3552 	/*
3553 	 * active_pkt_bit will represent the bitmap of the single completed
3554 	 * packet.  Because of the nature of sw assisted NCQ, only one
3555 	 * command will complete per interrupt.
3556 	 */
3557 
3558 	if (ncq_command == B_FALSE) {
3559 		active_pkt = 0;
3560 	} else {
3561 		/*
3562 		 * NCQ: determine which command just completed, by examining
3563 		 * which bit cleared in the register since last written.
3564 		 */
3565 		sactive = nv_get32(nvc->nvc_bar_hdl[5], nvp->nvp_sactive);
3566 
3567 		active_pkt_bit = ~sactive & nvp->nvp_sactive_cache;
3568 
3569 		ASSERT(active_pkt_bit);
3570 
3571 
3572 		/*
3573 		 * this failure path needs more work to handle the
3574 		 * error condition and recovery.
3575 		 */
3576 		if (active_pkt_bit == 0) {
3577 			ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3578 
3579 			nv_cmn_err(CE_CONT, nvc, nvp, "ERROR sactive = %X  "
3580 			    "nvp->nvp_sactive %X", sactive,
3581 			    nvp->nvp_sactive_cache);
3582 
3583 			(void) nv_get8(cmdhdl, nvp->nvp_status);
3584 
3585 			mutex_exit(&nvp->nvp_mutex);
3586 
3587 			return (NV_FAILURE);
3588 		}
3589 
3590 		for (active_pkt = 0; (active_pkt_bit & 0x1) != 0x1;
3591 		    active_pkt++, active_pkt_bit >>= 1) {
3592 		}
3593 
3594 		/*
3595 		 * make sure only one bit is ever turned on
3596 		 */
3597 		ASSERT(active_pkt_bit == 1);
3598 
3599 		nvp->nvp_sactive_cache &= ~(0x01 << active_pkt);
3600 	}
3601 
3602 	nv_slotp = &(nvp->nvp_slot[active_pkt]);
3603 
3604 	spkt = nv_slotp->nvslot_spkt;
3605 
3606 	ASSERT(spkt != NULL);
3607 
3608 	(*nv_slotp->nvslot_intr)(nvp, nv_slotp);
3609 
3610 	nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
3611 
3612 	if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
3613 
3614 		nv_complete_io(nvp, spkt, active_pkt);
3615 	}
3616 
3617 	mutex_exit(&nvp->nvp_mutex);
3618 
3619 	return (NV_SUCCESS);
3620 }
3621 
3622 
3623 static void
3624 nv_complete_io(nv_port_t *nvp, sata_pkt_t *spkt, int slot)
3625 {
3626 
3627 	ASSERT(MUTEX_HELD(&nvp->nvp_mutex));
3628 
3629 	if ((&(nvp->nvp_slot[slot]))->nvslot_flags & NVSLOT_NCQ) {
3630 		nvp->nvp_ncq_run--;
3631 	} else {
3632 		nvp->nvp_non_ncq_run--;
3633 	}
3634 
3635 	/*
3636 	 * mark the packet slot idle so it can be reused.  Do this before
3637 	 * calling satapkt_comp so the slot can be reused.
3638 	 */
3639 	(&(nvp->nvp_slot[slot]))->nvslot_spkt = NULL;
3640 
3641 	if (spkt->satapkt_op_mode & SATA_OPMODE_SYNCH) {
3642 		/*
3643 		 * If this is not timed polled mode cmd, which has an
3644 		 * active thread monitoring for completion, then need
3645 		 * to signal the sleeping thread that the cmd is complete.
3646 		 */
3647 		if ((spkt->satapkt_op_mode & SATA_OPMODE_POLLING) == 0) {
3648 			cv_signal(&nvp->nvp_poll_cv);
3649 		}
3650 
3651 		return;
3652 	}
3653 
3654 	if (spkt->satapkt_comp != NULL) {
3655 		mutex_exit(&nvp->nvp_mutex);
3656 		(*spkt->satapkt_comp)(spkt);
3657 		mutex_enter(&nvp->nvp_mutex);
3658 	}
3659 }
3660 
3661 
3662 /*
3663  * check whether packet is ncq command or not.  for ncq command,
3664  * start it if there is still room on queue.  for non-ncq command only
3665  * start if no other command is running.
3666  */
3667 static int
3668 nv_start_async(nv_port_t *nvp, sata_pkt_t *spkt)
3669 {
3670 	uint8_t cmd, ncq;
3671 
3672 	NVLOG(NVDBG_ENTRY, nvp->nvp_ctlp, nvp, "nv_start_async: entry", NULL);
3673 
3674 	cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
3675 
3676 	ncq = ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
3677 	    (cmd == SATAC_READ_FPDMA_QUEUED));
3678 
3679 	if (ncq == B_FALSE) {
3680 
3681 		if ((nvp->nvp_non_ncq_run == 1) ||
3682 		    (nvp->nvp_ncq_run > 0)) {
3683 			/*
3684 			 * next command is non-ncq which can't run
3685 			 * concurrently.  exit and return queue full.
3686 			 */
3687 			spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3688 
3689 			return (SATA_TRAN_QUEUE_FULL);
3690 		}
3691 
3692 		return (nv_start_common(nvp, spkt));
3693 	}
3694 
3695 	/*
3696 	 * ncq == B_TRUE
3697 	 */
3698 	if (nvp->nvp_non_ncq_run == 1) {
3699 		/*
3700 		 * cannot start any NCQ commands when there
3701 		 * is a non-NCQ command running.
3702 		 */
3703 		spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3704 
3705 		return (SATA_TRAN_QUEUE_FULL);
3706 	}
3707 
3708 #ifdef NCQ
3709 	/*
3710 	 * this is not compiled for now as satapkt_device.satadev_qdepth
3711 	 * is being pulled out until NCQ support is later addressed
3712 	 *
3713 	 * nvp_queue_depth is initialized by the first NCQ command
3714 	 * received.
3715 	 */
3716 	if (nvp->nvp_queue_depth == 1) {
3717 		nvp->nvp_queue_depth =
3718 		    spkt->satapkt_device.satadev_qdepth;
3719 
3720 		ASSERT(nvp->nvp_queue_depth > 1);
3721 
3722 		NVLOG(NVDBG_ENTRY, nvp->nvp_ctlp, nvp,
3723 		    "nv_process_queue: nvp_queue_depth set to %d",
3724 		    nvp->nvp_queue_depth);
3725 	}
3726 #endif
3727 
3728 	if (nvp->nvp_ncq_run >= nvp->nvp_queue_depth) {
3729 		/*
3730 		 * max number of NCQ commands already active
3731 		 */
3732 		spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3733 
3734 		return (SATA_TRAN_QUEUE_FULL);
3735 	}
3736 
3737 	return (nv_start_common(nvp, spkt));
3738 }
3739 
3740 
3741 /*
3742  * configure INTx and legacy interrupts
3743  */
3744 static int
3745 nv_add_legacy_intrs(nv_ctl_t *nvc)
3746 {
3747 	dev_info_t	*devinfo = nvc->nvc_dip;
3748 	int		actual, count = 0;
3749 	int		x, y, rc, inum = 0;
3750 
3751 	NVLOG(NVDBG_INIT, nvc, NULL, "nv_add_legacy_intrs", NULL);
3752 
3753 	/*
3754 	 * get number of interrupts
3755 	 */
3756 	rc = ddi_intr_get_nintrs(devinfo, DDI_INTR_TYPE_FIXED, &count);
3757 	if ((rc != DDI_SUCCESS) || (count == 0)) {
3758 		NVLOG(NVDBG_INIT, nvc, NULL,
3759 		    "ddi_intr_get_nintrs() failed, "
3760 		    "rc %d count %d", rc, count);
3761 
3762 		return (DDI_FAILURE);
3763 	}
3764 
3765 	/*
3766 	 * allocate an array of interrupt handles
3767 	 */
3768 	nvc->nvc_intr_size = count * sizeof (ddi_intr_handle_t);
3769 	nvc->nvc_htable = kmem_zalloc(nvc->nvc_intr_size, KM_SLEEP);
3770 
3771 	/*
3772 	 * call ddi_intr_alloc()
3773 	 */
3774 	rc = ddi_intr_alloc(devinfo, nvc->nvc_htable, DDI_INTR_TYPE_FIXED,
3775 	    inum, count, &actual, DDI_INTR_ALLOC_STRICT);
3776 
3777 	if ((rc != DDI_SUCCESS) || (actual == 0)) {
3778 		nv_cmn_err(CE_WARN, nvc, NULL,
3779 		    "ddi_intr_alloc() failed, rc %d", rc);
3780 		kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3781 
3782 		return (DDI_FAILURE);
3783 	}
3784 
3785 	if (actual < count) {
3786 		nv_cmn_err(CE_WARN, nvc, NULL,
3787 		    "ddi_intr_alloc: requested: %d, received: %d",
3788 		    count, actual);
3789 
3790 		goto failure;
3791 	}
3792 
3793 	nvc->nvc_intr_cnt = actual;
3794 
3795 	/*
3796 	 * get intr priority
3797 	 */
3798 	if (ddi_intr_get_pri(nvc->nvc_htable[0], &nvc->nvc_intr_pri) !=
3799 	    DDI_SUCCESS) {
3800 		nv_cmn_err(CE_WARN, nvc, NULL, "ddi_intr_get_pri() failed");
3801 
3802 		goto failure;
3803 	}
3804 
3805 	/*
3806 	 * Test for high level mutex
3807 	 */
3808 	if (nvc->nvc_intr_pri >= ddi_intr_get_hilevel_pri()) {
3809 		nv_cmn_err(CE_WARN, nvc, NULL,
3810 		    "nv_add_legacy_intrs: high level intr not supported");
3811 
3812 		goto failure;
3813 	}
3814 
3815 	for (x = 0; x < actual; x++) {
3816 		if (ddi_intr_add_handler(nvc->nvc_htable[x],
3817 		    nvc->nvc_interrupt, (caddr_t)nvc, NULL) != DDI_SUCCESS) {
3818 			nv_cmn_err(CE_WARN, nvc, NULL,
3819 			    "ddi_intr_add_handler() failed");
3820 
3821 			goto failure;
3822 		}
3823 	}
3824 
3825 	/*
3826 	 * call ddi_intr_enable() for legacy interrupts
3827 	 */
3828 	for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3829 		(void) ddi_intr_enable(nvc->nvc_htable[x]);
3830 	}
3831 
3832 	return (DDI_SUCCESS);
3833 
3834 	failure:
3835 	/*
3836 	 * free allocated intr and nvc_htable
3837 	 */
3838 	for (y = 0; y < actual; y++) {
3839 		(void) ddi_intr_free(nvc->nvc_htable[y]);
3840 	}
3841 
3842 	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3843 
3844 	return (DDI_FAILURE);
3845 }
3846 
3847 #ifdef	NV_MSI_SUPPORTED
3848 /*
3849  * configure MSI interrupts
3850  */
3851 static int
3852 nv_add_msi_intrs(nv_ctl_t *nvc)
3853 {
3854 	dev_info_t	*devinfo = nvc->nvc_dip;
3855 	int		count, avail, actual;
3856 	int		x, y, rc, inum = 0;
3857 
3858 	NVLOG(NVDBG_INIT, nvc, NULL, "nv_add_msi_intrs", NULL);
3859 
3860 	/*
3861 	 * get number of interrupts
3862 	 */
3863 	rc = ddi_intr_get_nintrs(devinfo, DDI_INTR_TYPE_MSI, &count);
3864 	if ((rc != DDI_SUCCESS) || (count == 0)) {
3865 		nv_cmn_err(CE_WARN, nvc, NULL,
3866 		    "ddi_intr_get_nintrs() failed, "
3867 		    "rc %d count %d", rc, count);
3868 
3869 		return (DDI_FAILURE);
3870 	}
3871 
3872 	/*
3873 	 * get number of available interrupts
3874 	 */
3875 	rc = ddi_intr_get_navail(devinfo, DDI_INTR_TYPE_MSI, &avail);
3876 	if ((rc != DDI_SUCCESS) || (avail == 0)) {
3877 		nv_cmn_err(CE_WARN, nvc, NULL,
3878 		    "ddi_intr_get_navail() failed, "
3879 		    "rc %d avail %d", rc, avail);
3880 
3881 		return (DDI_FAILURE);
3882 	}
3883 
3884 	if (avail < count) {
3885 		nv_cmn_err(CE_WARN, nvc, NULL,
3886 		    "ddi_intr_get_nvail returned %d ddi_intr_get_nintrs: %d",
3887 		    avail, count);
3888 	}
3889 
3890 	/*
3891 	 * allocate an array of interrupt handles
3892 	 */
3893 	nvc->nvc_intr_size = count * sizeof (ddi_intr_handle_t);
3894 	nvc->nvc_htable = kmem_alloc(nvc->nvc_intr_size, KM_SLEEP);
3895 
3896 	rc = ddi_intr_alloc(devinfo, nvc->nvc_htable, DDI_INTR_TYPE_MSI,
3897 	    inum, count, &actual, DDI_INTR_ALLOC_NORMAL);
3898 
3899 	if ((rc != DDI_SUCCESS) || (actual == 0)) {
3900 		nv_cmn_err(CE_WARN, nvc, NULL,
3901 		    "ddi_intr_alloc() failed, rc %d", rc);
3902 		kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3903 
3904 		return (DDI_FAILURE);
3905 	}
3906 
3907 	/*
3908 	 * Use interrupt count returned or abort?
3909 	 */
3910 	if (actual < count) {
3911 		NVLOG(NVDBG_INIT, nvc, NULL,
3912 		    "Requested: %d, Received: %d", count, actual);
3913 	}
3914 
3915 	nvc->nvc_intr_cnt = actual;
3916 
3917 	/*
3918 	 * get priority for first msi, assume remaining are all the same
3919 	 */
3920 	if (ddi_intr_get_pri(nvc->nvc_htable[0], &nvc->nvc_intr_pri) !=
3921 	    DDI_SUCCESS) {
3922 		nv_cmn_err(CE_WARN, nvc, NULL, "ddi_intr_get_pri() failed");
3923 
3924 		goto failure;
3925 	}
3926 
3927 	/*
3928 	 * test for high level mutex
3929 	 */
3930 	if (nvc->nvc_intr_pri >= ddi_intr_get_hilevel_pri()) {
3931 		nv_cmn_err(CE_WARN, nvc, NULL,
3932 		    "nv_add_msi_intrs: high level intr not supported");
3933 
3934 		goto failure;
3935 	}
3936 
3937 	/*
3938 	 * Call ddi_intr_add_handler()
3939 	 */
3940 	for (x = 0; x < actual; x++) {
3941 		if (ddi_intr_add_handler(nvc->nvc_htable[x],
3942 		    nvc->nvc_interrupt, (caddr_t)nvc, NULL) != DDI_SUCCESS) {
3943 			nv_cmn_err(CE_WARN, nvc, NULL,
3944 			    "ddi_intr_add_handler() failed");
3945 
3946 			goto failure;
3947 		}
3948 	}
3949 
3950 	(void) ddi_intr_get_cap(nvc->nvc_htable[0], &nvc->nvc_intr_cap);
3951 
3952 	if (nvc->nvc_intr_cap & DDI_INTR_FLAG_BLOCK) {
3953 		(void) ddi_intr_block_enable(nvc->nvc_htable,
3954 		    nvc->nvc_intr_cnt);
3955 	} else {
3956 		/*
3957 		 * Call ddi_intr_enable() for MSI non block enable
3958 		 */
3959 		for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3960 			(void) ddi_intr_enable(nvc->nvc_htable[x]);
3961 		}
3962 	}
3963 
3964 	return (DDI_SUCCESS);
3965 
3966 	failure:
3967 	/*
3968 	 * free allocated intr and nvc_htable
3969 	 */
3970 	for (y = 0; y < actual; y++) {
3971 		(void) ddi_intr_free(nvc->nvc_htable[y]);
3972 	}
3973 
3974 	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3975 
3976 	return (DDI_FAILURE);
3977 }
3978 #endif
3979 
3980 
3981 static void
3982 nv_rem_intrs(nv_ctl_t *nvc)
3983 {
3984 	int x, i;
3985 	nv_port_t *nvp;
3986 
3987 	NVLOG(NVDBG_INIT, nvc, NULL, "nv_rem_intrs", NULL);
3988 
3989 	/*
3990 	 * prevent controller from generating interrupts by
3991 	 * masking them out.  This is an extra precaution.
3992 	 */
3993 	for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
3994 		nvp = (&nvc->nvc_port[i]);
3995 		mutex_enter(&nvp->nvp_mutex);
3996 		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
3997 		mutex_exit(&nvp->nvp_mutex);
3998 	}
3999 
4000 	/*
4001 	 * disable all interrupts
4002 	 */
4003 	if ((nvc->nvc_intr_type == DDI_INTR_TYPE_MSI) &&
4004 	    (nvc->nvc_intr_cap & DDI_INTR_FLAG_BLOCK)) {
4005 		(void) ddi_intr_block_disable(nvc->nvc_htable,
4006 		    nvc->nvc_intr_cnt);
4007 	} else {
4008 		for (x = 0; x < nvc->nvc_intr_cnt; x++) {
4009 			(void) ddi_intr_disable(nvc->nvc_htable[x]);
4010 		}
4011 	}
4012 
4013 	for (x = 0; x < nvc->nvc_intr_cnt; x++) {
4014 		(void) ddi_intr_remove_handler(nvc->nvc_htable[x]);
4015 		(void) ddi_intr_free(nvc->nvc_htable[x]);
4016 	}
4017 
4018 	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
4019 }
4020 
4021 
4022 /*
4023  * variable argument wrapper for cmn_err.  prefixes the instance and port
4024  * number if possible
4025  */
4026 static void
4027 nv_vcmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, va_list ap)
4028 {
4029 	char port[NV_STR_LEN];
4030 	char inst[NV_STR_LEN];
4031 	dev_info_t *dip;
4032 
4033 	if (nvc) {
4034 		(void) snprintf(inst, NV_STR_LEN, "inst %d",
4035 		    ddi_get_instance(nvc->nvc_dip));
4036 		dip = nvc->nvc_dip;
4037 	} else {
4038 		inst[0] = '\0';
4039 	}
4040 
4041 	if (nvp) {
4042 		(void) sprintf(port, "port%d", nvp->nvp_port_num);
4043 		dip = nvp->nvp_ctlp->nvc_dip;
4044 	} else {
4045 		port[0] = '\0';
4046 	}
4047 
4048 	mutex_enter(&nv_log_mutex);
4049 
4050 	(void) sprintf(nv_log_buf, "nv_sata %s %s%s", inst, port,
4051 	    (inst[0]|port[0] ? ": " :""));
4052 
4053 	(void) vsnprintf(&nv_log_buf[strlen(nv_log_buf)],
4054 	    NV_LOGBUF_LEN - strlen(nv_log_buf), fmt, ap);
4055 
4056 	/*
4057 	 * Log to console or log to file, depending on
4058 	 * nv_log_to_console setting.
4059 	 */
4060 	if (nv_log_to_console) {
4061 		if (nv_prom_print) {
4062 			prom_printf("%s\n", nv_log_buf);
4063 		} else {
4064 			cmn_err(ce, "%s", nv_log_buf);
4065 		}
4066 
4067 
4068 	} else {
4069 		cmn_err(ce, "!%s", nv_log_buf);
4070 	}
4071 
4072 
4073 	(void) sprintf(nv_log_buf, "%s%s", port, (port[0] ? ": " :""));
4074 
4075 	(void) vsnprintf(&nv_log_buf[strlen(nv_log_buf)],
4076 	    NV_LOGBUF_LEN - strlen(nv_log_buf), fmt, ap);
4077 
4078 	sata_trace_debug(dip, nv_log_buf);
4079 
4080 
4081 	mutex_exit(&nv_log_mutex);
4082 }
4083 
4084 
4085 /*
4086  * wrapper for cmn_err
4087  */
4088 static void
4089 nv_cmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...)
4090 {
4091 	va_list ap;
4092 
4093 	va_start(ap, fmt);
4094 	nv_vcmn_err(ce, nvc, nvp, fmt, ap);
4095 	va_end(ap);
4096 }
4097 
4098 
4099 static void
4100 nv_log(nv_ctl_t *nvc, nv_port_t *nvp, const char *fmt, ...)
4101 {
4102 	va_list ap;
4103 
4104 	va_start(ap, fmt);
4105 
4106 	if (nvp == NULL && nvc == NULL) {
4107 		sata_vtrace_debug(NULL, fmt, ap);
4108 		va_end(ap);
4109 
4110 		return;
4111 	}
4112 
4113 	if (nvp == NULL && nvc != NULL) {
4114 		sata_vtrace_debug(nvc->nvc_dip, fmt, ap);
4115 		va_end(ap);
4116 
4117 		return;
4118 	}
4119 
4120 	/*
4121 	 * nvp is not NULL, but nvc might be.  Reference nvp for both
4122 	 * port and dip.
4123 	 */
4124 	mutex_enter(&nv_log_mutex);
4125 
4126 	(void) snprintf(nv_log_buf, NV_LOGBUF_LEN, "port%d: %s",
4127 	    nvp->nvp_port_num, fmt);
4128 
4129 	sata_vtrace_debug(nvp->nvp_ctlp->nvc_dip, nv_log_buf, ap);
4130 
4131 	mutex_exit(&nv_log_mutex);
4132 
4133 	va_end(ap);
4134 }
4135 
4136 
4137 /*
4138  * program registers which are common to all commands
4139  */
4140 static void
4141 nv_program_taskfile_regs(nv_port_t *nvp, int slot)
4142 {
4143 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4144 	sata_pkt_t *spkt;
4145 	sata_cmd_t *satacmd;
4146 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4147 	uint8_t cmd, ncq = B_FALSE;
4148 
4149 	spkt = nv_slotp->nvslot_spkt;
4150 	satacmd = &spkt->satapkt_cmd;
4151 	cmd = satacmd->satacmd_cmd_reg;
4152 
4153 	ASSERT(nvp->nvp_slot);
4154 
4155 	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
4156 	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
4157 		ncq = B_TRUE;
4158 	}
4159 
4160 	/*
4161 	 * select the drive
4162 	 */
4163 	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);
4164 
4165 	/*
4166 	 * make certain the drive selected
4167 	 */
4168 	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
4169 	    NV_SEC2USEC(5), 0) == B_FALSE) {
4170 
4171 		return;
4172 	}
4173 
4174 	switch (spkt->satapkt_cmd.satacmd_addr_type) {
4175 
4176 	case ATA_ADDR_LBA:
4177 		NVLOG(NVDBG_DELIVER, nvp->nvp_ctlp, nvp, "ATA_ADDR_LBA mode",
4178 		    NULL);
4179 
4180 		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
4181 		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
4182 		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
4183 		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
4184 
4185 		break;
4186 
4187 	case ATA_ADDR_LBA28:
4188 		NVLOG(NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
4189 		    "ATA_ADDR_LBA28 mode", NULL);
4190 		/*
4191 		 * NCQ only uses 48-bit addressing
4192 		 */
4193 		ASSERT(ncq != B_TRUE);
4194 
4195 		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
4196 		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
4197 		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
4198 		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
4199 
4200 		break;
4201 
4202 	case ATA_ADDR_LBA48:
4203 		NVLOG(NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
4204 		    "ATA_ADDR_LBA48 mode", NULL);
4205 
4206 		/*
4207 		 * for NCQ, tag goes into count register and real sector count
4208 		 * into features register.  The sata module does the translation
4209 		 * in the satacmd.
4210 		 */
4211 		if (ncq == B_TRUE) {
4212 			nv_put8(cmdhdl, nvp->nvp_count, slot << 3);
4213 			nv_put8(cmdhdl, nvp->nvp_feature,
4214 			    satacmd->satacmd_features_reg_ext);
4215 			nv_put8(cmdhdl, nvp->nvp_feature,
4216 			    satacmd->satacmd_features_reg);
4217 		} else {
4218 			nv_put8(cmdhdl, nvp->nvp_count,
4219 			    satacmd->satacmd_sec_count_msb);
4220 			nv_put8(cmdhdl, nvp->nvp_count,
4221 			    satacmd->satacmd_sec_count_lsb);
4222 		}
4223 
4224 		/*
4225 		 * send the high-order half first
4226 		 */
4227 		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_msb);
4228 		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_msb);
4229 		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_msb);
4230 		/*
4231 		 * Send the low-order half
4232 		 */
4233 		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
4234 		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
4235 		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
4236 
4237 		break;
4238 
4239 	case 0:
4240 		/*
4241 		 * non-media access commands such as identify and features
4242 		 * take this path.
4243 		 */
4244 		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
4245 		nv_put8(cmdhdl, nvp->nvp_feature,
4246 		    satacmd->satacmd_features_reg);
4247 		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
4248 		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
4249 		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
4250 
4251 		break;
4252 
4253 	default:
4254 		break;
4255 	}
4256 
4257 	ASSERT(nvp->nvp_slot);
4258 }
4259 
4260 
4261 /*
4262  * start a command that involves no media access
4263  */
4264 static int
4265 nv_start_nodata(nv_port_t *nvp, int slot)
4266 {
4267 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4268 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4269 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4270 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4271 
4272 	nv_program_taskfile_regs(nvp, slot);
4273 
4274 	/*
4275 	 * This next one sets the controller in motion
4276 	 */
4277 	nv_put8(cmdhdl, nvp->nvp_cmd, sata_cmdp->satacmd_cmd_reg);
4278 
4279 	return (SATA_TRAN_ACCEPTED);
4280 }
4281 
4282 
4283 static int
4284 nv_bm_status_clear(nv_port_t *nvp)
4285 {
4286 	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
4287 	uchar_t	status, ret;
4288 
4289 	/*
4290 	 * Get the current BM status
4291 	 */
4292 	ret = status = nv_get8(bmhdl, nvp->nvp_bmisx);
4293 
4294 	status = (status & BMISX_MASK) | BMISX_IDERR | BMISX_IDEINTS;
4295 
4296 	/*
4297 	 * Clear the latches (and preserve the other bits)
4298 	 */
4299 	nv_put8(bmhdl, nvp->nvp_bmisx, status);
4300 
4301 	return (ret);
4302 }
4303 
4304 
4305 /*
4306  * program the bus master DMA engine with the PRD address for
4307  * the active slot command, and start the DMA engine.
4308  */
4309 static void
4310 nv_start_dma_engine(nv_port_t *nvp, int slot)
4311 {
4312 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4313 	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
4314 	uchar_t direction;
4315 
4316 	ASSERT(nv_slotp->nvslot_spkt != NULL);
4317 
4318 	if (nv_slotp->nvslot_spkt->satapkt_cmd.satacmd_flags.sata_data_direction
4319 	    == SATA_DIR_READ) {
4320 		direction = BMICX_RWCON_WRITE_TO_MEMORY;
4321 	} else {
4322 		direction = BMICX_RWCON_READ_FROM_MEMORY;
4323 	}
4324 
4325 	NVLOG(NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
4326 	    "nv_start_dma_engine entered", NULL);
4327 
4328 #if NOT_USED
4329 	/*
4330 	 * NOT NEEDED. Left here of historical reason.
4331 	 * Reset the controller's interrupt and error status bits.
4332 	 */
4333 	(void) nv_bm_status_clear(nvp);
4334 #endif
4335 	/*
4336 	 * program the PRD table physical start address
4337 	 */
4338 	nv_put32(bmhdl, nvp->nvp_bmidtpx, nvp->nvp_sg_paddr[slot]);
4339 
4340 	/*
4341 	 * set the direction control and start the DMA controller
4342 	 */
4343 	nv_put8(bmhdl, nvp->nvp_bmicx, direction | BMICX_SSBM);
4344 }
4345 
4346 /*
4347  * start dma command, either in or out
4348  */
4349 static int
4350 nv_start_dma(nv_port_t *nvp, int slot)
4351 {
4352 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4353 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4354 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4355 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4356 	uint8_t cmd = sata_cmdp->satacmd_cmd_reg;
4357 #ifdef NCQ
4358 	uint8_t ncq = B_FALSE;
4359 #endif
4360 	ddi_acc_handle_t sghdl = nvp->nvp_sg_acc_hdl[slot];
4361 	uint_t *dstp = (uint_t *)nvp->nvp_sg_addr[slot];
4362 	int sg_count = sata_cmdp->satacmd_num_dma_cookies, idx;
4363 	ddi_dma_cookie_t  *srcp = sata_cmdp->satacmd_dma_cookie_list;
4364 
4365 	ASSERT(sg_count != 0);
4366 
4367 	if (sata_cmdp->satacmd_num_dma_cookies > NV_DMA_NSEGS) {
4368 		nv_cmn_err(CE_WARN, nvp->nvp_ctlp, nvp, "NV_DMA_NSEGS=%d <"
4369 		    " satacmd_num_dma_cookies=%d", NV_DMA_NSEGS,
4370 		    sata_cmdp->satacmd_num_dma_cookies);
4371 
4372 		return (NV_FAILURE);
4373 	}
4374 
4375 	nv_program_taskfile_regs(nvp, slot);
4376 
4377 	/*
4378 	 * start the drive in motion
4379 	 */
4380 	nv_put8(cmdhdl, nvp->nvp_cmd, cmd);
4381 
4382 	/*
4383 	 * the drive starts processing the transaction when the cmd register
4384 	 * is written.  This is done here before programming the DMA engine to
4385 	 * parallelize and save some time.  In the event that the drive is ready
4386 	 * before DMA, it will wait.
4387 	 */
4388 #ifdef NCQ
4389 	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
4390 	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
4391 		ncq = B_TRUE;
4392 	}
4393 #endif
4394 
4395 	/*
4396 	 * copy the PRD list to PRD table in DMA accessible memory
4397 	 * so that the controller can access it.
4398 	 */
4399 	for (idx = 0; idx < sg_count; idx++, srcp++) {
4400 		uint32_t size;
4401 
4402 		nv_put32(sghdl, dstp++, srcp->dmac_address);
4403 
4404 		/* Set the number of bytes to transfer, 0 implies 64KB */
4405 		size = srcp->dmac_size;
4406 		if (size == 0x10000)
4407 			size = 0;
4408 
4409 		/*
4410 		 * If this is a 40-bit address, copy bits 32-40 of the
4411 		 * physical address to bits 16-24 of the PRD count.
4412 		 */
4413 		if (srcp->dmac_laddress > UINT32_MAX) {
4414 			size |= ((srcp->dmac_laddress & 0xff00000000) >> 16);
4415 		}
4416 
4417 		/*
4418 		 * set the end of table flag for the last entry
4419 		 */
4420 		if (idx == (sg_count - 1)) {
4421 			size |= PRDE_EOT;
4422 		}
4423 
4424 		nv_put32(sghdl, dstp++, size);
4425 	}
4426 
4427 	(void) ddi_dma_sync(nvp->nvp_sg_dma_hdl[slot], 0,
4428 	    sizeof (prde_t) * NV_DMA_NSEGS, DDI_DMA_SYNC_FORDEV);
4429 
4430 	nv_start_dma_engine(nvp, slot);
4431 
4432 #ifdef NCQ
4433 	/*
4434 	 * optimization:  for SWNCQ, start DMA engine if this is the only
4435 	 * command running.  Preliminary NCQ efforts indicated this needs
4436 	 * more debugging.
4437 	 *
4438 	 * if (nvp->nvp_ncq_run <= 1)
4439 	 */
4440 
4441 	if (ncq == B_FALSE) {
4442 		NVLOG(NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
4443 		    "NOT NCQ so starting DMA NOW non_ncq_commands=%d"
4444 		    " cmd = %X", non_ncq_commands++, cmd);
4445 		nv_start_dma_engine(nvp, slot);
4446 	} else {
4447 		NVLOG(NVDBG_DELIVER, nvp->nvp_ctlp, nvp, "NCQ, so program "
4448 		    "DMA later ncq_commands=%d cmd = %X", ncq_commands++, cmd);
4449 	}
4450 #endif /* NCQ */
4451 
4452 	return (SATA_TRAN_ACCEPTED);
4453 }
4454 
4455 
4456 /*
4457  * start a PIO data-in ATA command
4458  */
4459 static int
4460 nv_start_pio_in(nv_port_t *nvp, int slot)
4461 {
4462 
4463 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4464 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4465 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4466 
4467 	nv_program_taskfile_regs(nvp, slot);
4468 
4469 	/*
4470 	 * This next one sets the drive in motion
4471 	 */
4472 	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
4473 
4474 	return (SATA_TRAN_ACCEPTED);
4475 }
4476 
4477 
4478 /*
4479  * start a PIO data-out ATA command
4480  */
4481 static int
4482 nv_start_pio_out(nv_port_t *nvp, int slot)
4483 {
4484 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4485 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4486 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4487 
4488 	nv_program_taskfile_regs(nvp, slot);
4489 
4490 	/*
4491 	 * this next one sets the drive in motion
4492 	 */
4493 	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
4494 
4495 	/*
4496 	 * wait for the busy bit to settle
4497 	 */
4498 	NV_DELAY_NSEC(400);
4499 
4500 	/*
4501 	 * wait for the drive to assert DRQ to send the first chunk
4502 	 * of data. Have to busy wait because there's no interrupt for
4503 	 * the first chunk. This is bad... uses a lot of cycles if the
4504 	 * drive responds too slowly or if the wait loop granularity
4505 	 * is too large. It's even worse if the drive is defective and
4506 	 * the loop times out.
4507 	 */
4508 	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
4509 	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
4510 	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
4511 	    4000000, 0) == B_FALSE) {
4512 		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4513 
4514 		goto error;
4515 	}
4516 
4517 	/*
4518 	 * send the first block.
4519 	 */
4520 	nv_intr_pio_out(nvp, nv_slotp);
4521 
4522 	/*
4523 	 * If nvslot_flags is not set to COMPLETE yet, then processing
4524 	 * is OK so far, so return.  Otherwise, fall into error handling
4525 	 * below.
4526 	 */
4527 	if (nv_slotp->nvslot_flags != NVSLOT_COMPLETE) {
4528 
4529 		return (SATA_TRAN_ACCEPTED);
4530 	}
4531 
4532 	error:
4533 	/*
4534 	 * there was an error so reset the device and complete the packet.
4535 	 */
4536 	nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4537 	nv_complete_io(nvp, spkt, 0);
4538 	nvp->nvp_state |= NV_PORT_RESET;
4539 	nvp->nvp_state &= ~(NV_PORT_RESTORE | NV_PORT_RESET_RETRY);
4540 	nv_reset(nvp, "pio_out");
4541 
4542 	return (SATA_TRAN_PORT_ERROR);
4543 }
4544 
4545 
4546 /*
4547  * start a ATAPI Packet command (PIO data in or out)
4548  */
4549 static int
4550 nv_start_pkt_pio(nv_port_t *nvp, int slot)
4551 {
4552 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4553 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4554 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4555 	sata_cmd_t *satacmd = &spkt->satapkt_cmd;
4556 
4557 	NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4558 	    "nv_start_pkt_pio: start", NULL);
4559 
4560 	/*
4561 	 * Write the PACKET command to the command register.  Normally
4562 	 * this would be done through nv_program_taskfile_regs().  It
4563 	 * is done here because some values need to be overridden.
4564 	 */
4565 
4566 	/* select the drive */
4567 	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);
4568 
4569 	/* make certain the drive selected */
4570 	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
4571 	    NV_SEC2USEC(5), 0) == B_FALSE) {
4572 		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4573 		    "nv_start_pkt_pio: drive select failed", NULL);
4574 		return (SATA_TRAN_PORT_ERROR);
4575 	}
4576 
4577 	/*
4578 	 * The command is always sent via PIO, despite whatever the SATA
4579 	 * framework sets in the command.  Overwrite the DMA bit to do this.
4580 	 * Also, overwrite the overlay bit to be safe (it shouldn't be set).
4581 	 */
4582 	nv_put8(cmdhdl, nvp->nvp_feature, 0);	/* deassert DMA and OVL */
4583 
4584 	/* set appropriately by the sata framework */
4585 	nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
4586 	nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
4587 	nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
4588 	nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
4589 
4590 	/* initiate the command by writing the command register last */
4591 	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
4592 
4593 	/* Give the host controller time to do its thing */
4594 	NV_DELAY_NSEC(400);
4595 
4596 	/*
4597 	 * Wait for the device to indicate that it is ready for the command
4598 	 * ATAPI protocol state - HP0: Check_Status_A
4599 	 */
4600 
4601 	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
4602 	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
4603 	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
4604 	    4000000, 0) == B_FALSE) {
4605 		/*
4606 		 * Either an error or device fault occurred or the wait
4607 		 * timed out.  According to the ATAPI protocol, command
4608 		 * completion is also possible.  Other implementations of
4609 		 * this protocol don't handle this last case, so neither
4610 		 * does this code.
4611 		 */
4612 
4613 		if (nv_get8(cmdhdl, nvp->nvp_status) &
4614 		    (SATA_STATUS_ERR | SATA_STATUS_DF)) {
4615 			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4616 
4617 			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4618 			    "nv_start_pkt_pio: device error (HP0)", NULL);
4619 		} else {
4620 			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4621 
4622 			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4623 			    "nv_start_pkt_pio: timeout (HP0)", NULL);
4624 		}
4625 
4626 		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4627 		nv_complete_io(nvp, spkt, 0);
4628 		nvp->nvp_state |= NV_PORT_RESET;
4629 		nvp->nvp_state &= ~(NV_PORT_RESTORE | NV_PORT_RESET_RETRY);
4630 		nv_reset(nvp, "start_pkt_pio");
4631 
4632 		return (SATA_TRAN_PORT_ERROR);
4633 	}
4634 
4635 	/*
4636 	 * Put the ATAPI command in the data register
4637 	 * ATAPI protocol state - HP1: Send_Packet
4638 	 */
4639 
4640 	ddi_rep_put16(cmdhdl, (ushort_t *)spkt->satapkt_cmd.satacmd_acdb,
4641 	    (ushort_t *)nvp->nvp_data,
4642 	    (spkt->satapkt_cmd.satacmd_acdb_len >> 1), DDI_DEV_NO_AUTOINCR);
4643 
4644 	/*
4645 	 * See you in nv_intr_pkt_pio.
4646 	 * ATAPI protocol state - HP3: INTRQ_wait
4647 	 */
4648 
4649 	NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4650 	    "nv_start_pkt_pio: exiting into HP3", NULL);
4651 
4652 	return (SATA_TRAN_ACCEPTED);
4653 }
4654 
4655 
4656 /*
4657  * Interrupt processing for a non-data ATA command.
4658  */
4659 static void
4660 nv_intr_nodata(nv_port_t *nvp, nv_slot_t *nv_slotp)
4661 {
4662 	uchar_t status;
4663 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4664 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4665 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4666 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4667 
4668 	NVLOG(NVDBG_INTR, nvp->nvp_ctlp, nvp, "nv_intr_nodata entered", NULL);
4669 
4670 	status = nv_get8(cmdhdl, nvp->nvp_status);
4671 
4672 	/*
4673 	 * check for errors
4674 	 */
4675 	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
4676 		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4677 		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4678 		    nvp->nvp_altstatus);
4679 		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4680 	} else {
4681 		spkt->satapkt_reason = SATA_PKT_COMPLETED;
4682 	}
4683 
4684 	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4685 }
4686 
4687 
4688 /*
4689  * ATA command, PIO data in
4690  */
4691 static void
4692 nv_intr_pio_in(nv_port_t *nvp, nv_slot_t *nv_slotp)
4693 {
4694 	uchar_t	status;
4695 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4696 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4697 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4698 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4699 	int count;
4700 
4701 	status = nv_get8(cmdhdl, nvp->nvp_status);
4702 
4703 	if (status & SATA_STATUS_BSY) {
4704 		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4705 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4706 		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4707 		    nvp->nvp_altstatus);
4708 		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4709 		nvp->nvp_state |= NV_PORT_RESET;
4710 		nvp->nvp_state &= ~(NV_PORT_RESTORE | NV_PORT_RESET_RETRY);
4711 		nv_reset(nvp, "intr_pio_in");
4712 
4713 		return;
4714 	}
4715 
4716 	/*
4717 	 * check for errors
4718 	 */
4719 	if ((status & (SATA_STATUS_DRQ | SATA_STATUS_DF |
4720 	    SATA_STATUS_ERR)) != SATA_STATUS_DRQ) {
4721 		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4722 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4723 		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4724 
4725 		return;
4726 	}
4727 
4728 	/*
4729 	 * read the next chunk of data (if any)
4730 	 */
4731 	count = min(nv_slotp->nvslot_byte_count, NV_BYTES_PER_SEC);
4732 
4733 	/*
4734 	 * read count bytes
4735 	 */
4736 	ASSERT(count != 0);
4737 
4738 	ddi_rep_get16(cmdhdl, (ushort_t *)nv_slotp->nvslot_v_addr,
4739 	    (ushort_t *)nvp->nvp_data, (count >> 1), DDI_DEV_NO_AUTOINCR);
4740 
4741 	nv_slotp->nvslot_v_addr += count;
4742 	nv_slotp->nvslot_byte_count -= count;
4743 
4744 
4745 	if (nv_slotp->nvslot_byte_count != 0) {
4746 		/*
4747 		 * more to transfer.  Wait for next interrupt.
4748 		 */
4749 		return;
4750 	}
4751 
4752 	/*
4753 	 * transfer is complete. wait for the busy bit to settle.
4754 	 */
4755 	NV_DELAY_NSEC(400);
4756 
4757 	spkt->satapkt_reason = SATA_PKT_COMPLETED;
4758 	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4759 }
4760 
4761 
4762 /*
4763  * ATA command PIO data out
4764  */
4765 static void
4766 nv_intr_pio_out(nv_port_t *nvp, nv_slot_t *nv_slotp)
4767 {
4768 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4769 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4770 	uchar_t status;
4771 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4772 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4773 	int count;
4774 
4775 	/*
4776 	 * clear the IRQ
4777 	 */
4778 	status = nv_get8(cmdhdl, nvp->nvp_status);
4779 
4780 	if (status & SATA_STATUS_BSY) {
4781 		/*
4782 		 * this should not happen
4783 		 */
4784 		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4785 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4786 		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4787 		    nvp->nvp_altstatus);
4788 		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4789 
4790 		return;
4791 	}
4792 
4793 	/*
4794 	 * check for errors
4795 	 */
4796 	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
4797 		nv_copy_registers(nvp,  &spkt->satapkt_device, spkt);
4798 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4799 		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4800 
4801 		return;
4802 	}
4803 
4804 	/*
4805 	 * this is the condition which signals the drive is
4806 	 * no longer ready to transfer.  Likely that the transfer
4807 	 * completed successfully, but check that byte_count is
4808 	 * zero.
4809 	 */
4810 	if ((status & SATA_STATUS_DRQ) == 0) {
4811 
4812 		if (nv_slotp->nvslot_byte_count == 0) {
4813 			/*
4814 			 * complete; successful transfer
4815 			 */
4816 			spkt->satapkt_reason = SATA_PKT_COMPLETED;
4817 		} else {
4818 			/*
4819 			 * error condition, incomplete transfer
4820 			 */
4821 			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4822 			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4823 		}
4824 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4825 
4826 		return;
4827 	}
4828 
4829 	/*
4830 	 * write the next chunk of data
4831 	 */
4832 	count = min(nv_slotp->nvslot_byte_count, NV_BYTES_PER_SEC);
4833 
4834 	/*
4835 	 * read or write count bytes
4836 	 */
4837 
4838 	ASSERT(count != 0);
4839 
4840 	ddi_rep_put16(cmdhdl, (ushort_t *)nv_slotp->nvslot_v_addr,
4841 	    (ushort_t *)nvp->nvp_data, (count >> 1), DDI_DEV_NO_AUTOINCR);
4842 
4843 	nv_slotp->nvslot_v_addr += count;
4844 	nv_slotp->nvslot_byte_count -= count;
4845 }
4846 
4847 
4848 /*
4849  * ATAPI PACKET command, PIO in/out interrupt
4850  *
4851  * Under normal circumstances, one of four different interrupt scenarios
4852  * will result in this function being called:
4853  *
4854  * 1. Packet command data transfer
4855  * 2. Packet command completion
4856  * 3. Request sense data transfer
4857  * 4. Request sense command completion
4858  */
4859 static void
4860 nv_intr_pkt_pio(nv_port_t *nvp, nv_slot_t *nv_slotp)
4861 {
4862 	uchar_t	status;
4863 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4864 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4865 	int direction = sata_cmdp->satacmd_flags.sata_data_direction;
4866 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4867 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4868 	uint16_t ctlr_count;
4869 	int count;
4870 
4871 	/* ATAPI protocol state - HP2: Check_Status_B */
4872 
4873 	status = nv_get8(cmdhdl, nvp->nvp_status);
4874 	NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4875 	    "nv_intr_pkt_pio: status 0x%x", status);
4876 
4877 	if (status & SATA_STATUS_BSY) {
4878 		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) != 0) {
4879 			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4880 			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4881 		} else {
4882 			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4883 			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4884 			nvp->nvp_state |= NV_PORT_RESET;
4885 			nvp->nvp_state &= ~(NV_PORT_RESTORE |
4886 			    NV_PORT_RESET_RETRY);
4887 			nv_reset(nvp, "intr_pkt_pio");
4888 		}
4889 
4890 		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4891 		    "nv_intr_pkt_pio: busy - status 0x%x", status);
4892 
4893 		return;
4894 	}
4895 
4896 	if ((status & SATA_STATUS_DF) != 0) {
4897 		/*
4898 		 * On device fault, just clean up and bail.  Request sense
4899 		 * will just default to its NO SENSE initialized value.
4900 		 */
4901 
4902 		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) == 0) {
4903 			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4904 		}
4905 
4906 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4907 		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4908 
4909 		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4910 		    nvp->nvp_altstatus);
4911 		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl,
4912 		    nvp->nvp_error);
4913 
4914 		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4915 		    "nv_intr_pkt_pio: device fault", NULL);
4916 
4917 		return;
4918 	}
4919 
4920 	if ((status & SATA_STATUS_ERR) != 0) {
4921 		/*
4922 		 * On command error, figure out whether we are processing a
4923 		 * request sense.  If so, clean up and bail.  Otherwise,
4924 		 * do a REQUEST SENSE.
4925 		 */
4926 
4927 		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) == 0) {
4928 			nv_slotp->nvslot_flags |= NVSLOT_RQSENSE;
4929 			if (nv_start_rqsense_pio(nvp, nv_slotp) ==
4930 			    NV_FAILURE) {
4931 				nv_copy_registers(nvp, &spkt->satapkt_device,
4932 				    spkt);
4933 				nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4934 				spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4935 			}
4936 
4937 			sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4938 			    nvp->nvp_altstatus);
4939 			sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl,
4940 			    nvp->nvp_error);
4941 		} else {
4942 			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4943 			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4944 
4945 			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4946 		}
4947 
4948 		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4949 		    "nv_intr_pkt_pio: error (status 0x%x)", status);
4950 
4951 		return;
4952 	}
4953 
4954 	if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) != 0) {
4955 		/*
4956 		 * REQUEST SENSE command processing
4957 		 */
4958 
4959 		if ((status & (SATA_STATUS_DRQ)) != 0) {
4960 			/* ATAPI state - HP4: Transfer_Data */
4961 
4962 			/* read the byte count from the controller */
4963 			ctlr_count =
4964 			    (uint16_t)nv_get8(cmdhdl, nvp->nvp_hcyl) << 8;
4965 			ctlr_count |= nv_get8(cmdhdl, nvp->nvp_lcyl);
4966 
4967 			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4968 			    "nv_intr_pkt_pio: ctlr byte count - %d",
4969 			    ctlr_count);
4970 
4971 			if (ctlr_count == 0) {
4972 				/* no data to transfer - some devices do this */
4973 
4974 				spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4975 				nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4976 
4977 				NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4978 				    "nv_intr_pkt_pio: done (no data)", NULL);
4979 
4980 				return;
4981 			}
4982 
4983 			count = min(ctlr_count, SATA_ATAPI_RQSENSE_LEN);
4984 
4985 			/* transfer the data */
4986 			ddi_rep_get16(cmdhdl,
4987 			    (ushort_t *)nv_slotp->nvslot_rqsense_buff,
4988 			    (ushort_t *)nvp->nvp_data, (count >> 1),
4989 			    DDI_DEV_NO_AUTOINCR);
4990 
4991 			/* consume residual bytes */
4992 			ctlr_count -= count;
4993 
4994 			if (ctlr_count > 0) {
4995 				for (; ctlr_count > 0; ctlr_count -= 2)
4996 					(void) ddi_get16(cmdhdl,
4997 					    (ushort_t *)nvp->nvp_data);
4998 			}
4999 
5000 			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5001 			    "nv_intr_pkt_pio: transition to HP2", NULL);
5002 		} else {
5003 			/* still in ATAPI state - HP2 */
5004 
5005 			/*
5006 			 * In order to avoid clobbering the rqsense data
5007 			 * set by the SATA framework, the sense data read
5008 			 * from the device is put in a separate buffer and
5009 			 * copied into the packet after the request sense
5010 			 * command successfully completes.
5011 			 */
5012 			bcopy(nv_slotp->nvslot_rqsense_buff,
5013 			    spkt->satapkt_cmd.satacmd_rqsense,
5014 			    SATA_ATAPI_RQSENSE_LEN);
5015 
5016 			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
5017 			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
5018 
5019 			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5020 			    "nv_intr_pkt_pio: request sense done", NULL);
5021 		}
5022 
5023 		return;
5024 	}
5025 
5026 	/*
5027 	 * Normal command processing
5028 	 */
5029 
5030 	if ((status & (SATA_STATUS_DRQ)) != 0) {
5031 		/* ATAPI protocol state - HP4: Transfer_Data */
5032 
5033 		/* read the byte count from the controller */
5034 		ctlr_count = (uint16_t)nv_get8(cmdhdl, nvp->nvp_hcyl) << 8;
5035 		ctlr_count |= nv_get8(cmdhdl, nvp->nvp_lcyl);
5036 
5037 		if (ctlr_count == 0) {
5038 			/* no data to transfer - some devices do this */
5039 
5040 			spkt->satapkt_reason = SATA_PKT_COMPLETED;
5041 			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
5042 
5043 			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5044 			    "nv_intr_pkt_pio: done (no data)", NULL);
5045 
5046 			return;
5047 		}
5048 
5049 		count = min(ctlr_count, nv_slotp->nvslot_byte_count);
5050 
5051 		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5052 		    "nv_intr_pkt_pio: drive_bytes 0x%x", ctlr_count);
5053 
5054 		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5055 		    "nv_intr_pkt_pio: byte_count 0x%x",
5056 		    nv_slotp->nvslot_byte_count);
5057 
5058 		/* transfer the data */
5059 
5060 		if (direction == SATA_DIR_READ) {
5061 			ddi_rep_get16(cmdhdl,
5062 			    (ushort_t *)nv_slotp->nvslot_v_addr,
5063 			    (ushort_t *)nvp->nvp_data, (count >> 1),
5064 			    DDI_DEV_NO_AUTOINCR);
5065 
5066 			ctlr_count -= count;
5067 
5068 			if (ctlr_count > 0) {
5069 				/* consume remainding bytes */
5070 
5071 				for (; ctlr_count > 0;
5072 				    ctlr_count -= 2)
5073 					(void) ddi_get16(cmdhdl,
5074 					    (ushort_t *)nvp->nvp_data);
5075 
5076 				NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5077 				    "nv_intr_pkt_pio: bytes remained", NULL);
5078 			}
5079 		} else {
5080 			ddi_rep_put16(cmdhdl,
5081 			    (ushort_t *)nv_slotp->nvslot_v_addr,
5082 			    (ushort_t *)nvp->nvp_data, (count >> 1),
5083 			    DDI_DEV_NO_AUTOINCR);
5084 		}
5085 
5086 		nv_slotp->nvslot_v_addr += count;
5087 		nv_slotp->nvslot_byte_count -= count;
5088 
5089 		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5090 		    "nv_intr_pkt_pio: transition to HP2", NULL);
5091 	} else {
5092 		/* still in ATAPI state - HP2 */
5093 
5094 		spkt->satapkt_reason = SATA_PKT_COMPLETED;
5095 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
5096 
5097 		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5098 		    "nv_intr_pkt_pio: done", NULL);
5099 	}
5100 }
5101 
5102 /*
5103  * ATA command, DMA data in/out
5104  */
5105 static void
5106 nv_intr_dma(nv_port_t *nvp, struct nv_slot *nv_slotp)
5107 {
5108 	uchar_t status;
5109 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
5110 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
5111 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
5112 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
5113 	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
5114 	uchar_t	bmicx;
5115 	uchar_t bm_status;
5116 
5117 	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
5118 
5119 	/*
5120 	 * stop DMA engine.
5121 	 */
5122 	bmicx = nv_get8(bmhdl, nvp->nvp_bmicx);
5123 	nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
5124 
5125 	/*
5126 	 * get the status and clear the IRQ, and check for DMA error
5127 	 */
5128 	status = nv_get8(cmdhdl, nvp->nvp_status);
5129 
5130 	/*
5131 	 * check for drive errors
5132 	 */
5133 	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
5134 		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
5135 		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
5136 		(void) nv_bm_status_clear(nvp);
5137 
5138 		return;
5139 	}
5140 
5141 	bm_status = nv_bm_status_clear(nvp);
5142 
5143 	/*
5144 	 * check for bus master errors
5145 	 */
5146 	if (bm_status & BMISX_IDERR) {
5147 		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
5148 		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
5149 		    nvp->nvp_altstatus);
5150 		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
5151 		nvp->nvp_state |= NV_PORT_RESET;
5152 		nvp->nvp_state &= ~(NV_PORT_RESTORE | NV_PORT_RESET_RETRY);
5153 		nv_reset(nvp, "intr_dma");
5154 
5155 		return;
5156 	}
5157 
5158 	spkt->satapkt_reason = SATA_PKT_COMPLETED;
5159 }
5160 
5161 
5162 /*
5163  * Wait for a register of a controller to achieve a specific state.
5164  * To return normally, all the bits in the first sub-mask must be ON,
5165  * all the bits in the second sub-mask must be OFF.
5166  * If timeout_usec microseconds pass without the controller achieving
5167  * the desired bit configuration, return TRUE, else FALSE.
5168  *
5169  * hybrid waiting algorithm: if not in interrupt context, busy looping will
5170  * occur for the first 250 us, then switch over to a sleeping wait.
5171  *
5172  */
5173 int
5174 nv_wait(nv_port_t *nvp, uchar_t onbits, uchar_t offbits, uint_t timeout_usec,
5175     int type_wait)
5176 {
5177 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
5178 	hrtime_t end, cur, start_sleep, start;
5179 	int first_time = B_TRUE;
5180 	ushort_t val;
5181 
5182 	for (;;) {
5183 		val = nv_get8(ctlhdl, nvp->nvp_altstatus);
5184 
5185 		if ((val & onbits) == onbits && (val & offbits) == 0) {
5186 
5187 			return (B_TRUE);
5188 		}
5189 
5190 		cur = gethrtime();
5191 
5192 		/*
5193 		 * store the start time and calculate the end
5194 		 * time.  also calculate "start_sleep" which is
5195 		 * the point after which the driver will stop busy
5196 		 * waiting and change to sleep waiting.
5197 		 */
5198 		if (first_time) {
5199 			first_time = B_FALSE;
5200 			/*
5201 			 * start and end are in nanoseconds
5202 			 */
5203 			start = cur;
5204 			end = start + timeout_usec * 1000;
5205 			/*
5206 			 * add 1 ms to start
5207 			 */
5208 			start_sleep =  start + 250000;
5209 
5210 			if (servicing_interrupt()) {
5211 				type_wait = NV_NOSLEEP;
5212 			}
5213 		}
5214 
5215 		if (cur > end) {
5216 
5217 			break;
5218 		}
5219 
5220 		if ((type_wait != NV_NOSLEEP) && (cur > start_sleep)) {
5221 #if ! defined(__lock_lint)
5222 			delay(1);
5223 #endif
5224 		} else {
5225 			drv_usecwait(nv_usec_delay);
5226 		}
5227 	}
5228 
5229 	return (B_FALSE);
5230 }
5231 
5232 
5233 /*
5234  * This is a slightly more complicated version that checks
5235  * for error conditions and bails-out rather than looping
5236  * until the timeout is exceeded.
5237  *
5238  * hybrid waiting algorithm: if not in interrupt context, busy looping will
5239  * occur for the first 250 us, then switch over to a sleeping wait.
5240  */
5241 int
5242 nv_wait3(
5243 	nv_port_t	*nvp,
5244 	uchar_t		onbits1,
5245 	uchar_t		offbits1,
5246 	uchar_t		failure_onbits2,
5247 	uchar_t		failure_offbits2,
5248 	uchar_t		failure_onbits3,
5249 	uchar_t		failure_offbits3,
5250 	uint_t		timeout_usec,
5251 	int		type_wait)
5252 {
5253 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
5254 	hrtime_t end, cur, start_sleep, start;
5255 	int first_time = B_TRUE;
5256 	ushort_t val;
5257 
5258 	for (;;) {
5259 		val = nv_get8(ctlhdl, nvp->nvp_altstatus);
5260 
5261 		/*
5262 		 * check for expected condition
5263 		 */
5264 		if ((val & onbits1) == onbits1 && (val & offbits1) == 0) {
5265 
5266 			return (B_TRUE);
5267 		}
5268 
5269 		/*
5270 		 * check for error conditions
5271 		 */
5272 		if ((val & failure_onbits2) == failure_onbits2 &&
5273 		    (val & failure_offbits2) == 0) {
5274 
5275 			return (B_FALSE);
5276 		}
5277 
5278 		if ((val & failure_onbits3) == failure_onbits3 &&
5279 		    (val & failure_offbits3) == 0) {
5280 
5281 			return (B_FALSE);
5282 		}
5283 
5284 		/*
5285 		 * store the start time and calculate the end
5286 		 * time.  also calculate "start_sleep" which is
5287 		 * the point after which the driver will stop busy
5288 		 * waiting and change to sleep waiting.
5289 		 */
5290 		if (first_time) {
5291 			first_time = B_FALSE;
5292 			/*
5293 			 * start and end are in nanoseconds
5294 			 */
5295 			cur = start = gethrtime();
5296 			end = start + timeout_usec * 1000;
5297 			/*
5298 			 * add 1 ms to start
5299 			 */
5300 			start_sleep =  start + 250000;
5301 
5302 			if (servicing_interrupt()) {
5303 				type_wait = NV_NOSLEEP;
5304 			}
5305 		} else {
5306 			cur = gethrtime();
5307 		}
5308 
5309 		if (cur > end) {
5310 
5311 			break;
5312 		}
5313 
5314 		if ((type_wait != NV_NOSLEEP) && (cur > start_sleep)) {
5315 #if ! defined(__lock_lint)
5316 			delay(1);
5317 #endif
5318 		} else {
5319 			drv_usecwait(nv_usec_delay);
5320 		}
5321 	}
5322 
5323 	return (B_FALSE);
5324 }
5325 
5326 
5327 /*
5328  * nv_port_state_change() reports the state of the port to the
5329  * sata module by calling sata_hba_event_notify().  This
5330  * function is called any time the state of the port is changed
5331  */
5332 static void
5333 nv_port_state_change(nv_port_t *nvp, int event, uint8_t addr_type, int state)
5334 {
5335 	sata_device_t sd;
5336 
5337 	NVLOG(NVDBG_EVENT, nvp->nvp_ctlp, nvp,
5338 	    "nv_port_state_change: event 0x%x type 0x%x state 0x%x "
5339 	    "time %ld (ticks)", event, addr_type, state, ddi_get_lbolt());
5340 
5341 	bzero((void *)&sd, sizeof (sata_device_t));
5342 	sd.satadev_rev = SATA_DEVICE_REV;
5343 	nv_copy_registers(nvp, &sd, NULL);
5344 
5345 	/*
5346 	 * When NCQ is implemented sactive and snotific field need to be
5347 	 * updated.
5348 	 */
5349 	sd.satadev_addr.cport = nvp->nvp_port_num;
5350 	sd.satadev_addr.qual = addr_type;
5351 	sd.satadev_state = state;
5352 
5353 	sata_hba_event_notify(nvp->nvp_ctlp->nvc_dip, &sd, event);
5354 }
5355 
5356 
5357 
5358 /*
5359  * Monitor reset progress and signature gathering.
5360  * This function may loop, so it should not be called from interrupt
5361  * context.
5362  *
5363  * Entered with nvp mutex held.
5364  */
5365 static void
5366 nv_monitor_reset(nv_port_t *nvp)
5367 {
5368 	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
5369 	uint32_t sstatus;
5370 	int send_notification = B_FALSE;
5371 	uint8_t dev_type;
5372 
5373 	sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
5374 
5375 	/*
5376 	 * We do not know here the reason for port reset.
5377 	 * Check the link status. The link needs to be active before
5378 	 * we can check the link's status.
5379 	 */
5380 	if ((SSTATUS_GET_IPM(sstatus) != SSTATUS_IPM_ACTIVE) ||
5381 	    (SSTATUS_GET_DET(sstatus) != SSTATUS_DET_DEVPRE_PHYCOM)) {
5382 		/*
5383 		 * Either link is not active or there is no device
5384 		 * If the link remains down for more than NV_LINK_DOWN_TIMEOUT
5385 		 * (milliseconds), abort signature acquisition and complete
5386 		 * reset processing.
5387 		 * The link will go down when COMRESET is sent by nv_reset(),
5388 		 * so it is practically nvp_reset_time milliseconds.
5389 		 */
5390 
5391 		if (TICK_TO_MSEC(ddi_get_lbolt() - nvp->nvp_reset_time) >=
5392 		    NV_LINK_DOWN_TIMEOUT) {
5393 			NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
5394 			    "nv_monitor_reset: no link - ending signature "
5395 			    "acquisition; time after reset %ldms",
5396 			    TICK_TO_MSEC(ddi_get_lbolt() -
5397 			    nvp->nvp_reset_time));
5398 		}
5399 		nvp->nvp_state &= ~(NV_PORT_RESET | NV_PORT_RESET_RETRY |
5400 		    NV_PORT_PROBE | NV_PORT_HOTPLUG_DELAY);
5401 		/*
5402 		 * Else, if the link was lost (i.e. was present before)
5403 		 * the controller should generate a 'remove' interrupt
5404 		 * that will cause the appropriate event notification.
5405 		 */
5406 		return;
5407 	}
5408 
5409 	NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
5410 	    "nv_monitor_reset: link up after reset; time %ldms",
5411 	    TICK_TO_MSEC(ddi_get_lbolt() - nvp->nvp_reset_time));
5412 
5413 sig_read:
5414 	if (nvp->nvp_signature != 0) {
5415 		/*
5416 		 * The link is up. The signature was acquired before (device
5417 		 * was present).
5418 		 * But we may need to wait for the signature (D2H FIS) before
5419 		 * accessing the drive.
5420 		 */
5421 		if (nv_wait_for_signature != 0) {
5422 			uint32_t old_signature;
5423 			uint8_t old_type;
5424 
5425 			old_signature = nvp->nvp_signature;
5426 			old_type = nvp->nvp_type;
5427 			nvp->nvp_signature = 0;
5428 			nv_read_signature(nvp);
5429 			if (nvp->nvp_signature == 0) {
5430 				nvp->nvp_signature = old_signature;
5431 				nvp->nvp_type = old_type;
5432 
5433 #ifdef NV_DEBUG
5434 				/* FOR DEBUGGING */
5435 				if (nv_wait_here_forever) {
5436 					drv_usecwait(1000);
5437 					goto sig_read;
5438 				}
5439 #endif
5440 				/*
5441 				 * Wait, but not endlessly.
5442 				 */
5443 				if (TICK_TO_MSEC(ddi_get_lbolt() -
5444 				    nvp->nvp_reset_time) <
5445 				    nv_sig_acquisition_time) {
5446 					drv_usecwait(1000);
5447 					goto sig_read;
5448 				} else if (!(nvp->nvp_state &
5449 				    NV_PORT_RESET_RETRY)) {
5450 					/*
5451 					 * Retry reset.
5452 					 */
5453 					NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
5454 					    "nv_monitor_reset: retrying reset "
5455 					    "time after first reset: %ldms",
5456 					    TICK_TO_MSEC(ddi_get_lbolt() -
5457 					    nvp->nvp_reset_time));
5458 					nvp->nvp_state |= NV_PORT_RESET_RETRY;
5459 					nv_reset(nvp, "monitor_reset 1");
5460 					goto sig_read;
5461 				}
5462 
5463 				NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
5464 				    "nv_monitor_reset: terminating signature "
5465 				    "acquisition (1); time after reset: %ldms",
5466 				    TICK_TO_MSEC(ddi_get_lbolt() -
5467 				    nvp->nvp_reset_time));
5468 			} else {
5469 				NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
5470 				    "nv_monitor_reset: signature acquired; "
5471 				    "time after reset: %ldms",
5472 				    TICK_TO_MSEC(ddi_get_lbolt() -
5473 				    nvp->nvp_reset_time));
5474 			}
5475 		}
5476 		/*
5477 		 * Clear reset state, set device reset recovery state
5478 		 */
5479 		nvp->nvp_state &= ~(NV_PORT_RESET | NV_PORT_RESET_RETRY |
5480 		    NV_PORT_PROBE);
5481 		nvp->nvp_state |= NV_PORT_RESTORE;
5482 
5483 		/*
5484 		 * Need to send reset event notification
5485 		 */
5486 		send_notification = B_TRUE;
5487 	} else {
5488 		/*
5489 		 * The link is up. The signature was not acquired before.
5490 		 * We can try to fetch a device signature.
5491 		 */
5492 		dev_type = nvp->nvp_type;
5493 
5494 acquire_signature:
5495 		nv_read_signature(nvp);
5496 		if (nvp->nvp_signature != 0) {
5497 			/*
5498 			 * Got device signature.
5499 			 */
5500 			NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
5501 			    "nv_monitor_reset: signature acquired; "
5502 			    "time after reset: %ldms",
5503 			    TICK_TO_MSEC(ddi_get_lbolt() -
5504 			    nvp->nvp_reset_time));
5505 
5506 			/* Clear internal reset state */
5507 			nvp->nvp_state &=
5508 			    ~(NV_PORT_RESET | NV_PORT_RESET_RETRY);
5509 
5510 			if (dev_type != SATA_DTYPE_NONE) {
5511 				/*
5512 				 * We acquired the signature for a
5513 				 * pre-existing device that was not identified
5514 				 * before and and was reset.
5515 				 * Need to enter the device reset recovery
5516 				 * state and to send the reset notification.
5517 				 */
5518 				nvp->nvp_state |= NV_PORT_RESTORE;
5519 				send_notification = B_TRUE;
5520 			} else {
5521 				/*
5522 				 * Else, We acquired the signature because a new
5523 				 * device was attached (the driver attach or
5524 				 * a hot-plugged device). There is no need to
5525 				 * enter the device reset recovery state or to
5526 				 * send the reset notification, but we may need
5527 				 * to send a device attached notification.
5528 				 */
5529 				if (nvp->nvp_state & NV_PORT_PROBE) {
5530 					nv_port_state_change(nvp,
5531 					    SATA_EVNT_DEVICE_ATTACHED,
5532 					    SATA_ADDR_CPORT, 0);
5533 					nvp->nvp_state &= ~NV_PORT_PROBE;
5534 				}
5535 			}
5536 		} else {
5537 			if (TICK_TO_MSEC(ddi_get_lbolt() -
5538 			    nvp->nvp_reset_time) < nv_sig_acquisition_time) {
5539 				drv_usecwait(1000);
5540 				goto acquire_signature;
5541 			} else if (!(nvp->nvp_state & NV_PORT_RESET_RETRY)) {
5542 				/*
5543 				 * Some drives may require additional
5544 				 * reset(s) to get a valid signature
5545 				 * (indicating that the drive is ready).
5546 				 * If a drive was not just powered
5547 				 * up, the signature should be available
5548 				 * within few hundred milliseconds
5549 				 * after reset.  Therefore, if more than
5550 				 * NV_SIG_ACQUISITION_TIME has elapsed
5551 				 * while waiting for a signature, reset
5552 				 * device again.
5553 				 */
5554 				NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
5555 				    "nv_monitor_reset: retrying reset "
5556 				    "time after first reset: %ldms",
5557 				    TICK_TO_MSEC(ddi_get_lbolt() -
5558 				    nvp->nvp_reset_time));
5559 				nvp->nvp_state |= NV_PORT_RESET_RETRY;
5560 				nv_reset(nvp, "monitor_reset 2");
5561 				drv_usecwait(1000);
5562 				goto acquire_signature;
5563 			}
5564 			/*
5565 			 * Terminating signature acquisition.
5566 			 * Hopefully, the drive is ready.
5567 			 * The SATA module can deal with this as long as it
5568 			 * knows that some device is attached and a device
5569 			 * responds to commands.
5570 			 */
5571 			if (!(nvp->nvp_state & NV_PORT_PROBE)) {
5572 				send_notification = B_TRUE;
5573 			}
5574 			nvp->nvp_state &= ~(NV_PORT_RESET |
5575 			    NV_PORT_RESET_RETRY);
5576 			nvp->nvp_type = SATA_DTYPE_UNKNOWN;
5577 			if (nvp->nvp_state & NV_PORT_PROBE) {
5578 				nv_port_state_change(nvp,
5579 				    SATA_EVNT_DEVICE_ATTACHED,
5580 				    SATA_ADDR_CPORT, 0);
5581 				nvp->nvp_state &= ~NV_PORT_PROBE;
5582 			}
5583 			nvp->nvp_type = dev_type;
5584 			NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
5585 			    "nv_monitor_reset: terminating signature "
5586 			    "acquisition (2); time after reset: %ldms",
5587 			    TICK_TO_MSEC(ddi_get_lbolt() -
5588 			    nvp->nvp_reset_time));
5589 		}
5590 	}
5591 
5592 	if (send_notification) {
5593 		nv_port_state_change(nvp, SATA_EVNT_DEVICE_RESET,
5594 		    SATA_ADDR_DCPORT,
5595 		    SATA_DSTATE_RESET | SATA_DSTATE_PWR_ACTIVE);
5596 	}
5597 
5598 #ifdef SGPIO_SUPPORT
5599 	if (nvp->nvp_type == SATA_DTYPE_ATADISK) {
5600 		nv_sgp_drive_connect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
5601 		    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
5602 	} else {
5603 		nv_sgp_drive_disconnect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
5604 		    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
5605 	}
5606 #endif
5607 }
5608 
5609 
5610 /*
5611  * Send a hotplug (add device) notification at the appropriate time after
5612  * hotplug detection.
5613  * Relies on nvp_reset_time set at a hotplug detection time.
5614  * Called only from nv_timeout when NV_PORT_HOTPLUG_DELAY flag is set in
5615  * the nvp_state.
5616  */
5617 static void
5618 nv_delay_hotplug_notification(nv_port_t *nvp)
5619 {
5620 
5621 	if (TICK_TO_MSEC(ddi_get_lbolt() - nvp->nvp_reset_time) >=
5622 	    nv_hotplug_delay) {
5623 		NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
5624 		    "nv_delay_hotplug_notification: notifying framework after "
5625 		    "%dms delay", TICK_TO_MSEC(ddi_get_lbolt() -
5626 		    nvp->nvp_reset_time));
5627 		nvp->nvp_state &= ~NV_PORT_HOTPLUG_DELAY;
5628 		nv_port_state_change(nvp, SATA_EVNT_DEVICE_ATTACHED,
5629 		    SATA_ADDR_CPORT, 0);
5630 	}
5631 }
5632 
5633 /*
5634  * timeout processing:
5635  *
5636  * Check if any packets have crossed a timeout threshold.  If so,
5637  * abort the packet.  This function is not NCQ-aware.
5638  *
5639  * If reset was invoked, call reset monitoring function.
5640  *
5641  * Timeout frequency may be lower for checking packet timeout (1s)
5642  * and higher for reset monitoring (1ms)
5643  *
5644  */
5645 static void
5646 nv_timeout(void *arg)
5647 {
5648 	nv_port_t *nvp = arg;
5649 	nv_slot_t *nv_slotp;
5650 	int next_timeout = NV_ONE_SEC;	/* Default */
5651 	uint16_t int_status;
5652 	uint8_t status, bmstatus;
5653 	static int intr_warn_once = 0;
5654 
5655 	ASSERT(nvp != NULL);
5656 
5657 	mutex_enter(&nvp->nvp_mutex);
5658 	nvp->nvp_timeout_id = 0;
5659 
5660 	/*
5661 	 * If the port is not in the init state, ignore it.
5662 	 */
5663 	if ((nvp->nvp_state & NV_PORT_INIT) == 0) {
5664 		NVLOG(NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5665 		    "nv_timeout: port uninitialized", NULL);
5666 		next_timeout = 0;
5667 
5668 		goto finished;
5669 	}
5670 
5671 	if (nvp->nvp_state & (NV_PORT_RESET | NV_PORT_RESET_RETRY)) {
5672 		nv_monitor_reset(nvp);
5673 		next_timeout = NV_ONE_MSEC;	/* at least 1ms */
5674 
5675 		goto finished;
5676 	}
5677 
5678 	if ((nvp->nvp_state & NV_PORT_HOTPLUG_DELAY) != 0) {
5679 		nv_delay_hotplug_notification(nvp);
5680 		next_timeout = NV_ONE_MSEC;	/* at least 1ms */
5681 
5682 		goto finished;
5683 	}
5684 
5685 	/*
5686 	 * Not yet NCQ-aware - there is only one command active.
5687 	 */
5688 	nv_slotp = &(nvp->nvp_slot[0]);
5689 
5690 	/*
5691 	 * perform timeout checking and processing only if there is an
5692 	 * active packet on the port
5693 	 */
5694 	if (nv_slotp != NULL && nv_slotp->nvslot_spkt != NULL)  {
5695 		sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
5696 		sata_cmd_t *satacmd = &spkt->satapkt_cmd;
5697 		uint8_t cmd = satacmd->satacmd_cmd_reg;
5698 		uint64_t lba;
5699 
5700 #if ! defined(__lock_lint) && defined(DEBUG)
5701 
5702 		lba = (uint64_t)satacmd->satacmd_lba_low_lsb |
5703 		    ((uint64_t)satacmd->satacmd_lba_mid_lsb << 8) |
5704 		    ((uint64_t)satacmd->satacmd_lba_high_lsb << 16) |
5705 		    ((uint64_t)satacmd->satacmd_lba_low_msb << 24) |
5706 		    ((uint64_t)satacmd->satacmd_lba_mid_msb << 32) |
5707 		    ((uint64_t)satacmd->satacmd_lba_high_msb << 40);
5708 #endif
5709 
5710 		/*
5711 		 * timeout not needed if there is a polling thread
5712 		 */
5713 		if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
5714 			next_timeout = 0;
5715 
5716 			goto finished;
5717 		}
5718 
5719 		if (TICK_TO_SEC(ddi_get_lbolt() - nv_slotp->nvslot_stime) >
5720 		    spkt->satapkt_time) {
5721 
5722 			uint32_t serr = nv_get32(nvp->nvp_ctlp->nvc_bar_hdl[5],
5723 			    nvp->nvp_serror);
5724 
5725 			nv_cmn_err(CE_NOTE, nvp->nvp_ctlp, nvp,
5726 			    "nv_timeout: aborting: "
5727 			    "nvslot_stime: %ld max ticks till timeout: "
5728 			    "%ld cur_time: %ld cmd=%x lba=%d seq=%d",
5729 			    nv_slotp->nvslot_stime,
5730 			    drv_usectohz(MICROSEC *
5731 			    spkt->satapkt_time), ddi_get_lbolt(),
5732 			    cmd, lba, nvp->nvp_seq);
5733 
5734 			NVLOG(NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5735 			    "nv_timeout: SError at timeout: 0x%x", serr);
5736 
5737 			NVLOG(NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5738 			    "nv_timeout: previous cmd=%x",
5739 			    nvp->nvp_previous_cmd);
5740 
5741 			if (nvp->nvp_mcp5x_int_status != NULL) {
5742 				status = nv_get8(nvp->nvp_ctl_hdl,
5743 				    nvp->nvp_altstatus);
5744 				bmstatus = nv_get8(nvp->nvp_bm_hdl,
5745 				    nvp->nvp_bmisx);
5746 				int_status = nv_get16(
5747 				    nvp->nvp_ctlp->nvc_bar_hdl[5],
5748 				    nvp->nvp_mcp5x_int_status);
5749 				NVLOG(NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5750 				    "nv_timeout: altstatus %x, bmicx %x, "
5751 				    "int_status %X", status, bmstatus,
5752 				    int_status);
5753 
5754 				if (int_status & MCP5X_INT_COMPLETE) {
5755 					/*
5756 					 * Completion interrupt was missed!
5757 					 * Issue warning message once
5758 					 */
5759 					if (!intr_warn_once) {
5760 						nv_cmn_err(CE_WARN,
5761 						    nvp->nvp_ctlp,
5762 						    nvp,
5763 						    "nv_sata: missing command "
5764 						    "completion interrupt(s)!");
5765 						intr_warn_once = 1;
5766 					}
5767 					NVLOG(NVDBG_TIMEOUT, nvp->nvp_ctlp,
5768 					    nvp, "timeout detected with "
5769 					    "interrupt ready - calling "
5770 					    "int directly", NULL);
5771 					mutex_exit(&nvp->nvp_mutex);
5772 					(void) mcp5x_intr_port(nvp);
5773 					mutex_enter(&nvp->nvp_mutex);
5774 				} else {
5775 					/*
5776 					 * True timeout and not a missing
5777 					 * interrupt.
5778 					 */
5779 					(void) nv_abort_active(nvp, spkt,
5780 					    SATA_PKT_TIMEOUT, B_TRUE);
5781 				}
5782 			} else {
5783 				(void) nv_abort_active(nvp, spkt,
5784 				    SATA_PKT_TIMEOUT, B_TRUE);
5785 			}
5786 
5787 		} else {
5788 			NVLOG(NVDBG_VERBOSE, nvp->nvp_ctlp, nvp,
5789 			    "nv_timeout:"
5790 			    " still in use so restarting timeout",
5791 			    NULL);
5792 
5793 			next_timeout = NV_ONE_SEC;
5794 		}
5795 	} else {
5796 		/*
5797 		 * there was no active packet, so do not re-enable timeout
5798 		 */
5799 		next_timeout = 0;
5800 		NVLOG(NVDBG_VERBOSE, nvp->nvp_ctlp, nvp,
5801 		    "nv_timeout: no active packet so not re-arming "
5802 		    "timeout", NULL);
5803 	}
5804 
5805 finished:
5806 	if (next_timeout != 0) {
5807 		nv_setup_timeout(nvp, next_timeout);
5808 	}
5809 	mutex_exit(&nvp->nvp_mutex);
5810 }
5811 
5812 
5813 /*
5814  * enable or disable the 3 interrupt types the driver is
5815  * interested in: completion, add and remove.
5816  */
5817 static void
5818 ck804_set_intr(nv_port_t *nvp, int flag)
5819 {
5820 	nv_ctl_t *nvc = nvp->nvp_ctlp;
5821 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
5822 	uchar_t *bar5  = nvc->nvc_bar_addr[5];
5823 	uint8_t intr_bits[] = { CK804_INT_PDEV_HOT|CK804_INT_PDEV_INT,
5824 	    CK804_INT_SDEV_HOT|CK804_INT_SDEV_INT };
5825 	uint8_t clear_all_bits[] = { CK804_INT_PDEV_ALL, CK804_INT_SDEV_ALL };
5826 	uint8_t int_en, port = nvp->nvp_port_num, intr_status;
5827 
5828 	if (flag & NV_INTR_DISABLE_NON_BLOCKING) {
5829 		int_en = nv_get8(bar5_hdl,
5830 		    (uint8_t *)(bar5 + CK804_SATA_INT_EN));
5831 		int_en &= ~intr_bits[port];
5832 		nv_put8(bar5_hdl, (uint8_t *)(bar5 + CK804_SATA_INT_EN),
5833 		    int_en);
5834 		return;
5835 	}
5836 
5837 	ASSERT(mutex_owned(&nvp->nvp_mutex));
5838 
5839 	/*
5840 	 * controller level lock also required since access to an 8-bit
5841 	 * interrupt register is shared between both channels.
5842 	 */
5843 	mutex_enter(&nvc->nvc_mutex);
5844 
5845 	if (flag & NV_INTR_CLEAR_ALL) {
5846 		NVLOG(NVDBG_INTR, nvc, nvp,
5847 		    "ck804_set_intr: NV_INTR_CLEAR_ALL", NULL);
5848 
5849 		intr_status = nv_get8(nvc->nvc_bar_hdl[5],
5850 		    (uint8_t *)(nvc->nvc_ck804_int_status));
5851 
5852 		if (intr_status & clear_all_bits[port]) {
5853 
5854 			nv_put8(nvc->nvc_bar_hdl[5],
5855 			    (uint8_t *)(nvc->nvc_ck804_int_status),
5856 			    clear_all_bits[port]);
5857 
5858 			NVLOG(NVDBG_INTR, nvc, nvp,
5859 			    "interrupt bits cleared %x",
5860 			    intr_status & clear_all_bits[port]);
5861 		}
5862 	}
5863 
5864 	if (flag & NV_INTR_DISABLE) {
5865 		NVLOG(NVDBG_INTR, nvc, nvp,
5866 		    "ck804_set_intr: NV_INTR_DISABLE", NULL);
5867 		int_en = nv_get8(bar5_hdl,
5868 		    (uint8_t *)(bar5 + CK804_SATA_INT_EN));
5869 		int_en &= ~intr_bits[port];
5870 		nv_put8(bar5_hdl, (uint8_t *)(bar5 + CK804_SATA_INT_EN),
5871 		    int_en);
5872 	}
5873 
5874 	if (flag & NV_INTR_ENABLE) {
5875 		NVLOG(NVDBG_INTR, nvc, nvp, "ck804_set_intr: NV_INTR_ENABLE",
5876 		    NULL);
5877 		int_en = nv_get8(bar5_hdl,
5878 		    (uint8_t *)(bar5 + CK804_SATA_INT_EN));
5879 		int_en |= intr_bits[port];
5880 		nv_put8(bar5_hdl, (uint8_t *)(bar5 + CK804_SATA_INT_EN),
5881 		    int_en);
5882 	}
5883 
5884 	mutex_exit(&nvc->nvc_mutex);
5885 }
5886 
5887 
5888 /*
5889  * enable or disable the 3 interrupts the driver is interested in:
5890  * completion interrupt, hot add, and hot remove interrupt.
5891  */
5892 static void
5893 mcp5x_set_intr(nv_port_t *nvp, int flag)
5894 {
5895 	nv_ctl_t *nvc = nvp->nvp_ctlp;
5896 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
5897 	uint16_t intr_bits =
5898 	    MCP5X_INT_ADD|MCP5X_INT_REM|MCP5X_INT_COMPLETE;
5899 	uint16_t int_en;
5900 
5901 	if (flag & NV_INTR_DISABLE_NON_BLOCKING) {
5902 		int_en = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_ctl);
5903 		int_en &= ~intr_bits;
5904 		nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_ctl, int_en);
5905 		return;
5906 	}
5907 
5908 	ASSERT(mutex_owned(&nvp->nvp_mutex));
5909 
5910 	NVLOG(NVDBG_INTR, nvc, nvp, "mcp055_set_intr: enter flag: %d", flag);
5911 
5912 	if (flag & NV_INTR_CLEAR_ALL) {
5913 		NVLOG(NVDBG_INTR, nvc, nvp,
5914 		    "mcp5x_set_intr: NV_INTR_CLEAR_ALL", NULL);
5915 		nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status, MCP5X_INT_CLEAR);
5916 	}
5917 
5918 	if (flag & NV_INTR_ENABLE) {
5919 		NVLOG(NVDBG_INTR, nvc, nvp, "mcp5x_set_intr: NV_INTR_ENABLE",
5920 		    NULL);
5921 		int_en = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_ctl);
5922 		int_en |= intr_bits;
5923 		nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_ctl, int_en);
5924 	}
5925 
5926 	if (flag & NV_INTR_DISABLE) {
5927 		NVLOG(NVDBG_INTR, nvc, nvp,
5928 		    "mcp5x_set_intr: NV_INTR_DISABLE", NULL);
5929 		int_en = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_ctl);
5930 		int_en &= ~intr_bits;
5931 		nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_ctl, int_en);
5932 	}
5933 }
5934 
5935 
5936 static void
5937 nv_resume(nv_port_t *nvp)
5938 {
5939 	NVLOG(NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_resume()", NULL);
5940 
5941 	mutex_enter(&nvp->nvp_mutex);
5942 
5943 	if (nvp->nvp_state & NV_PORT_INACTIVE) {
5944 		mutex_exit(&nvp->nvp_mutex);
5945 
5946 		return;
5947 	}
5948 
5949 	/* Enable interrupt */
5950 	(*(nvp->nvp_ctlp->nvc_set_intr))(nvp, NV_INTR_CLEAR_ALL|NV_INTR_ENABLE);
5951 
5952 	/*
5953 	 * Power may have been removed to the port and the
5954 	 * drive, and/or a drive may have been added or removed.
5955 	 * Force a reset which will cause a probe and re-establish
5956 	 * any state needed on the drive.
5957 	 */
5958 	nvp->nvp_state |= NV_PORT_RESET;
5959 	nvp->nvp_state &= ~(NV_PORT_RESTORE | NV_PORT_RESET_RETRY);
5960 	nv_reset(nvp, "resume");
5961 
5962 	mutex_exit(&nvp->nvp_mutex);
5963 }
5964 
5965 
5966 static void
5967 nv_suspend(nv_port_t *nvp)
5968 {
5969 	NVLOG(NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_suspend()", NULL);
5970 
5971 	mutex_enter(&nvp->nvp_mutex);
5972 
5973 #ifdef SGPIO_SUPPORT
5974 	if (nvp->nvp_type == SATA_DTYPE_ATADISK) {
5975 		nv_sgp_drive_disconnect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
5976 		    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
5977 	}
5978 #endif
5979 
5980 	if (nvp->nvp_state & NV_PORT_INACTIVE) {
5981 		mutex_exit(&nvp->nvp_mutex);
5982 
5983 		return;
5984 	}
5985 
5986 	/*
5987 	 * Stop the timeout handler.
5988 	 * (It will be restarted in nv_reset() during nv_resume().)
5989 	 */
5990 	if (nvp->nvp_timeout_id) {
5991 		(void) untimeout(nvp->nvp_timeout_id);
5992 		nvp->nvp_timeout_id = 0;
5993 	}
5994 
5995 	/* Disable interrupt */
5996 	(*(nvp->nvp_ctlp->nvc_set_intr))(nvp,
5997 	    NV_INTR_CLEAR_ALL|NV_INTR_DISABLE);
5998 
5999 	mutex_exit(&nvp->nvp_mutex);
6000 }
6001 
6002 
6003 static void
6004 nv_copy_registers(nv_port_t *nvp, sata_device_t *sd, sata_pkt_t *spkt)
6005 {
6006 	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
6007 	sata_cmd_t *scmd = &spkt->satapkt_cmd;
6008 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
6009 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
6010 	uchar_t status;
6011 	struct sata_cmd_flags flags;
6012 
6013 	sd->satadev_scr.sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
6014 	sd->satadev_scr.serror = nv_get32(bar5_hdl, nvp->nvp_serror);
6015 	sd->satadev_scr.scontrol = nv_get32(bar5_hdl, nvp->nvp_sctrl);
6016 
6017 	if (spkt == NULL) {
6018 
6019 		return;
6020 	}
6021 
6022 	/*
6023 	 * in the error case, implicitly set the return of regs needed
6024 	 * for error handling.
6025 	 */
6026 	status = scmd->satacmd_status_reg = nv_get8(ctlhdl,
6027 	    nvp->nvp_altstatus);
6028 
6029 	flags = scmd->satacmd_flags;
6030 
6031 	if (status & SATA_STATUS_ERR) {
6032 		flags.sata_copy_out_lba_low_msb = B_TRUE;
6033 		flags.sata_copy_out_lba_mid_msb = B_TRUE;
6034 		flags.sata_copy_out_lba_high_msb = B_TRUE;
6035 		flags.sata_copy_out_lba_low_lsb = B_TRUE;
6036 		flags.sata_copy_out_lba_mid_lsb = B_TRUE;
6037 		flags.sata_copy_out_lba_high_lsb = B_TRUE;
6038 		flags.sata_copy_out_error_reg = B_TRUE;
6039 		flags.sata_copy_out_sec_count_msb = B_TRUE;
6040 		flags.sata_copy_out_sec_count_lsb = B_TRUE;
6041 		scmd->satacmd_status_reg = status;
6042 	}
6043 
6044 	if (scmd->satacmd_addr_type & ATA_ADDR_LBA48) {
6045 
6046 		/*
6047 		 * set HOB so that high byte will be read
6048 		 */
6049 		nv_put8(ctlhdl, nvp->nvp_devctl, ATDC_HOB|ATDC_D3);
6050 
6051 		/*
6052 		 * get the requested high bytes
6053 		 */
6054 		if (flags.sata_copy_out_sec_count_msb) {
6055 			scmd->satacmd_sec_count_msb =
6056 			    nv_get8(cmdhdl, nvp->nvp_count);
6057 		}
6058 
6059 		if (flags.sata_copy_out_lba_low_msb) {
6060 			scmd->satacmd_lba_low_msb =
6061 			    nv_get8(cmdhdl, nvp->nvp_sect);
6062 		}
6063 
6064 		if (flags.sata_copy_out_lba_mid_msb) {
6065 			scmd->satacmd_lba_mid_msb =
6066 			    nv_get8(cmdhdl, nvp->nvp_lcyl);
6067 		}
6068 
6069 		if (flags.sata_copy_out_lba_high_msb) {
6070 			scmd->satacmd_lba_high_msb =
6071 			    nv_get8(cmdhdl, nvp->nvp_hcyl);
6072 		}
6073 	}
6074 
6075 	/*
6076 	 * disable HOB so that low byte is read
6077 	 */
6078 	nv_put8(ctlhdl, nvp->nvp_devctl, ATDC_D3);
6079 
6080 	/*
6081 	 * get the requested low bytes
6082 	 */
6083 	if (flags.sata_copy_out_sec_count_lsb) {
6084 		scmd->satacmd_sec_count_lsb = nv_get8(cmdhdl, nvp->nvp_count);
6085 	}
6086 
6087 	if (flags.sata_copy_out_lba_low_lsb) {
6088 		scmd->satacmd_lba_low_lsb = nv_get8(cmdhdl, nvp->nvp_sect);
6089 	}
6090 
6091 	if (flags.sata_copy_out_lba_mid_lsb) {
6092 		scmd->satacmd_lba_mid_lsb = nv_get8(cmdhdl, nvp->nvp_lcyl);
6093 	}
6094 
6095 	if (flags.sata_copy_out_lba_high_lsb) {
6096 		scmd->satacmd_lba_high_lsb = nv_get8(cmdhdl, nvp->nvp_hcyl);
6097 	}
6098 
6099 	/*
6100 	 * get the device register if requested
6101 	 */
6102 	if (flags.sata_copy_out_device_reg) {
6103 		scmd->satacmd_device_reg =  nv_get8(cmdhdl, nvp->nvp_drvhd);
6104 	}
6105 
6106 	/*
6107 	 * get the error register if requested
6108 	 */
6109 	if (flags.sata_copy_out_error_reg) {
6110 		scmd->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
6111 	}
6112 }
6113 
6114 
6115 /*
6116  * Hot plug and remove interrupts can occur when the device is reset.  Just
6117  * masking the interrupt doesn't always work well because if a
6118  * different interrupt arrives on the other port, the driver can still
6119  * end up checking the state of the other port and discover the hot
6120  * interrupt flag is set even though it was masked.  Checking for recent
6121  * reset activity and then ignoring turns out to be the easiest way.
6122  *
6123  * Entered with nvp mutex held.
6124  */
6125 static void
6126 nv_report_add_remove(nv_port_t *nvp, int flags)
6127 {
6128 	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
6129 	uint32_t sstatus;
6130 	int i;
6131 	clock_t nv_lbolt = ddi_get_lbolt();
6132 
6133 
6134 	NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp, "nv_report_add_remove() - "
6135 	    "time (ticks) %d flags %x", nv_lbolt, flags);
6136 
6137 	/*
6138 	 * wait up to 1ms for sstatus to settle and reflect the true
6139 	 * status of the port.  Failure to do so can create confusion
6140 	 * in probe, where the incorrect sstatus value can still
6141 	 * persist.
6142 	 */
6143 	for (i = 0; i < 1000; i++) {
6144 		sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
6145 
6146 		if ((flags == NV_PORT_HOTREMOVED) &&
6147 		    ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) !=
6148 		    SSTATUS_DET_DEVPRE_PHYCOM)) {
6149 			break;
6150 		}
6151 
6152 		if ((flags != NV_PORT_HOTREMOVED) &&
6153 		    ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) ==
6154 		    SSTATUS_DET_DEVPRE_PHYCOM)) {
6155 			break;
6156 		}
6157 		drv_usecwait(1);
6158 	}
6159 
6160 	NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp,
6161 	    "sstatus took %d us for DEVPRE_PHYCOM to settle", i);
6162 
6163 	if (flags == NV_PORT_HOTREMOVED) {
6164 
6165 		(void) nv_abort_active(nvp, NULL, SATA_PKT_PORT_ERROR,
6166 		    B_FALSE);
6167 
6168 		/*
6169 		 * No device, no point of bothering with device reset
6170 		 */
6171 		nvp->nvp_type = SATA_DTYPE_NONE;
6172 		nvp->nvp_signature = 0;
6173 		nvp->nvp_state &= ~(NV_PORT_RESET | NV_PORT_RESET_RETRY |
6174 		    NV_PORT_RESTORE);
6175 		NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp,
6176 		    "nv_report_add_remove() hot removed", NULL);
6177 		nv_port_state_change(nvp,
6178 		    SATA_EVNT_DEVICE_DETACHED,
6179 		    SATA_ADDR_CPORT, 0);
6180 
6181 #ifdef SGPIO_SUPPORT
6182 		nv_sgp_drive_disconnect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
6183 		    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
6184 #endif
6185 	} else {
6186 		/*
6187 		 * This is a hot plug or link up indication
6188 		 * Now, re-check the link state - no link, no device
6189 		 */
6190 		if ((SSTATUS_GET_IPM(sstatus) == SSTATUS_IPM_ACTIVE) &&
6191 		    (SSTATUS_GET_DET(sstatus) == SSTATUS_DET_DEVPRE_PHYCOM)) {
6192 
6193 			if (nvp->nvp_type == SATA_DTYPE_NONE) {
6194 				/*
6195 				 * Real device attach - there was no device
6196 				 * attached to this port before this report
6197 				 */
6198 				NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp,
6199 				    "nv_report_add_remove() new device hot"
6200 				    "plugged", NULL);
6201 				nvp->nvp_reset_time = ddi_get_lbolt();
6202 				if (!(nvp->nvp_state &
6203 				    (NV_PORT_RESET_RETRY | NV_PORT_RESET))) {
6204 
6205 					nvp->nvp_signature = 0;
6206 					if (nv_reset_after_hotplug != 0) {
6207 
6208 						/*
6209 						 * Send reset to obtain a device
6210 						 * signature
6211 						 */
6212 						nvp->nvp_state |=
6213 						    NV_PORT_RESET |
6214 						    NV_PORT_PROBE;
6215 						nv_reset(nvp,
6216 						    "report_add_remove");
6217 					} else {
6218 						nvp->nvp_type =
6219 						    SATA_DTYPE_UNKNOWN;
6220 					}
6221 				}
6222 
6223 				if (!(nvp->nvp_state & NV_PORT_PROBE)) {
6224 					if (nv_reset_after_hotplug == 0) {
6225 						/*
6226 						 * In case a hotplug interrupt
6227 						 * is generated right after a
6228 						 * link is up, delay reporting
6229 						 * a hotplug event to let the
6230 						 * drive to initialize and to
6231 						 * send a D2H FIS with a
6232 						 * signature.
6233 						 * The timeout will issue an
6234 						 * event notification after
6235 						 * the NV_HOTPLUG_DELAY
6236 						 * milliseconds delay.
6237 						 */
6238 						nvp->nvp_state |=
6239 						    NV_PORT_HOTPLUG_DELAY;
6240 						nvp->nvp_type =
6241 						    SATA_DTYPE_UNKNOWN;
6242 						/*
6243 						 * Make sure timer is running.
6244 						 */
6245 						nv_setup_timeout(nvp,
6246 						    NV_ONE_MSEC);
6247 					} else {
6248 						nv_port_state_change(nvp,
6249 						    SATA_EVNT_DEVICE_ATTACHED,
6250 						    SATA_ADDR_CPORT, 0);
6251 					}
6252 				}
6253 				return;
6254 			}
6255 			/*
6256 			 * Otherwise it is a bogus attach, indicating recovered
6257 			 * link loss. No real need to report it after-the-fact.
6258 			 * But we may keep some statistics, or notify the
6259 			 * sata module by reporting LINK_LOST/LINK_ESTABLISHED
6260 			 * events to keep track of such occurrences.
6261 			 * Anyhow, we may want to terminate signature
6262 			 * acquisition.
6263 			 */
6264 			NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp,
6265 			    "nv_report_add_remove() ignoring plug interrupt "
6266 			    "- recovered link?", NULL);
6267 
6268 			if (nvp->nvp_state &
6269 			    (NV_PORT_RESET_RETRY | NV_PORT_RESET)) {
6270 				NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp,
6271 				    "nv_report_add_remove() - "
6272 				    "time since last reset %dms",
6273 				    TICK_TO_MSEC(ddi_get_lbolt() -
6274 				    nvp->nvp_reset_time));
6275 				/*
6276 				 * If the driver does not have to wait for
6277 				 * a signature, then terminate reset processing
6278 				 * now.
6279 				 */
6280 				if (nv_wait_for_signature == 0) {
6281 					NVLOG(NVDBG_RESET, nvp->nvp_ctlp,
6282 					    nvp, "nv_report_add_remove() - ",
6283 					    "terminating signature acquisition",
6284 					    ", time after reset: %dms",
6285 					    TICK_TO_MSEC(ddi_get_lbolt() -
6286 					    nvp->nvp_reset_time));
6287 
6288 					nvp->nvp_state &= ~(NV_PORT_RESET |
6289 					    NV_PORT_RESET_RETRY);
6290 
6291 					if (!(nvp->nvp_state & NV_PORT_PROBE)) {
6292 						nvp->nvp_state |=
6293 						    NV_PORT_RESTORE;
6294 						nvp->nvp_state &=
6295 						    ~NV_PORT_PROBE;
6296 
6297 						/*
6298 						 * It is not the initial device
6299 						 * probing, so notify sata
6300 						 * module that device was
6301 						 * reset
6302 						 */
6303 						nv_port_state_change(nvp,
6304 						    SATA_EVNT_DEVICE_RESET,
6305 						    SATA_ADDR_DCPORT,
6306 						    SATA_DSTATE_RESET |
6307 						    SATA_DSTATE_PWR_ACTIVE);
6308 					}
6309 
6310 				}
6311 			}
6312 			return;
6313 		}
6314 		NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp, "nv_report_add_remove()"
6315 		    "ignoring add dev interrupt - "
6316 		    "link is down or no device!", NULL);
6317 	}
6318 
6319 }
6320 
6321 /*
6322  * Get request sense data and stuff it the command's sense buffer.
6323  * Start a request sense command in order to get sense data to insert
6324  * in the sata packet's rqsense buffer.  The command completion
6325  * processing is in nv_intr_pkt_pio.
6326  *
6327  * The sata framework provides a function to allocate and set-up a
6328  * request sense packet command. The reasons it is not being used here is:
6329  * a) it cannot be called in an interrupt context and this function is
6330  *    called in an interrupt context.
6331  * b) it allocates DMA resources that are not used here because this is
6332  *    implemented using PIO.
6333  *
6334  * If, in the future, this is changed to use DMA, the sata framework should
6335  * be used to allocate and set-up the error retrieval (request sense)
6336  * command.
6337  */
6338 static int
6339 nv_start_rqsense_pio(nv_port_t *nvp, nv_slot_t *nv_slotp)
6340 {
6341 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
6342 	sata_cmd_t *satacmd = &spkt->satapkt_cmd;
6343 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
6344 	int cdb_len = spkt->satapkt_cmd.satacmd_acdb_len;
6345 
6346 	NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
6347 	    "nv_start_rqsense_pio: start", NULL);
6348 
6349 	/* clear the local request sense buffer before starting the command */
6350 	bzero(nv_slotp->nvslot_rqsense_buff, SATA_ATAPI_RQSENSE_LEN);
6351 
6352 	/* Write the request sense PACKET command */
6353 
6354 	/* select the drive */
6355 	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);
6356 
6357 	/* make certain the drive selected */
6358 	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
6359 	    NV_SEC2USEC(5), 0) == B_FALSE) {
6360 		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
6361 		    "nv_start_rqsense_pio: drive select failed", NULL);
6362 		return (NV_FAILURE);
6363 	}
6364 
6365 	/* set up the command */
6366 	nv_put8(cmdhdl, nvp->nvp_feature, 0);	/* deassert DMA and OVL */
6367 	nv_put8(cmdhdl, nvp->nvp_hcyl, SATA_ATAPI_MAX_BYTES_PER_DRQ >> 8);
6368 	nv_put8(cmdhdl, nvp->nvp_lcyl, SATA_ATAPI_MAX_BYTES_PER_DRQ & 0xff);
6369 	nv_put8(cmdhdl, nvp->nvp_sect, 0);
6370 	nv_put8(cmdhdl, nvp->nvp_count, 0);	/* no tag */
6371 
6372 	/* initiate the command by writing the command register last */
6373 	nv_put8(cmdhdl, nvp->nvp_cmd, SATAC_PACKET);
6374 
6375 	/* Give the host ctlr time to do its thing, according to ATA/ATAPI */
6376 	NV_DELAY_NSEC(400);
6377 
6378 	/*
6379 	 * Wait for the device to indicate that it is ready for the command
6380 	 * ATAPI protocol state - HP0: Check_Status_A
6381 	 */
6382 
6383 	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
6384 	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
6385 	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
6386 	    4000000, 0) == B_FALSE) {
6387 		if (nv_get8(cmdhdl, nvp->nvp_status) &
6388 		    (SATA_STATUS_ERR | SATA_STATUS_DF)) {
6389 			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
6390 			    "nv_start_rqsense_pio: rqsense dev error (HP0)",
6391 			    NULL);
6392 		} else {
6393 			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
6394 			    "nv_start_rqsense_pio: rqsense timeout (HP0)",
6395 			    NULL);
6396 		}
6397 
6398 		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
6399 		nv_complete_io(nvp, spkt, 0);
6400 		nvp->nvp_state |= NV_PORT_RESET;
6401 		nvp->nvp_state &= ~(NV_PORT_RESTORE | NV_PORT_RESET_RETRY);
6402 		nv_reset(nvp, "rqsense_pio");
6403 
6404 		return (NV_FAILURE);
6405 	}
6406 
6407 	/*
6408 	 * Put the ATAPI command in the data register
6409 	 * ATAPI protocol state - HP1: Send_Packet
6410 	 */
6411 
6412 	ddi_rep_put16(cmdhdl, (ushort_t *)nv_rqsense_cdb,
6413 	    (ushort_t *)nvp->nvp_data,
6414 	    (cdb_len >> 1), DDI_DEV_NO_AUTOINCR);
6415 
6416 	NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
6417 	    "nv_start_rqsense_pio: exiting into HP3", NULL);
6418 
6419 	return (NV_SUCCESS);
6420 }
6421 
6422 /*
6423  * quiesce(9E) entry point.
6424  *
6425  * This function is called when the system is single-threaded at high
6426  * PIL with preemption disabled. Therefore, this function must not be
6427  * blocked.
6428  *
6429  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
6430  * DDI_FAILURE indicates an error condition and should almost never happen.
6431  */
6432 static int
6433 nv_quiesce(dev_info_t *dip)
6434 {
6435 	int port, instance = ddi_get_instance(dip);
6436 	nv_ctl_t *nvc;
6437 
6438 	if ((nvc = (nv_ctl_t *)ddi_get_soft_state(nv_statep, instance)) == NULL)
6439 		return (DDI_FAILURE);
6440 
6441 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
6442 		nv_port_t *nvp = &(nvc->nvc_port[port]);
6443 		ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
6444 		ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
6445 		uint32_t sctrl;
6446 
6447 		/*
6448 		 * Stop the controllers from generating interrupts.
6449 		 */
6450 		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE_NON_BLOCKING);
6451 
6452 		/*
6453 		 * clear signature registers
6454 		 */
6455 		nv_put8(cmdhdl, nvp->nvp_sect, 0);
6456 		nv_put8(cmdhdl, nvp->nvp_lcyl, 0);
6457 		nv_put8(cmdhdl, nvp->nvp_hcyl, 0);
6458 		nv_put8(cmdhdl, nvp->nvp_count, 0);
6459 
6460 		nvp->nvp_signature = 0;
6461 		nvp->nvp_type = 0;
6462 		nvp->nvp_state |= NV_PORT_RESET;
6463 		nvp->nvp_reset_time = ddi_get_lbolt();
6464 
6465 		/*
6466 		 * assert reset in PHY by writing a 1 to bit 0 scontrol
6467 		 */
6468 		sctrl = nv_get32(bar5_hdl, nvp->nvp_sctrl);
6469 
6470 		nv_put32(bar5_hdl, nvp->nvp_sctrl,
6471 		    sctrl | SCONTROL_DET_COMRESET);
6472 
6473 		/*
6474 		 * wait 1ms
6475 		 */
6476 		drv_usecwait(1000);
6477 
6478 		/*
6479 		 * de-assert reset in PHY
6480 		 */
6481 		nv_put32(bar5_hdl, nvp->nvp_sctrl, sctrl);
6482 	}
6483 
6484 	return (DDI_SUCCESS);
6485 }
6486 
6487 
6488 #ifdef SGPIO_SUPPORT
6489 /*
6490  * NVIDIA specific SGPIO LED support
6491  * Please refer to the NVIDIA documentation for additional details
6492  */
6493 
6494 /*
6495  * nv_sgp_led_init
6496  * Detect SGPIO support.  If present, initialize.
6497  */
6498 static void
6499 nv_sgp_led_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
6500 {
6501 	uint16_t csrp;		/* SGPIO_CSRP from PCI config space */
6502 	uint32_t cbp;		/* SGPIO_CBP from PCI config space */
6503 	nv_sgp_cmn_t *cmn;	/* shared data structure */
6504 	int i;
6505 	char tqname[SGPIO_TQ_NAME_LEN];
6506 	extern caddr_t psm_map_phys_new(paddr_t, size_t, int);
6507 
6508 	/*
6509 	 * Initialize with appropriately invalid values in case this function
6510 	 * exits without initializing SGPIO (for example, there is no SGPIO
6511 	 * support).
6512 	 */
6513 	nvc->nvc_sgp_csr = 0;
6514 	nvc->nvc_sgp_cbp = NULL;
6515 	nvc->nvc_sgp_cmn = NULL;
6516 
6517 	/*
6518 	 * Only try to initialize SGPIO LED support if this property
6519 	 * indicates it should be.
6520 	 */
6521 	if (ddi_getprop(DDI_DEV_T_ANY, nvc->nvc_dip, DDI_PROP_DONTPASS,
6522 	    "enable-sgpio-leds", 0) != 1)
6523 		return;
6524 
6525 	/*
6526 	 * CK804 can pass the sgpio_detect test even though it does not support
6527 	 * SGPIO, so don't even look at a CK804.
6528 	 */
6529 	if (nvc->nvc_mcp5x_flag != B_TRUE)
6530 		return;
6531 
6532 	/*
6533 	 * The NVIDIA SGPIO support can nominally handle 6 drives.
6534 	 * However, the current implementation only supports 4 drives.
6535 	 * With two drives per controller, that means only look at the
6536 	 * first two controllers.
6537 	 */
6538 	if ((nvc->nvc_ctlr_num != 0) && (nvc->nvc_ctlr_num != 1))
6539 		return;
6540 
6541 	/* confirm that the SGPIO registers are there */
6542 	if (nv_sgp_detect(pci_conf_handle, &csrp, &cbp) != NV_SUCCESS) {
6543 		NVLOG(NVDBG_INIT, nvc, NULL,
6544 		    "SGPIO registers not detected", NULL);
6545 		return;
6546 	}
6547 
6548 	/* save off the SGPIO_CSR I/O address */
6549 	nvc->nvc_sgp_csr = csrp;
6550 
6551 	/* map in Control Block */
6552 	nvc->nvc_sgp_cbp = (nv_sgp_cb_t *)psm_map_phys_new(cbp,
6553 	    sizeof (nv_sgp_cb_t), PROT_READ | PROT_WRITE);
6554 
6555 	/* initialize the SGPIO h/w */
6556 	if (nv_sgp_init(nvc) == NV_FAILURE) {
6557 		nv_cmn_err(CE_WARN, nvc, NULL,
6558 		    "Unable to initialize SGPIO");
6559 	}
6560 
6561 	/*
6562 	 * Initialize the shared space for this instance.  This could
6563 	 * involve allocating the space, saving a pointer to the space
6564 	 * and starting the taskq that actually turns the LEDs on and off.
6565 	 * Or, it could involve just getting the pointer to the already
6566 	 * allocated space.
6567 	 */
6568 
6569 	mutex_enter(&nv_sgp_c2c_mutex);
6570 
6571 	/* try and find our CBP in the mapping table */
6572 	cmn = NULL;
6573 	for (i = 0; i < NV_MAX_CBPS; i++) {
6574 		if (nv_sgp_cbp2cmn[i].c2cm_cbp == cbp) {
6575 			cmn = nv_sgp_cbp2cmn[i].c2cm_cmn;
6576 			break;
6577 		}
6578 
6579 		if (nv_sgp_cbp2cmn[i].c2cm_cbp == 0)
6580 			break;
6581 	}
6582 
6583 	if (i >= NV_MAX_CBPS) {
6584 		/*
6585 		 * CBP to shared space mapping table is full
6586 		 */
6587 		nvc->nvc_sgp_cmn = NULL;
6588 		nv_cmn_err(CE_WARN, nvc, NULL,
6589 		    "LED handling not initialized - too many controllers");
6590 	} else if (cmn == NULL) {
6591 		/*
6592 		 * Allocate the shared space, point the SGPIO scratch register
6593 		 * at it and start the led update taskq.
6594 		 */
6595 
6596 		/* allocate shared space */
6597 		cmn = (nv_sgp_cmn_t *)kmem_zalloc(sizeof (nv_sgp_cmn_t),
6598 		    KM_SLEEP);
6599 		if (cmn == NULL) {
6600 			nv_cmn_err(CE_WARN, nvc, NULL,
6601 			    "Failed to allocate shared data");
6602 			return;
6603 		}
6604 
6605 		nvc->nvc_sgp_cmn = cmn;
6606 
6607 		/* initialize the shared data structure */
6608 		cmn->nvs_in_use = (1 << nvc->nvc_ctlr_num);
6609 		cmn->nvs_connected = 0;
6610 		cmn->nvs_activity = 0;
6611 		cmn->nvs_cbp = cbp;
6612 
6613 		mutex_init(&cmn->nvs_slock, NULL, MUTEX_DRIVER, NULL);
6614 		mutex_init(&cmn->nvs_tlock, NULL, MUTEX_DRIVER, NULL);
6615 		cv_init(&cmn->nvs_cv, NULL, CV_DRIVER, NULL);
6616 
6617 		/* put the address in the SGPIO scratch register */
6618 #if defined(__amd64)
6619 		nvc->nvc_sgp_cbp->sgpio_sr = (uint64_t)cmn;
6620 #else
6621 		nvc->nvc_sgp_cbp->sgpio_sr = (uint32_t)cmn;
6622 #endif
6623 
6624 		/* add an entry to the cbp to cmn mapping table */
6625 
6626 		/* i should be the next available table position */
6627 		nv_sgp_cbp2cmn[i].c2cm_cbp = cbp;
6628 		nv_sgp_cbp2cmn[i].c2cm_cmn = cmn;
6629 
6630 		/* start the activity LED taskq */
6631 
6632 		/*
6633 		 * The taskq name should be unique and the time
6634 		 */
6635 		(void) snprintf(tqname, SGPIO_TQ_NAME_LEN,
6636 		    "nvSataLed%x", (short)(ddi_get_lbolt() & 0xffff));
6637 		cmn->nvs_taskq = ddi_taskq_create(nvc->nvc_dip, tqname, 1,
6638 		    TASKQ_DEFAULTPRI, 0);
6639 		if (cmn->nvs_taskq == NULL) {
6640 			cmn->nvs_taskq_delay = 0;
6641 			nv_cmn_err(CE_WARN, nvc, NULL,
6642 			    "Failed to start activity LED taskq");
6643 		} else {
6644 			cmn->nvs_taskq_delay = SGPIO_LOOP_WAIT_USECS;
6645 			(void) ddi_taskq_dispatch(cmn->nvs_taskq,
6646 			    nv_sgp_activity_led_ctl, nvc, DDI_SLEEP);
6647 		}
6648 	} else {
6649 		nvc->nvc_sgp_cmn = cmn;
6650 		cmn->nvs_in_use |= (1 << nvc->nvc_ctlr_num);
6651 	}
6652 
6653 	mutex_exit(&nv_sgp_c2c_mutex);
6654 }
6655 
6656 /*
6657  * nv_sgp_detect
6658  * Read the SGPIO_CSR and SGPIO_CBP values from PCI config space and
6659  * report back whether both were readable.
6660  */
6661 static int
6662 nv_sgp_detect(ddi_acc_handle_t pci_conf_handle, uint16_t *csrpp,
6663     uint32_t *cbpp)
6664 {
6665 	/* get the SGPIO_CSRP */
6666 	*csrpp = pci_config_get16(pci_conf_handle, SGPIO_CSRP);
6667 	if (*csrpp == 0) {
6668 		return (NV_FAILURE);
6669 	}
6670 
6671 	/* SGPIO_CSRP is good, get the SGPIO_CBP */
6672 	*cbpp = pci_config_get32(pci_conf_handle, SGPIO_CBP);
6673 	if (*cbpp == 0) {
6674 		return (NV_FAILURE);
6675 	}
6676 
6677 	/* SGPIO_CBP is good, so we must support SGPIO */
6678 	return (NV_SUCCESS);
6679 }
6680 
6681 /*
6682  * nv_sgp_init
6683  * Initialize SGPIO.
6684  * The initialization process is described by NVIDIA, but the hardware does
6685  * not always behave as documented, so several steps have been changed and/or
6686  * omitted.
6687  */
6688 static int
6689 nv_sgp_init(nv_ctl_t *nvc)
6690 {
6691 	int seq;
6692 	int rval = NV_SUCCESS;
6693 	hrtime_t start, end;
6694 	uint32_t cmd;
6695 	uint32_t status;
6696 	int drive_count;
6697 
6698 	status = nv_sgp_csr_read(nvc);
6699 	if (SGPIO_CSR_SSTAT(status) == SGPIO_STATE_RESET) {
6700 		/* SGPIO logic is in reset state and requires initialization */
6701 
6702 		/* noting the Sequence field value */
6703 		seq = SGPIO_CSR_SEQ(status);
6704 
6705 		/* issue SGPIO_CMD_READ_PARAMS command */
6706 		cmd = SGPIO_CSR_CMD_SET(SGPIO_CMD_READ_PARAMS);
6707 		nv_sgp_csr_write(nvc, cmd);
6708 
6709 		DTRACE_PROBE2(sgpio__cmd, int, cmd, int, status);
6710 
6711 		/* poll for command completion */
6712 		start = gethrtime();
6713 		end = start + NV_SGP_CMD_TIMEOUT;
6714 		for (;;) {
6715 			status = nv_sgp_csr_read(nvc);
6716 
6717 			/* break on error */
6718 			if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_ERROR) {
6719 				NVLOG(NVDBG_VERBOSE, nvc, NULL,
6720 				    "Command error during initialization",
6721 				    NULL);
6722 				rval = NV_FAILURE;
6723 				break;
6724 			}
6725 
6726 			/* command processing is taking place */
6727 			if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK) {
6728 				if (SGPIO_CSR_SEQ(status) != seq) {
6729 					NVLOG(NVDBG_VERBOSE, nvc, NULL,
6730 					    "Sequence number change error",
6731 					    NULL);
6732 				}
6733 
6734 				break;
6735 			}
6736 
6737 			/* if completion not detected in 2000ms ... */
6738 
6739 			if (gethrtime() > end)
6740 				break;
6741 
6742 			/* wait 400 ns before checking again */
6743 			NV_DELAY_NSEC(400);
6744 		}
6745 	}
6746 
6747 	if (rval == NV_FAILURE)
6748 		return (rval);
6749 
6750 	if (SGPIO_CSR_SSTAT(status) != SGPIO_STATE_OPERATIONAL) {
6751 		NVLOG(NVDBG_VERBOSE, nvc, NULL,
6752 		    "SGPIO logic not operational after init - state %d",
6753 		    SGPIO_CSR_SSTAT(status));
6754 		/*
6755 		 * Should return (NV_FAILURE) but the hardware can be
6756 		 * operational even if the SGPIO Status does not indicate
6757 		 * this.
6758 		 */
6759 	}
6760 
6761 	/*
6762 	 * NVIDIA recommends reading the supported drive count even
6763 	 * though they also indicate that it is always 4 at this time.
6764 	 */
6765 	drive_count = SGP_CR0_DRV_CNT(nvc->nvc_sgp_cbp->sgpio_cr0);
6766 	if (drive_count != SGPIO_DRV_CNT_VALUE) {
6767 		NVLOG(NVDBG_INIT, nvc, NULL,
6768 		    "SGPIO reported undocumented drive count - %d",
6769 		    drive_count);
6770 	}
6771 
6772 	NVLOG(NVDBG_INIT, nvc, NULL,
6773 	    "initialized ctlr: %d csr: 0x%08x",
6774 	    nvc->nvc_ctlr_num, nvc->nvc_sgp_csr);
6775 
6776 	return (rval);
6777 }
6778 
6779 static int
6780 nv_sgp_check_set_cmn(nv_ctl_t *nvc)
6781 {
6782 	nv_sgp_cmn_t *cmn = nvc->nvc_sgp_cmn;
6783 
6784 	if (cmn == NULL)
6785 		return (NV_FAILURE);
6786 
6787 	mutex_enter(&cmn->nvs_slock);
6788 	cmn->nvs_in_use |= (1 << nvc->nvc_ctlr_num);
6789 	mutex_exit(&cmn->nvs_slock);
6790 
6791 	return (NV_SUCCESS);
6792 }
6793 
6794 /*
6795  * nv_sgp_csr_read
6796  * This is just a 32-bit port read from the value that was obtained from the
6797  * PCI config space.
6798  *
6799  * XXX It was advised to use the in[bwl] function for this, even though they
6800  * are obsolete interfaces.
6801  */
6802 static int
6803 nv_sgp_csr_read(nv_ctl_t *nvc)
6804 {
6805 	return (inl(nvc->nvc_sgp_csr));
6806 }
6807 
6808 /*
6809  * nv_sgp_csr_write
6810  * This is just a 32-bit I/O port write.  The port number was obtained from
6811  * the PCI config space.
6812  *
6813  * XXX It was advised to use the out[bwl] function for this, even though they
6814  * are obsolete interfaces.
6815  */
6816 static void
6817 nv_sgp_csr_write(nv_ctl_t *nvc, uint32_t val)
6818 {
6819 	outl(nvc->nvc_sgp_csr, val);
6820 }
6821 
6822 /*
6823  * nv_sgp_write_data
6824  * Cause SGPIO to send Control Block data
6825  */
6826 static int
6827 nv_sgp_write_data(nv_ctl_t *nvc)
6828 {
6829 	hrtime_t start, end;
6830 	uint32_t status;
6831 	uint32_t cmd;
6832 
6833 	/* issue command */
6834 	cmd = SGPIO_CSR_CMD_SET(SGPIO_CMD_WRITE_DATA);
6835 	nv_sgp_csr_write(nvc, cmd);
6836 
6837 	/* poll for completion */
6838 	start = gethrtime();
6839 	end = start + NV_SGP_CMD_TIMEOUT;
6840 	for (;;) {
6841 		status = nv_sgp_csr_read(nvc);
6842 
6843 		/* break on error completion */
6844 		if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_ERROR)
6845 			break;
6846 
6847 		/* break on successful completion */
6848 		if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK)
6849 			break;
6850 
6851 		/* Wait 400 ns and try again */
6852 		NV_DELAY_NSEC(400);
6853 
6854 		if (gethrtime() > end)
6855 			break;
6856 	}
6857 
6858 	if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK)
6859 		return (NV_SUCCESS);
6860 
6861 	return (NV_FAILURE);
6862 }
6863 
6864 /*
6865  * nv_sgp_activity_led_ctl
6866  * This is run as a taskq.  It wakes up at a fixed interval and checks to
6867  * see if any of the activity LEDs need to be changed.
6868  */
6869 static void
6870 nv_sgp_activity_led_ctl(void *arg)
6871 {
6872 	nv_ctl_t *nvc = (nv_ctl_t *)arg;
6873 	nv_sgp_cmn_t *cmn;
6874 	volatile nv_sgp_cb_t *cbp;
6875 	clock_t ticks;
6876 	uint8_t drv_leds;
6877 	uint32_t old_leds;
6878 	uint32_t new_led_state;
6879 	int i;
6880 
6881 	cmn = nvc->nvc_sgp_cmn;
6882 	cbp = nvc->nvc_sgp_cbp;
6883 
6884 	do {
6885 		/* save off the old state of all of the LEDs */
6886 		old_leds = cbp->sgpio0_tr;
6887 
6888 		DTRACE_PROBE3(sgpio__activity__state,
6889 		    int, cmn->nvs_connected, int, cmn->nvs_activity,
6890 		    int, old_leds);
6891 
6892 		new_led_state = 0;
6893 
6894 		/* for each drive */
6895 		for (i = 0; i < SGPIO_DRV_CNT_VALUE; i++) {
6896 
6897 			/* get the current state of the LEDs for the drive */
6898 			drv_leds = SGPIO0_TR_DRV(old_leds, i);
6899 
6900 			if ((cmn->nvs_connected & (1 << i)) == 0) {
6901 				/* if not connected, turn off activity */
6902 				drv_leds &= ~TR_ACTIVE_MASK;
6903 				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_DISABLE);
6904 
6905 				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6906 				new_led_state |=
6907 				    SGPIO0_TR_DRV_SET(drv_leds, i);
6908 
6909 				continue;
6910 			}
6911 
6912 			if ((cmn->nvs_activity & (1 << i)) == 0) {
6913 				/* connected, but not active */
6914 				drv_leds &= ~TR_ACTIVE_MASK;
6915 				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_ENABLE);
6916 
6917 				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6918 				new_led_state |=
6919 				    SGPIO0_TR_DRV_SET(drv_leds, i);
6920 
6921 				continue;
6922 			}
6923 
6924 			/* connected and active */
6925 			if (TR_ACTIVE(drv_leds) == TR_ACTIVE_ENABLE) {
6926 				/* was enabled, so disable */
6927 				drv_leds &= ~TR_ACTIVE_MASK;
6928 				drv_leds |=
6929 				    TR_ACTIVE_SET(TR_ACTIVE_DISABLE);
6930 
6931 				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6932 				new_led_state |=
6933 				    SGPIO0_TR_DRV_SET(drv_leds, i);
6934 			} else {
6935 				/* was disabled, so enable */
6936 				drv_leds &= ~TR_ACTIVE_MASK;
6937 				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_ENABLE);
6938 
6939 				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6940 				new_led_state |=
6941 				    SGPIO0_TR_DRV_SET(drv_leds, i);
6942 			}
6943 
6944 			/*
6945 			 * clear the activity bit
6946 			 * if there is drive activity again within the
6947 			 * loop interval (now 1/16 second), nvs_activity
6948 			 * will be reset and the "connected and active"
6949 			 * condition above will cause the LED to blink
6950 			 * off and on at the loop interval rate.  The
6951 			 * rate may be increased (interval shortened) as
6952 			 * long as it is not more than 1/30 second.
6953 			 */
6954 			mutex_enter(&cmn->nvs_slock);
6955 			cmn->nvs_activity &= ~(1 << i);
6956 			mutex_exit(&cmn->nvs_slock);
6957 		}
6958 
6959 		DTRACE_PROBE1(sgpio__new__led__state, int, new_led_state);
6960 
6961 		/* write out LED values */
6962 
6963 		mutex_enter(&cmn->nvs_slock);
6964 		cbp->sgpio0_tr &= ~TR_ACTIVE_MASK_ALL;
6965 		cbp->sgpio0_tr |= new_led_state;
6966 		cbp->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
6967 		mutex_exit(&cmn->nvs_slock);
6968 
6969 		if (nv_sgp_write_data(nvc) == NV_FAILURE) {
6970 			NVLOG(NVDBG_VERBOSE, nvc, NULL,
6971 			    "nv_sgp_write_data failure updating active LED",
6972 			    NULL);
6973 		}
6974 
6975 		/* now rest for the interval */
6976 		mutex_enter(&cmn->nvs_tlock);
6977 		ticks = drv_usectohz(cmn->nvs_taskq_delay);
6978 		if (ticks > 0)
6979 			(void) cv_reltimedwait(&cmn->nvs_cv, &cmn->nvs_tlock,
6980 			    ticks, TR_CLOCK_TICK);
6981 		mutex_exit(&cmn->nvs_tlock);
6982 	} while (ticks > 0);
6983 }
6984 
6985 /*
6986  * nv_sgp_drive_connect
6987  * Set the flag used to indicate that the drive is attached to the HBA.
6988  * Used to let the taskq know that it should turn the Activity LED on.
6989  */
6990 static void
6991 nv_sgp_drive_connect(nv_ctl_t *nvc, int drive)
6992 {
6993 	nv_sgp_cmn_t *cmn;
6994 
6995 	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6996 		return;
6997 	cmn = nvc->nvc_sgp_cmn;
6998 
6999 	mutex_enter(&cmn->nvs_slock);
7000 	cmn->nvs_connected |= (1 << drive);
7001 	mutex_exit(&cmn->nvs_slock);
7002 }
7003 
7004 /*
7005  * nv_sgp_drive_disconnect
7006  * Clears the flag used to indicate that the drive is no longer attached
7007  * to the HBA.  Used to let the taskq know that it should turn the
7008  * Activity LED off.  The flag that indicates that the drive is in use is
7009  * also cleared.
7010  */
7011 static void
7012 nv_sgp_drive_disconnect(nv_ctl_t *nvc, int drive)
7013 {
7014 	nv_sgp_cmn_t *cmn;
7015 
7016 	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
7017 		return;
7018 	cmn = nvc->nvc_sgp_cmn;
7019 
7020 	mutex_enter(&cmn->nvs_slock);
7021 	cmn->nvs_connected &= ~(1 << drive);
7022 	cmn->nvs_activity &= ~(1 << drive);
7023 	mutex_exit(&cmn->nvs_slock);
7024 }
7025 
7026 /*
7027  * nv_sgp_drive_active
7028  * Sets the flag used to indicate that the drive has been accessed and the
7029  * LED should be flicked off, then on.  It is cleared at a fixed time
7030  * interval by the LED taskq and set by the sata command start.
7031  */
7032 static void
7033 nv_sgp_drive_active(nv_ctl_t *nvc, int drive)
7034 {
7035 	nv_sgp_cmn_t *cmn;
7036 
7037 	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
7038 		return;
7039 	cmn = nvc->nvc_sgp_cmn;
7040 
7041 	DTRACE_PROBE1(sgpio__active, int, drive);
7042 
7043 	mutex_enter(&cmn->nvs_slock);
7044 	cmn->nvs_activity |= (1 << drive);
7045 	mutex_exit(&cmn->nvs_slock);
7046 }
7047 
7048 
7049 /*
7050  * nv_sgp_locate
7051  * Turns the Locate/OK2RM LED off or on for a particular drive.  State is
7052  * maintained in the SGPIO Control Block.
7053  */
7054 static void
7055 nv_sgp_locate(nv_ctl_t *nvc, int drive, int value)
7056 {
7057 	uint8_t leds;
7058 	volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
7059 	nv_sgp_cmn_t *cmn;
7060 
7061 	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
7062 		return;
7063 	cmn = nvc->nvc_sgp_cmn;
7064 
7065 	if ((drive < 0) || (drive >= SGPIO_DRV_CNT_VALUE))
7066 		return;
7067 
7068 	DTRACE_PROBE2(sgpio__locate, int, drive, int, value);
7069 
7070 	mutex_enter(&cmn->nvs_slock);
7071 
7072 	leds = SGPIO0_TR_DRV(cb->sgpio0_tr, drive);
7073 
7074 	leds &= ~TR_LOCATE_MASK;
7075 	leds |= TR_LOCATE_SET(value);
7076 
7077 	cb->sgpio0_tr &= SGPIO0_TR_DRV_CLR(drive);
7078 	cb->sgpio0_tr |= SGPIO0_TR_DRV_SET(leds, drive);
7079 
7080 	cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
7081 
7082 	mutex_exit(&cmn->nvs_slock);
7083 
7084 	if (nv_sgp_write_data(nvc) == NV_FAILURE) {
7085 		nv_cmn_err(CE_WARN, nvc, NULL,
7086 		    "nv_sgp_write_data failure updating OK2RM/Locate LED");
7087 	}
7088 }
7089 
7090 /*
7091  * nv_sgp_error
7092  * Turns the Error/Failure LED off or on for a particular drive.  State is
7093  * maintained in the SGPIO Control Block.
7094  */
7095 static void
7096 nv_sgp_error(nv_ctl_t *nvc, int drive, int value)
7097 {
7098 	uint8_t leds;
7099 	volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
7100 	nv_sgp_cmn_t *cmn;
7101 
7102 	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
7103 		return;
7104 	cmn = nvc->nvc_sgp_cmn;
7105 
7106 	if ((drive < 0) || (drive >= SGPIO_DRV_CNT_VALUE))
7107 		return;
7108 
7109 	DTRACE_PROBE2(sgpio__error, int, drive, int, value);
7110 
7111 	mutex_enter(&cmn->nvs_slock);
7112 
7113 	leds = SGPIO0_TR_DRV(cb->sgpio0_tr, drive);
7114 
7115 	leds &= ~TR_ERROR_MASK;
7116 	leds |= TR_ERROR_SET(value);
7117 
7118 	cb->sgpio0_tr &= SGPIO0_TR_DRV_CLR(drive);
7119 	cb->sgpio0_tr |= SGPIO0_TR_DRV_SET(leds, drive);
7120 
7121 	cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
7122 
7123 	mutex_exit(&cmn->nvs_slock);
7124 
7125 	if (nv_sgp_write_data(nvc) == NV_FAILURE) {
7126 		nv_cmn_err(CE_WARN, nvc, NULL,
7127 		    "nv_sgp_write_data failure updating Fail/Error LED");
7128 	}
7129 }
7130 
7131 static void
7132 nv_sgp_cleanup(nv_ctl_t *nvc)
7133 {
7134 	int drive, i;
7135 	uint8_t drv_leds;
7136 	uint32_t led_state;
7137 	volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
7138 	nv_sgp_cmn_t *cmn = nvc->nvc_sgp_cmn;
7139 	extern void psm_unmap_phys(caddr_t, size_t);
7140 
7141 	/*
7142 	 * If the SGPIO Control Block isn't mapped or the shared data
7143 	 * structure isn't present in this instance, there isn't much that
7144 	 * can be cleaned up.
7145 	 */
7146 	if ((cb == NULL) || (cmn == NULL))
7147 		return;
7148 
7149 	/* turn off activity LEDs for this controller */
7150 	drv_leds = TR_ACTIVE_SET(TR_ACTIVE_DISABLE);
7151 
7152 	/* get the existing LED state */
7153 	led_state = cb->sgpio0_tr;
7154 
7155 	/* turn off port 0 */
7156 	drive = SGP_CTLR_PORT_TO_DRV(nvc->nvc_ctlr_num, 0);
7157 	led_state &= SGPIO0_TR_DRV_CLR(drive);
7158 	led_state |= SGPIO0_TR_DRV_SET(drv_leds, drive);
7159 
7160 	/* turn off port 1 */
7161 	drive = SGP_CTLR_PORT_TO_DRV(nvc->nvc_ctlr_num, 1);
7162 	led_state &= SGPIO0_TR_DRV_CLR(drive);
7163 	led_state |= SGPIO0_TR_DRV_SET(drv_leds, drive);
7164 
7165 	/* set the new led state, which should turn off this ctrl's LEDs */
7166 	cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
7167 	(void) nv_sgp_write_data(nvc);
7168 
7169 	/* clear the controller's in use bit */
7170 	mutex_enter(&cmn->nvs_slock);
7171 	cmn->nvs_in_use &= ~(1 << nvc->nvc_ctlr_num);
7172 	mutex_exit(&cmn->nvs_slock);
7173 
7174 	if (cmn->nvs_in_use == 0) {
7175 		/* if all "in use" bits cleared, take everything down */
7176 
7177 		if (cmn->nvs_taskq != NULL) {
7178 			/* allow activity taskq to exit */
7179 			cmn->nvs_taskq_delay = 0;
7180 			cv_broadcast(&cmn->nvs_cv);
7181 
7182 			/* then destroy it */
7183 			ddi_taskq_destroy(cmn->nvs_taskq);
7184 		}
7185 
7186 		/* turn off all of the LEDs */
7187 		cb->sgpio0_tr = 0;
7188 		cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
7189 		(void) nv_sgp_write_data(nvc);
7190 
7191 		cb->sgpio_sr = NULL;
7192 
7193 		/* zero out the CBP to cmn mapping */
7194 		for (i = 0; i < NV_MAX_CBPS; i++) {
7195 			if (nv_sgp_cbp2cmn[i].c2cm_cbp == cmn->nvs_cbp) {
7196 				nv_sgp_cbp2cmn[i].c2cm_cmn = NULL;
7197 				break;
7198 			}
7199 
7200 			if (nv_sgp_cbp2cmn[i].c2cm_cbp == 0)
7201 				break;
7202 		}
7203 
7204 		/* free resources */
7205 		cv_destroy(&cmn->nvs_cv);
7206 		mutex_destroy(&cmn->nvs_tlock);
7207 		mutex_destroy(&cmn->nvs_slock);
7208 
7209 		kmem_free(nvc->nvc_sgp_cmn, sizeof (nv_sgp_cmn_t));
7210 	}
7211 
7212 	nvc->nvc_sgp_cmn = NULL;
7213 
7214 	/* unmap the SGPIO Control Block */
7215 	psm_unmap_phys((caddr_t)nvc->nvc_sgp_cbp, sizeof (nv_sgp_cb_t));
7216 }
7217 #endif	/* SGPIO_SUPPORT */
7218