xref: /illumos-gate/usr/src/uts/common/io/sata/adapters/nv_sata/nv_sata.c (revision 6148443adeb5d3f493cee0d19110b32a0189bd41)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24  */
25 
26 /*
27  *
28  * nv_sata is a combo SATA HBA driver for ck804/mcp5x (mcp5x = mcp55/mcp51)
29  * based chipsets.
30  *
31  * NCQ
32  * ---
33  *
34  * A portion of the NCQ support is in place, but it is incomplete.  NCQ is
35  * disabled and is likely to be revisited in the future.
36  *
37  *
38  * Power Management
39  * ----------------
40  *
41  * Normally power management would be responsible for ensuring the device
42  * is quiescent and then changing the power state of the device, such as
43  * powering down parts or all of the device.  mcp5x/ck804 is unique in
44  * that it is only available as part of a larger southbridge chipset, so
45  * removing power from the device isn't possible.  Switches to control
46  * power management states D0/D3 in the PCI configuration space appear to
47  * be supported, but changes to these states are apparently ignored.
48  * The only further PM that the driver _could_ do is shut down the PHY,
49  * but in order to deliver the first rev of the driver sooner rather than
50  * later, that will be deferred until some future phase.
51  *
52  * Since the driver currently will not directly change any power state to
53  * the device, no power() entry point will be required.  However, it is
54  * possible that in ACPI power state S3, aka suspend to RAM, power
55  * can be removed from the device, and the driver cannot rely on BIOS to
56  * have reset any state.  For the time being, there are no known
57  * non-default configurations that need to be programmed.  This judgement
58  * is based on the port of the legacy ata driver not having any such
59  * functionality and based on conversations with the PM team.  If such a
60  * restoration is later deemed necessary it can be incorporated into the
61  * DDI_RESUME processing.
62  *
63  */
64 
65 #include <sys/scsi/scsi.h>
66 #include <sys/pci.h>
67 #include <sys/byteorder.h>
68 #include <sys/sunddi.h>
69 #include <sys/sata/sata_hba.h>
70 #ifdef SGPIO_SUPPORT
71 #include <sys/sata/adapters/nv_sata/nv_sgpio.h>
72 #include <sys/devctl.h>
73 #include <sys/sdt.h>
74 #endif
75 #include <sys/sata/adapters/nv_sata/nv_sata.h>
76 #include <sys/disp.h>
77 #include <sys/note.h>
78 #include <sys/promif.h>
79 
80 
81 /*
82  * Function prototypes for driver entry points
83  */
84 static int nv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
85 static int nv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
86 static int nv_quiesce(dev_info_t *dip);
87 static int nv_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd,
88     void *arg, void **result);
89 
90 /*
91  * Function prototypes for entry points from sata service module
92  * These functions are distinguished from other local functions
93  * by the prefix "nv_sata_"
94  */
95 static int nv_sata_start(dev_info_t *dip, sata_pkt_t *spkt);
96 static int nv_sata_abort(dev_info_t *dip, sata_pkt_t *spkt, int);
97 static int nv_sata_reset(dev_info_t *dip, sata_device_t *sd);
98 static int nv_sata_activate(dev_info_t *dip, sata_device_t *sd);
99 static int nv_sata_deactivate(dev_info_t *dip, sata_device_t *sd);
100 
101 /*
102  * Local function prototypes
103  */
104 static uint_t mcp5x_intr(caddr_t arg1, caddr_t arg2);
105 static uint_t ck804_intr(caddr_t arg1, caddr_t arg2);
106 static int nv_add_legacy_intrs(nv_ctl_t *nvc);
107 #ifdef NV_MSI_SUPPORTED
108 static int nv_add_msi_intrs(nv_ctl_t *nvc);
109 #endif
110 static void nv_rem_intrs(nv_ctl_t *nvc);
111 static int nv_start_common(nv_port_t *nvp, sata_pkt_t *spkt);
112 static int nv_start_nodata(nv_port_t *nvp, int slot);
113 static void nv_intr_nodata(nv_port_t *nvp, nv_slot_t *spkt);
114 static int nv_start_pio_in(nv_port_t *nvp, int slot);
115 static int nv_start_pio_out(nv_port_t *nvp, int slot);
116 static void nv_intr_pio_in(nv_port_t *nvp, nv_slot_t *spkt);
117 static void nv_intr_pio_out(nv_port_t *nvp, nv_slot_t *spkt);
118 static int nv_start_pkt_pio(nv_port_t *nvp, int slot);
119 static void nv_intr_pkt_pio(nv_port_t *nvp, nv_slot_t *nv_slotp);
120 static int nv_start_dma(nv_port_t *nvp, int slot);
121 static void nv_intr_dma(nv_port_t *nvp, struct nv_slot *spkt);
122 static void nv_uninit_ctl(nv_ctl_t *nvc);
123 static void mcp5x_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
124 static void ck804_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
125 static void nv_uninit_port(nv_port_t *nvp);
126 static int nv_init_port(nv_port_t *nvp);
127 static int nv_init_ctl(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
128 static int mcp5x_packet_complete_intr(nv_ctl_t *nvc, nv_port_t *nvp);
129 #ifdef NCQ
130 static int mcp5x_dma_setup_intr(nv_ctl_t *nvc, nv_port_t *nvp);
131 #endif
132 static void nv_start_dma_engine(nv_port_t *nvp, int slot);
133 static void nv_port_state_change(nv_port_t *nvp, int event, uint8_t addr_type,
134     int state);
135 static void nv_common_reg_init(nv_ctl_t *nvc);
136 static void ck804_intr_process(nv_ctl_t *nvc, uint8_t intr_status);
137 static void nv_reset(nv_port_t *nvp, char *reason);
138 static void nv_complete_io(nv_port_t *nvp,  sata_pkt_t *spkt, int slot);
139 static void nv_timeout(void *);
140 static int nv_poll_wait(nv_port_t *nvp, sata_pkt_t *spkt);
141 static void nv_cmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...);
142 static void nv_read_signature(nv_port_t *nvp);
143 static void mcp5x_set_intr(nv_port_t *nvp, int flag);
144 static void ck804_set_intr(nv_port_t *nvp, int flag);
145 static void nv_resume(nv_port_t *nvp);
146 static void nv_suspend(nv_port_t *nvp);
147 static int nv_start_sync(nv_port_t *nvp, sata_pkt_t *spkt);
148 static int nv_abort_active(nv_port_t *nvp, sata_pkt_t *spkt, int abort_reason,
149     int flag);
150 static void nv_copy_registers(nv_port_t *nvp, sata_device_t *sd,
151     sata_pkt_t *spkt);
152 static void nv_report_add_remove(nv_port_t *nvp, int flags);
153 static int nv_start_async(nv_port_t *nvp, sata_pkt_t *spkt);
154 static int nv_wait3(nv_port_t *nvp, uchar_t onbits1, uchar_t offbits1,
155     uchar_t failure_onbits2, uchar_t failure_offbits2,
156     uchar_t failure_onbits3, uchar_t failure_offbits3,
157     uint_t timeout_usec, int type_wait);
158 static int nv_wait(nv_port_t *nvp, uchar_t onbits, uchar_t offbits,
159     uint_t timeout_usec, int type_wait);
160 static int nv_start_rqsense_pio(nv_port_t *nvp, nv_slot_t *nv_slotp);
161 static void nv_init_port_link_processing(nv_ctl_t *nvc);
162 static void nv_setup_timeout(nv_port_t *nvp, int time);
163 static void nv_monitor_reset(nv_port_t *nvp);
164 static int nv_bm_status_clear(nv_port_t *nvp);
165 static void nv_log(nv_ctl_t *nvc, nv_port_t *nvp, const char *fmt, ...);
166 
167 #ifdef SGPIO_SUPPORT
168 static int nv_open(dev_t *devp, int flag, int otyp, cred_t *credp);
169 static int nv_close(dev_t dev, int flag, int otyp, cred_t *credp);
170 static int nv_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
171     cred_t *credp, int *rvalp);
172 
173 static void nv_sgp_led_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
174 static int nv_sgp_detect(ddi_acc_handle_t pci_conf_handle, uint16_t *csrpp,
175     uint32_t *cbpp);
176 static int nv_sgp_init(nv_ctl_t *nvc);
177 static int nv_sgp_check_set_cmn(nv_ctl_t *nvc);
178 static int nv_sgp_csr_read(nv_ctl_t *nvc);
179 static void nv_sgp_csr_write(nv_ctl_t *nvc, uint32_t val);
180 static int nv_sgp_write_data(nv_ctl_t *nvc);
181 static void nv_sgp_activity_led_ctl(void *arg);
182 static void nv_sgp_drive_connect(nv_ctl_t *nvc, int drive);
183 static void nv_sgp_drive_disconnect(nv_ctl_t *nvc, int drive);
184 static void nv_sgp_drive_active(nv_ctl_t *nvc, int drive);
185 static void nv_sgp_locate(nv_ctl_t *nvc, int drive, int value);
186 static void nv_sgp_error(nv_ctl_t *nvc, int drive, int value);
187 static void nv_sgp_cleanup(nv_ctl_t *nvc);
188 #endif
189 
190 
191 /*
192  * DMA attributes for the data buffer for x86.  dma_attr_burstsizes is unused.
193  * Verify whether it is needed if ported to another ISA.
194  */
195 static ddi_dma_attr_t buffer_dma_attr = {
196 	DMA_ATTR_V0,		/* dma_attr_version */
197 	0,			/* dma_attr_addr_lo: lowest bus address */
198 	0xffffffffull,		/* dma_attr_addr_hi: */
199 	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max i.e for one cookie */
200 	4,			/* dma_attr_align */
201 	1,			/* dma_attr_burstsizes. */
202 	1,			/* dma_attr_minxfer */
203 	0xffffffffull,		/* dma_attr_maxxfer including all cookies */
204 	0xffffffffull,		/* dma_attr_seg */
205 	NV_DMA_NSEGS,		/* dma_attr_sgllen */
206 	512,			/* dma_attr_granular */
207 	0,			/* dma_attr_flags */
208 };
209 static ddi_dma_attr_t buffer_dma_40bit_attr = {
210 	DMA_ATTR_V0,		/* dma_attr_version */
211 	0,			/* dma_attr_addr_lo: lowest bus address */
212 	0xffffffffffull,	/* dma_attr_addr_hi: */
213 	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max i.e for one cookie */
214 	4,			/* dma_attr_align */
215 	1,			/* dma_attr_burstsizes. */
216 	1,			/* dma_attr_minxfer */
217 	0xffffffffull,		/* dma_attr_maxxfer including all cookies */
218 	0xffffffffull,		/* dma_attr_seg */
219 	NV_DMA_NSEGS,		/* dma_attr_sgllen */
220 	512,			/* dma_attr_granular */
221 	0,			/* dma_attr_flags */
222 };
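/*
 * Illustrative sketch only (not necessarily how this driver allocates its
 * handles): a data-buffer DMA handle built from the attributes above would
 * typically be created with something like
 *
 *	ddi_dma_handle_t dma_hdl;
 *
 *	if (ddi_dma_alloc_handle(dip, &buffer_dma_attr, DDI_DMA_SLEEP,
 *	    NULL, &dma_hdl) != DDI_SUCCESS) {
 *		(handle the allocation failure)
 *	}
 */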
223 
224 
225 /*
226  * DMA attributes for PRD tables
227  */
228 ddi_dma_attr_t nv_prd_dma_attr = {
229 	DMA_ATTR_V0,		/* dma_attr_version */
230 	0,			/* dma_attr_addr_lo */
231 	0xffffffffull,		/* dma_attr_addr_hi */
232 	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max */
233 	4,			/* dma_attr_align */
234 	1,			/* dma_attr_burstsizes */
235 	1,			/* dma_attr_minxfer */
236 	NV_BM_64K_BOUNDARY,	/* dma_attr_maxxfer */
237 	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_seg */
238 	1,			/* dma_attr_sgllen */
239 	1,			/* dma_attr_granular */
240 	0			/* dma_attr_flags */
241 };
242 
243 /*
244  * Device access attributes
245  */
246 static ddi_device_acc_attr_t accattr = {
247     DDI_DEVICE_ATTR_V0,
248     DDI_STRUCTURE_LE_ACC,
249     DDI_STRICTORDER_ACC
250 };
251 
252 
253 #ifdef SGPIO_SUPPORT
254 static struct cb_ops nv_cb_ops = {
255 	nv_open,		/* open */
256 	nv_close,		/* close */
257 	nodev,			/* strategy (block) */
258 	nodev,			/* print (block) */
259 	nodev,			/* dump (block) */
260 	nodev,			/* read */
261 	nodev,			/* write */
262 	nv_ioctl,		/* ioctl */
263 	nodev,			/* devmap */
264 	nodev,			/* mmap */
265 	nodev,			/* segmap */
266 	nochpoll,		/* chpoll */
267 	ddi_prop_op,		/* prop_op */
268 	NULL,			/* streams */
269 	D_NEW | D_MP |
270 	D_64BIT | D_HOTPLUG,	/* flags */
271 	CB_REV			/* rev */
272 };
273 #endif  /* SGPIO_SUPPORT */
274 
275 
276 static struct dev_ops nv_dev_ops = {
277 	DEVO_REV,		/* devo_rev */
278 	0,			/* refcnt  */
279 	nv_getinfo,		/* info */
280 	nulldev,		/* identify */
281 	nulldev,		/* probe */
282 	nv_attach,		/* attach */
283 	nv_detach,		/* detach */
284 	nodev,			/* no reset */
285 #ifdef SGPIO_SUPPORT
286 	&nv_cb_ops,		/* driver operations */
287 #else
288 	(struct cb_ops *)0,	/* driver operations */
289 #endif
290 	NULL,			/* bus operations */
291 	NULL,			/* power */
292 	nv_quiesce		/* quiesce */
293 };
294 
295 
296 /*
297  * Request Sense CDB for ATAPI
298  */
299 static const uint8_t nv_rqsense_cdb[16] = {
300 	SCMD_REQUEST_SENSE,
301 	0,
302 	0,
303 	0,
304 	SATA_ATAPI_MIN_RQSENSE_LEN,
305 	0,
306 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0	/* pad out to max CDB length */
307 };
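/*
 * Byte 4 above is the REQUEST SENSE allocation length; the trailing zero
 * bytes simply pad the CDB out to the maximum ATAPI CDB length.
 */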
308 
309 
310 static sata_tran_hotplug_ops_t nv_hotplug_ops;
311 
312 extern struct mod_ops mod_driverops;
313 
314 static  struct modldrv modldrv = {
315 	&mod_driverops,	/* driverops */
316 	"Nvidia ck804/mcp51/mcp55 HBA",
317 	&nv_dev_ops,	/* driver ops */
318 };
319 
320 static  struct modlinkage modlinkage = {
321 	MODREV_1,
322 	&modldrv,
323 	NULL
324 };
325 
326 
327 /*
328  * Wait for a signature.
329  * If this variable is non-zero, the driver will wait for a device signature
330  * before reporting a device reset to the sata module.
331  * Some (most?) drives will not process commands sent to them before the D2H
332  * FIS is sent to the host.
333  */
334 int nv_wait_for_signature = 1;
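/*
 * This and the other global tunables below can be overridden at boot time
 * from /etc/system, for example (illustrative only, assuming the module
 * name nv_sata):
 *
 *	set nv_sata:nv_wait_for_signature = 0
 */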
335 
336 /*
337  * Check for signature availability.
338  * If this variable is non-zero, the driver will check the task file error
339  * register for an indication of signature availability before reading a signature.
340  * Task file error register bit 0 set to 1 indicates that the drive
341  * is ready and it has sent the D2H FIS with a signature.
342  * This behavior of the error register is not reliable in the mcp5x controller.
343  */
344 int nv_check_tfr_error = 0;
345 
346 /*
347  * Max signature acquisition time, in milliseconds.
348  * The driver will try to acquire a device signature within the specified time
349  * and quit the acquisition operation if the signature was not acquired.
350  */
351 long nv_sig_acquisition_time = NV_SIG_ACQUISITION_TIME;
352 
353 /*
354  * If this variable is non-zero, the driver will wait for a signature in the
355  * nv_monitor_reset function without any time limit.
356  * Used for debugging and drive evaluation.
357  */
358 int nv_wait_here_forever = 0;
359 
360 /*
361  * Reset after hotplug.
362  * If this variable is non-zero, the driver will reset the device after a
363  * hotplug (device attached) interrupt.
364  * If the variable is zero, the driver will neither reset the new device nor
365  * try to read the device signature.
366  * The chipset generates a hotplug (device attached) interrupt with a delay,
367  * so the device should have already sent the D2H FIS with the signature.
368  */
369 int nv_reset_after_hotplug = 1;
370 
371 /*
372  * Delay after device hotplug.
373  * It specifies the time between detecting a hotplugged device and sending
374  * a notification to the SATA module.
375  * It is used when the device is not reset after hotplugging and acquiring the
376  * signature may be unreliable.  The delay should be long enough for a device
377  * to become ready to accept commands.
378  */
379 int nv_hotplug_delay = NV_HOTPLUG_DELAY;
380 
381 
382 /*
383  * Maximum number of consecutive interrupts processed in the loop in the
384  * single invocation of the port interrupt routine.
385  */
386 int nv_max_intr_loops = NV_MAX_INTR_PER_DEV;
387 
388 
389 
390 /*
391  * wait between checks of reg status
392  */
393 int nv_usec_delay = NV_WAIT_REG_CHECK;
394 
395 /*
396  * The following is needed for nv_vcmn_err()
397  */
398 static kmutex_t nv_log_mutex; /* protects nv_log_buf */
399 static char nv_log_buf[NV_LOGBUF_LEN];
400 int nv_debug_flags =
401     NVDBG_HOT|NVDBG_RESET|NVDBG_ALWAYS|NVDBG_TIMEOUT|NVDBG_EVENT;
402 int nv_log_to_console = B_FALSE;
403 
404 int nv_prom_print = B_FALSE;
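/*
 * For live debugging, these can also be changed in a running kernel with
 * mdb -kw, for example (illustrative only):
 *
 *	> nv_debug_flags/W 0xffff
 *	> nv_log_to_console/W 1
 */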
405 
406 /*
407  * for debugging
408  */
409 #ifdef DEBUG
410 int ncq_commands = 0;
411 int non_ncq_commands = 0;
412 #endif
413 
414 /*
415  * Opaque state pointer to be initialized by ddi_soft_state_init()
416  */
417 static void *nv_statep	= NULL;
418 
419 /*
420  * Map from CBP to shared space
421  *
422  * When an MCP55/IO55 part supports SGPIO, there is a single CBP (SGPIO
423  * Control Block Pointer as well as the corresponding Control Block) that
424  * is shared across all driver instances associated with that part.  The
425  * Control Block is used to update and query the LED state for the devices
426  * on the controllers associated with those instances.  There is also some
427  * driver state (called the 'common' area here) associated with each SGPIO
428  * Control Block.  The nv_sgp_cbp2cmn array is used to map a given CBP to its
429  * control area.
430  *
431  * The driver can also use this mapping array to determine whether the
432  * common area for a given CBP has been initialized, and, if it isn't
433  * initialized, initialize it.
434  *
435  * When a driver instance with a CBP value that is already in the array is
436  * initialized, it will use the pointer to the previously initialized common
437  * area associated with that SGPIO CBP value, rather than initialize it
438  * itself.
439  *
440  * nv_sgp_c2c_mutex is used to synchronize access to this mapping array.
441  */
442 #ifdef SGPIO_SUPPORT
443 static kmutex_t nv_sgp_c2c_mutex;
444 static struct nv_sgp_cbp2cmn nv_sgp_cbp2cmn[NV_MAX_CBPS];
445 #endif
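/*
 * A minimal sketch of the CBP-to-common-area lookup described above
 * (illustrative only; the actual lookup and initialization code may differ):
 *
 *	mutex_enter(&nv_sgp_c2c_mutex);
 *	for (i = 0; i < NV_MAX_CBPS; i++) {
 *		if (nv_sgp_cbp2cmn[i].c2cm_cbp == cbp) {
 *			cmn = nv_sgp_cbp2cmn[i].c2cm_cmn;
 *			break;
 *		}
 *	}
 *	mutex_exit(&nv_sgp_c2c_mutex);
 */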
446 
447 /* 40-bit DMA is enabled by default; set to B_FALSE to disable it */
448 int nv_sata_40bit_dma = B_TRUE;
449 
450 static sata_tran_hotplug_ops_t nv_hotplug_ops = {
451 	SATA_TRAN_HOTPLUG_OPS_REV_1,	/* structure version */
452 	nv_sata_activate,	/* activate port. cfgadm -c connect */
453 	nv_sata_deactivate	/* deactivate port. cfgadm -c disconnect */
454 };
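/*
 * These entry points are invoked through the sata module when a port is
 * configured or unconfigured from userland, typically via commands such as
 * (attachment point names vary by system):
 *
 *	cfgadm -c connect sata0/0
 *	cfgadm -c disconnect sata0/0
 */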
455 
456 
457 /*
458  *  nv module initialization
459  */
460 int
461 _init(void)
462 {
463 	int	error;
464 #ifdef SGPIO_SUPPORT
465 	int	i;
466 #endif
467 
468 	error = ddi_soft_state_init(&nv_statep, sizeof (nv_ctl_t), 0);
469 
470 	if (error != 0) {
471 
472 		return (error);
473 	}
474 
475 	mutex_init(&nv_log_mutex, NULL, MUTEX_DRIVER, NULL);
476 #ifdef SGPIO_SUPPORT
477 	mutex_init(&nv_sgp_c2c_mutex, NULL, MUTEX_DRIVER, NULL);
478 
479 	for (i = 0; i < NV_MAX_CBPS; i++) {
480 		nv_sgp_cbp2cmn[i].c2cm_cbp = 0;
481 		nv_sgp_cbp2cmn[i].c2cm_cmn = NULL;
482 	}
483 #endif
484 
485 	if ((error = sata_hba_init(&modlinkage)) != 0) {
486 		ddi_soft_state_fini(&nv_statep);
487 		mutex_destroy(&nv_log_mutex);
488 
489 		return (error);
490 	}
491 
492 	error = mod_install(&modlinkage);
493 	if (error != 0) {
494 		sata_hba_fini(&modlinkage);
495 		ddi_soft_state_fini(&nv_statep);
496 		mutex_destroy(&nv_log_mutex);
497 
498 		return (error);
499 	}
500 
501 	return (error);
502 }
503 
504 
505 /*
506  * nv module uninitialize
507  */
508 int
509 _fini(void)
510 {
511 	int	error;
512 
513 	error = mod_remove(&modlinkage);
514 
515 	if (error != 0) {
516 		return (error);
517 	}
518 
519 	/*
520 	 * remove the resources allocated in _init()
521 	 */
522 	mutex_destroy(&nv_log_mutex);
523 #ifdef SGPIO_SUPPORT
524 	mutex_destroy(&nv_sgp_c2c_mutex);
525 #endif
526 	sata_hba_fini(&modlinkage);
527 	ddi_soft_state_fini(&nv_statep);
528 
529 	return (error);
530 }
531 
532 
533 /*
534  * nv _info entry point
535  */
536 int
537 _info(struct modinfo *modinfop)
538 {
539 	return (mod_info(&modlinkage, modinfop));
540 }
541 
542 
543 /*
544  * these wrappers for ddi_{get,put}{8,16,32} are for observability
545  * with dtrace
546  */
547 #ifdef DEBUG
548 
549 static void
550 nv_put8(ddi_acc_handle_t handle, uint8_t *dev_addr, uint8_t value)
551 {
552 	ddi_put8(handle, dev_addr, value);
553 }
554 
555 static void
556 nv_put32(ddi_acc_handle_t handle, uint32_t *dev_addr, uint32_t value)
557 {
558 	ddi_put32(handle, dev_addr, value);
559 }
560 
561 static uint32_t
562 nv_get32(ddi_acc_handle_t handle, uint32_t *dev_addr)
563 {
564 	return (ddi_get32(handle, dev_addr));
565 }
566 
567 static void
568 nv_put16(ddi_acc_handle_t handle, uint16_t *dev_addr, uint16_t value)
569 {
570 	ddi_put16(handle, dev_addr, value);
571 }
572 
573 static uint16_t
574 nv_get16(ddi_acc_handle_t handle, uint16_t *dev_addr)
575 {
576 	return (ddi_get16(handle, dev_addr));
577 }
578 
579 static uint8_t
580 nv_get8(ddi_acc_handle_t handle, uint8_t *dev_addr)
581 {
582 	return (ddi_get8(handle, dev_addr));
583 }
584 
585 #else
586 
587 #define	nv_put8 ddi_put8
588 #define	nv_put32 ddi_put32
589 #define	nv_get32 ddi_get32
590 #define	nv_put16 ddi_put16
591 #define	nv_get16 ddi_get16
592 #define	nv_get8 ddi_get8
593 
594 #endif
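/*
 * Example of observing register accesses with DTrace on a DEBUG build
 * (illustrative only; assumes the wrappers are not inlined):
 *
 *	dtrace -n 'fbt:nv_sata:nv_put8:entry { printf("val=0x%x", arg2); }'
 */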
595 
596 
597 /*
598  * Driver attach
599  */
600 static int
601 nv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
602 {
603 	int status, attach_state, intr_types, bar, i, command;
604 	int inst = ddi_get_instance(dip);
605 	ddi_acc_handle_t pci_conf_handle;
606 	nv_ctl_t *nvc;
607 	uint8_t subclass;
608 	uint32_t reg32;
609 #ifdef SGPIO_SUPPORT
610 	pci_regspec_t *regs;
611 	int rlen;
612 #endif
613 
614 	switch (cmd) {
615 
616 	case DDI_ATTACH:
617 
618 		attach_state = ATTACH_PROGRESS_NONE;
619 
620 		status = ddi_soft_state_zalloc(nv_statep, inst);
621 
622 		if (status != DDI_SUCCESS) {
623 			break;
624 		}
625 
626 		nvc = ddi_get_soft_state(nv_statep, inst);
627 
628 		nvc->nvc_dip = dip;
629 
630 		NVLOG(NVDBG_INIT, nvc, NULL, "nv_attach(): DDI_ATTACH", NULL);
631 
632 		attach_state |= ATTACH_PROGRESS_STATEP_ALLOC;
633 
634 		if (pci_config_setup(dip, &pci_conf_handle) == DDI_SUCCESS) {
635 			nvc->nvc_revid = pci_config_get8(pci_conf_handle,
636 			    PCI_CONF_REVID);
637 			NVLOG(NVDBG_INIT, nvc, NULL,
638 			    "inst %d: silicon revid is %x nv_debug_flags=%x",
639 			    inst, nvc->nvc_revid, nv_debug_flags);
640 		} else {
641 			break;
642 		}
643 
644 		attach_state |= ATTACH_PROGRESS_CONF_HANDLE;
645 
646 		/*
647 		 * Set the PCI command register: enable IO/MEM/Master.
648 		 */
649 		command = pci_config_get16(pci_conf_handle, PCI_CONF_COMM);
650 		pci_config_put16(pci_conf_handle, PCI_CONF_COMM,
651 		    command|PCI_COMM_IO|PCI_COMM_MAE|PCI_COMM_ME);
652 
653 		subclass = pci_config_get8(pci_conf_handle, PCI_CONF_SUBCLASS);
654 
655 		if (subclass & PCI_MASS_RAID) {
656 			cmn_err(CE_WARN,
657 			    "attach failed: RAID mode not supported");
658 
659 			break;
660 		}
661 
662 		/*
663 		 * the 6 bars of the controller are:
664 		 * 0: port 0 task file
665 		 * 1: port 0 status
666 		 * 2: port 1 task file
667 		 * 3: port 1 status
668 		 * 4: bus master for both ports
669 		 * 5: extended registers for SATA features
670 		 */
671 		for (bar = 0; bar < 6; bar++) {
672 			status = ddi_regs_map_setup(dip, bar + 1,
673 			    (caddr_t *)&nvc->nvc_bar_addr[bar], 0, 0, &accattr,
674 			    &nvc->nvc_bar_hdl[bar]);
675 
676 			if (status != DDI_SUCCESS) {
677 				NVLOG(NVDBG_INIT, nvc, NULL,
678 				    "ddi_regs_map_setup failure for bar"
679 				    " %d status = %d", bar, status);
680 				break;
681 			}
682 		}
683 
684 		attach_state |= ATTACH_PROGRESS_BARS;
685 
686 		/*
687 		 * initialize controller structures
688 		 */
689 		status = nv_init_ctl(nvc, pci_conf_handle);
690 
691 		if (status == NV_FAILURE) {
692 			NVLOG(NVDBG_INIT, nvc, NULL, "nv_init_ctl failed",
693 			    NULL);
694 
695 			break;
696 		}
697 
698 		attach_state |= ATTACH_PROGRESS_CTL_SETUP;
699 
700 		/*
701 		 * initialize mutexes
702 		 */
703 		mutex_init(&nvc->nvc_mutex, NULL, MUTEX_DRIVER,
704 		    DDI_INTR_PRI(nvc->nvc_intr_pri));
705 
706 		attach_state |= ATTACH_PROGRESS_MUTEX_INIT;
707 
708 		/*
709 		 * get supported interrupt types
710 		 */
711 		if (ddi_intr_get_supported_types(dip, &intr_types) !=
712 		    DDI_SUCCESS) {
713 			nv_cmn_err(CE_WARN, nvc, NULL,
714 			    "ddi_intr_get_supported_types failed");
715 
716 			break;
717 		}
718 
719 		NVLOG(NVDBG_INIT, nvc, NULL,
720 		    "ddi_intr_get_supported_types() returned: 0x%x",
721 		    intr_types);
722 
723 #ifdef NV_MSI_SUPPORTED
724 		if (intr_types & DDI_INTR_TYPE_MSI) {
725 			NVLOG(NVDBG_INIT, nvc, NULL,
726 			    "using MSI interrupt type", NULL);
727 
728 			/*
729 			 * Try MSI first, but fall back to legacy if MSI
730 			 * attach fails
731 			 */
732 			if (nv_add_msi_intrs(nvc) == DDI_SUCCESS) {
733 				nvc->nvc_intr_type = DDI_INTR_TYPE_MSI;
734 				attach_state |= ATTACH_PROGRESS_INTR_ADDED;
735 				NVLOG(NVDBG_INIT, nvc, NULL,
736 				    "MSI interrupt setup done", NULL);
737 			} else {
738 				nv_cmn_err(CE_CONT, nvc, NULL,
739 				    "MSI registration failed "
740 				    "will try Legacy interrupts");
741 			}
742 		}
743 #endif
744 
745 		/*
746 		 * Either the MSI interrupt setup has failed or only
747 		 * the fixed interrupts are available on the system.
748 		 */
749 		if (!(attach_state & ATTACH_PROGRESS_INTR_ADDED) &&
750 		    (intr_types & DDI_INTR_TYPE_FIXED)) {
751 
752 			NVLOG(NVDBG_INIT, nvc, NULL,
753 			    "using Legacy interrupt type", NULL);
754 
755 			if (nv_add_legacy_intrs(nvc) == DDI_SUCCESS) {
756 				nvc->nvc_intr_type = DDI_INTR_TYPE_FIXED;
757 				attach_state |= ATTACH_PROGRESS_INTR_ADDED;
758 				NVLOG(NVDBG_INIT, nvc, NULL,
759 				    "Legacy interrupt setup done", NULL);
760 			} else {
761 				nv_cmn_err(CE_WARN, nvc, NULL,
762 				    "legacy interrupt setup failed");
763 				NVLOG(NVDBG_INIT, nvc, NULL,
764 				    "legacy interrupt setup failed", NULL);
765 				break;
766 			}
767 		}
768 
769 		if (!(attach_state & ATTACH_PROGRESS_INTR_ADDED)) {
770 			NVLOG(NVDBG_INIT, nvc, NULL,
771 			    "no interrupts registered", NULL);
772 			break;
773 		}
774 
775 #ifdef SGPIO_SUPPORT
776 		/*
777 		 * save off the controller number
778 		 */
779 		(void) ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
780 		    "reg", (caddr_t)&regs, &rlen);
781 		nvc->nvc_ctlr_num = PCI_REG_FUNC_G(regs->pci_phys_hi);
782 		kmem_free(regs, rlen);
783 
784 		/*
785 		 * initialize SGPIO
786 		 */
787 		nv_sgp_led_init(nvc, pci_conf_handle);
788 #endif	/* SGPIO_SUPPORT */
789 
790 		/*
791 		 * Initiate link processing and device identification
792 		 */
793 		nv_init_port_link_processing(nvc);
794 		/*
795 		 * attach to sata module
796 		 */
797 		if (sata_hba_attach(nvc->nvc_dip,
798 		    &nvc->nvc_sata_hba_tran,
799 		    DDI_ATTACH) != DDI_SUCCESS) {
800 			attach_state |= ATTACH_PROGRESS_SATA_MODULE;
801 
802 			break;
803 		}
804 
805 		pci_config_teardown(&pci_conf_handle);
806 
807 		NVLOG(NVDBG_INIT, nvc, NULL, "nv_attach DDI_SUCCESS", NULL);
808 
809 		return (DDI_SUCCESS);
810 
811 	case DDI_RESUME:
812 
813 		nvc = ddi_get_soft_state(nv_statep, inst);
814 
815 		NVLOG(NVDBG_INIT, nvc, NULL,
816 		    "nv_attach(): DDI_RESUME inst %d", inst);
817 
818 		if (pci_config_setup(dip, &pci_conf_handle) != DDI_SUCCESS) {
819 			return (DDI_FAILURE);
820 		}
821 
822 		/*
823 		 * Set the PCI command register: enable IO/MEM/Master.
824 		 */
825 		command = pci_config_get16(pci_conf_handle, PCI_CONF_COMM);
826 		pci_config_put16(pci_conf_handle, PCI_CONF_COMM,
827 		    command|PCI_COMM_IO|PCI_COMM_MAE|PCI_COMM_ME);
828 
829 		/*
830 		 * Need to set bit 2 to 1 at config offset 0x50
831 		 * to enable access to the bar5 registers.
832 		 */
833 		reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
834 
835 		if ((reg32 & NV_BAR5_SPACE_EN) != NV_BAR5_SPACE_EN) {
836 			pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
837 			    reg32 | NV_BAR5_SPACE_EN);
838 		}
839 
840 		nvc->nvc_state &= ~NV_CTRL_SUSPEND;
841 
842 		for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
843 			nv_resume(&(nvc->nvc_port[i]));
844 		}
845 
846 		pci_config_teardown(&pci_conf_handle);
847 
848 		return (DDI_SUCCESS);
849 
850 	default:
851 		return (DDI_FAILURE);
852 	}
853 
854 
855 	/*
856 	 * DDI_ATTACH failure path starts here
857 	 */
858 
859 	if (attach_state & ATTACH_PROGRESS_INTR_ADDED) {
860 		nv_rem_intrs(nvc);
861 	}
862 
863 	if (attach_state & ATTACH_PROGRESS_SATA_MODULE) {
864 		/*
865 		 * Remove timers
866 		 */
867 		int port = 0;
868 		nv_port_t *nvp;
869 
870 		for (; port < NV_MAX_PORTS(nvc); port++) {
871 			nvp = &(nvc->nvc_port[port]);
872 			if (nvp->nvp_timeout_id != 0) {
873 				(void) untimeout(nvp->nvp_timeout_id);
874 			}
875 		}
876 	}
877 
878 	if (attach_state & ATTACH_PROGRESS_MUTEX_INIT) {
879 		mutex_destroy(&nvc->nvc_mutex);
880 	}
881 
882 	if (attach_state & ATTACH_PROGRESS_CTL_SETUP) {
883 		nv_uninit_ctl(nvc);
884 	}
885 
886 	if (attach_state & ATTACH_PROGRESS_BARS) {
887 		while (--bar >= 0) {
888 			ddi_regs_map_free(&nvc->nvc_bar_hdl[bar]);
889 		}
890 	}
891 
892 	if (attach_state & ATTACH_PROGRESS_STATEP_ALLOC) {
893 		ddi_soft_state_free(nv_statep, inst);
894 	}
895 
896 	if (attach_state & ATTACH_PROGRESS_CONF_HANDLE) {
897 		pci_config_teardown(&pci_conf_handle);
898 	}
899 
900 	cmn_err(CE_WARN, "nv_sata%d attach failed", inst);
901 
902 	return (DDI_FAILURE);
903 }
904 
905 
906 static int
907 nv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
908 {
909 	int i, port, inst = ddi_get_instance(dip);
910 	nv_ctl_t *nvc;
911 	nv_port_t *nvp;
912 
913 	nvc = ddi_get_soft_state(nv_statep, inst);
914 
915 	switch (cmd) {
916 
917 	case DDI_DETACH:
918 
919 		NVLOG(NVDBG_INIT, nvc, NULL, "nv_detach: DDI_DETACH", NULL);
920 
921 		/*
922 		 * Remove interrupts
923 		 */
924 		nv_rem_intrs(nvc);
925 
926 		/*
927 		 * Remove timers
928 		 */
929 		for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
930 			nvp = &(nvc->nvc_port[port]);
931 			if (nvp->nvp_timeout_id != 0) {
932 				(void) untimeout(nvp->nvp_timeout_id);
933 			}
934 		}
935 
936 		/*
937 		 * Remove maps
938 		 */
939 		for (i = 0; i < 6; i++) {
940 			ddi_regs_map_free(&nvc->nvc_bar_hdl[i]);
941 		}
942 
943 		/*
944 		 * Destroy mutexes
945 		 */
946 		mutex_destroy(&nvc->nvc_mutex);
947 
948 		/*
949 		 * Uninitialize the controller structures
950 		 */
951 		nv_uninit_ctl(nvc);
952 
953 #ifdef SGPIO_SUPPORT
954 		/*
955 		 * release SGPIO resources
956 		 */
957 		nv_sgp_cleanup(nvc);
958 #endif
959 
960 		/*
961 		 * unregister from the sata module
962 		 */
963 		(void) sata_hba_detach(nvc->nvc_dip, DDI_DETACH);
964 
965 		/*
966 		 * Free soft state
967 		 */
968 		ddi_soft_state_free(nv_statep, inst);
969 
970 		return (DDI_SUCCESS);
971 
972 	case DDI_SUSPEND:
973 
974 		NVLOG(NVDBG_INIT, nvc, NULL, "nv_detach: DDI_SUSPEND", NULL);
975 
976 		for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
977 			nv_suspend(&(nvc->nvc_port[i]));
978 		}
979 
980 		nvc->nvc_state |= NV_CTRL_SUSPEND;
981 
982 		return (DDI_SUCCESS);
983 
984 	default:
985 		return (DDI_FAILURE);
986 	}
987 }
988 
989 
990 /*ARGSUSED*/
991 static int
992 nv_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
993 {
994 	nv_ctl_t *nvc;
995 	int instance;
996 	dev_t dev;
997 
998 	dev = (dev_t)arg;
999 	instance = getminor(dev);
1000 
1001 	switch (infocmd) {
1002 	case DDI_INFO_DEVT2DEVINFO:
1003 		nvc = ddi_get_soft_state(nv_statep,  instance);
1004 		if (nvc != NULL) {
1005 			*result = nvc->nvc_dip;
1006 			return (DDI_SUCCESS);
1007 		} else {
1008 			*result = NULL;
1009 			return (DDI_FAILURE);
1010 		}
1011 	case DDI_INFO_DEVT2INSTANCE:
1012 		*(int *)result = instance;
1013 		break;
1014 	default:
1015 		break;
1016 	}
1017 	return (DDI_SUCCESS);
1018 }
1019 
1020 
1021 #ifdef SGPIO_SUPPORT
1022 /* ARGSUSED */
1023 static int
1024 nv_open(dev_t *devp, int flag, int otyp, cred_t *credp)
1025 {
1026 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, getminor(*devp));
1027 
1028 	if (nvc == NULL) {
1029 		return (ENXIO);
1030 	}
1031 
1032 	return (0);
1033 }
1034 
1035 
1036 /* ARGSUSED */
1037 static int
1038 nv_close(dev_t dev, int flag, int otyp, cred_t *credp)
1039 {
1040 	return (0);
1041 }
1042 
1043 
1044 /* ARGSUSED */
1045 static int
1046 nv_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, int *rvalp)
1047 {
1048 	nv_ctl_t *nvc;
1049 	int inst;
1050 	int status;
1051 	int ctlr, port;
1052 	int drive;
1053 	uint8_t curr_led;
1054 	struct dc_led_ctl led;
1055 
1056 	inst = getminor(dev);
1057 	if (inst == -1) {
1058 		return (EBADF);
1059 	}
1060 
1061 	nvc = ddi_get_soft_state(nv_statep, inst);
1062 	if (nvc == NULL) {
1063 		return (EBADF);
1064 	}
1065 
1066 	if ((nvc->nvc_sgp_cbp == NULL) || (nvc->nvc_sgp_cmn == NULL)) {
1067 		return (EIO);
1068 	}
1069 
1070 	switch (cmd) {
1071 	case DEVCTL_SET_LED:
1072 		status = ddi_copyin((void *)arg, &led,
1073 		    sizeof (struct dc_led_ctl), mode);
1074 		if (status != 0)
1075 			return (EFAULT);
1076 
1077 		/*
1078 		 * Since only the first two controllers currently support
1079 		 * SGPIO (as per NVIDIA docs), this code will as well.
1080 		 * Note that this validates the port value within led_number
1081 		 * as well.
1082 		 */
1083 
1084 		ctlr = SGP_DRV_TO_CTLR(led.led_number);
1085 		if ((ctlr != 0) && (ctlr != 1))
1086 			return (ENXIO);
1087 
1088 		if ((led.led_state & DCL_STATE_FAST_BLNK) ||
1089 		    (led.led_state & DCL_STATE_SLOW_BLNK)) {
1090 			return (EINVAL);
1091 		}
1092 
1093 		drive = led.led_number;
1094 
1095 		if ((led.led_ctl_active == DCL_CNTRL_OFF) ||
1096 		    (led.led_state == DCL_STATE_OFF)) {
1097 
1098 			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
1099 				nv_sgp_error(nvc, drive, TR_ERROR_DISABLE);
1100 			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
1101 				nv_sgp_locate(nvc, drive, TR_LOCATE_DISABLE);
1102 			} else {
1103 				return (ENXIO);
1104 			}
1105 
1106 			port = SGP_DRV_TO_PORT(led.led_number);
1107 			nvc->nvc_port[port].nvp_sgp_ioctl_mod |= led.led_type;
1108 		}
1109 
1110 		if (led.led_ctl_active == DCL_CNTRL_ON) {
1111 			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
1112 				nv_sgp_error(nvc, drive, TR_ERROR_ENABLE);
1113 			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
1114 				nv_sgp_locate(nvc, drive, TR_LOCATE_ENABLE);
1115 			} else {
1116 				return (ENXIO);
1117 			}
1118 
1119 			port = SGP_DRV_TO_PORT(led.led_number);
1120 			nvc->nvc_port[port].nvp_sgp_ioctl_mod |= led.led_type;
1121 		}
1122 
1123 		break;
1124 
1125 	case DEVCTL_GET_LED:
1126 		status = ddi_copyin((void *)arg, &led,
1127 		    sizeof (struct dc_led_ctl), mode);
1128 		if (status != 0)
1129 			return (EFAULT);
1130 
1131 		/*
1132 		 * Since only the first two controllers currently support
1133 		 * SGPIO (as per NVIDIA docs), this code will as well.
1134 		 * Note that this validates the port value within led_number
1135 		 * as well.
1136 		 */
1137 
1138 		ctlr = SGP_DRV_TO_CTLR(led.led_number);
1139 		if ((ctlr != 0) && (ctlr != 1))
1140 			return (ENXIO);
1141 
1142 		curr_led = SGPIO0_TR_DRV(nvc->nvc_sgp_cbp->sgpio0_tr,
1143 		    led.led_number);
1144 
1145 		port = SGP_DRV_TO_PORT(led.led_number);
1146 		if (nvc->nvc_port[port].nvp_sgp_ioctl_mod & led.led_type) {
1147 			led.led_ctl_active = DCL_CNTRL_ON;
1148 
1149 			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
1150 				if (TR_ERROR(curr_led) == TR_ERROR_DISABLE)
1151 					led.led_state = DCL_STATE_OFF;
1152 				else
1153 					led.led_state = DCL_STATE_ON;
1154 			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
1155 				if (TR_LOCATE(curr_led) == TR_LOCATE_DISABLE)
1156 					led.led_state = DCL_STATE_OFF;
1157 				else
1158 					led.led_state = DCL_STATE_ON;
1159 			} else {
1160 				return (ENXIO);
1161 			}
1162 		} else {
1163 			led.led_ctl_active = DCL_CNTRL_OFF;
1164 			/*
1165 			 * Not really off, but never set and no constant for
1166 			 * tri-state
1167 			 */
1168 			led.led_state = DCL_STATE_OFF;
1169 		}
1170 
1171 		status = ddi_copyout(&led, (void *)arg,
1172 		    sizeof (struct dc_led_ctl), mode);
1173 		if (status != 0)
1174 			return (EFAULT);
1175 
1176 		break;
1177 
1178 	case DEVCTL_NUM_LEDS:
1179 		led.led_number = SGPIO_DRV_CNT_VALUE;
1180 		led.led_ctl_active = 1;
1181 		led.led_type = 3;
1182 
1183 		/*
1184 		 * According to documentation, NVIDIA SGPIO is supposed to
1185 		 * support blinking, but it does not seem to work in practice.
1186 		 */
1187 		led.led_state = DCL_STATE_ON;
1188 
1189 		status = ddi_copyout(&led, (void *)arg,
1190 		    sizeof (struct dc_led_ctl), mode);
1191 		if (status != 0)
1192 			return (EFAULT);
1193 
1194 		break;
1195 
1196 	default:
1197 		return (EINVAL);
1198 	}
1199 
1200 	return (0);
1201 }
1202 #endif	/* SGPIO_SUPPORT */
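/*
 * A minimal user-level sketch of driving the LED ioctls above (illustrative
 * only; the device path and error handling are assumptions):
 *
 *	struct dc_led_ctl led;
 *	int fd = open("/devices/...:devctl", O_RDWR);
 *
 *	bzero(&led, sizeof (led));
 *	led.led_number = 0;
 *	led.led_type = DCL_TYPE_DEVICE_FAIL;
 *	led.led_ctl_active = DCL_CNTRL_ON;
 *	led.led_state = DCL_STATE_ON;
 *	(void) ioctl(fd, DEVCTL_SET_LED, &led);
 */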
1203 
1204 
1205 /*
1206  * Called by sata module to probe a port.  Port and device state
1207  * are not changed here... only reported back to the sata module.
1208  *
1209  */
1210 static int
1211 nv_sata_probe(dev_info_t *dip, sata_device_t *sd)
1212 {
1213 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1214 	uint8_t cport = sd->satadev_addr.cport;
1215 	uint8_t pmport = sd->satadev_addr.pmport;
1216 	uint8_t qual = sd->satadev_addr.qual;
1217 	nv_port_t *nvp;
1218 
1219 	if (cport >= NV_MAX_PORTS(nvc)) {
1220 		sd->satadev_type = SATA_DTYPE_NONE;
1221 		sd->satadev_state = SATA_STATE_UNKNOWN;
1222 
1223 		return (SATA_FAILURE);
1224 	}
1225 
1226 	ASSERT(nvc->nvc_port != NULL);
1227 	nvp = &(nvc->nvc_port[cport]);
1228 	ASSERT(nvp != NULL);
1229 
1230 	NVLOG(NVDBG_ENTRY, nvc, nvp,
1231 	    "nv_sata_probe: enter cport: 0x%x, pmport: 0x%x, "
1232 	    "qual: 0x%x", cport, pmport, qual);
1233 
1234 	mutex_enter(&nvp->nvp_mutex);
1235 
1236 	/*
1237 	 * This check seems to be done in the SATA module.
1238 	 * It may not be required here
1239 	 */
1240 	if (nvp->nvp_state & NV_PORT_INACTIVE) {
1241 		nv_cmn_err(CE_WARN, nvc, nvp,
1242 		    "port inactive.  Use cfgadm to activate");
1243 		sd->satadev_type = SATA_DTYPE_UNKNOWN;
1244 		sd->satadev_state = SATA_PSTATE_SHUTDOWN;
1245 		mutex_exit(&nvp->nvp_mutex);
1246 
1247 		return (SATA_SUCCESS);
1248 	}
1249 
1250 	if (nvp->nvp_state & NV_PORT_FAILED) {
1251 		NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
1252 		    "probe: port failed", NULL);
1253 		sd->satadev_type = SATA_DTYPE_NONE;
1254 		sd->satadev_state = SATA_PSTATE_FAILED;
1255 		mutex_exit(&nvp->nvp_mutex);
1256 
1257 		return (SATA_SUCCESS);
1258 	}
1259 
1260 	if (qual == SATA_ADDR_PMPORT) {
1261 		sd->satadev_type = SATA_DTYPE_NONE;
1262 		sd->satadev_state = SATA_STATE_UNKNOWN;
1263 		mutex_exit(&nvp->nvp_mutex);
1264 		nv_cmn_err(CE_WARN, nvc, nvp,
1265 		    "controller does not support port multiplier");
1266 
1267 		return (SATA_SUCCESS);
1268 	}
1269 
1270 	sd->satadev_state = SATA_PSTATE_PWRON;
1271 
1272 	nv_copy_registers(nvp, sd, NULL);
1273 
1274 	if (nvp->nvp_state & (NV_PORT_RESET | NV_PORT_RESET_RETRY)) {
1275 		/*
1276 		 * We are waiting for reset to complete and to fetch
1277 		 * a signature.
1278 		 * Reset will cause the link to go down for a short period of
1279 		 * time.  If reset processing continues for less than
1280 		 * NV_LINK_DOWN_TIMEOUT, fake the status of the link so that
1281 		 * we will not report intermittent link down.
1282 		 * Maybe we should report previous link state?
1283 		 */
1284 		if (TICK_TO_MSEC(ddi_get_lbolt() - nvp->nvp_reset_time) <
1285 		    NV_LINK_DOWN_TIMEOUT) {
1286 			SSTATUS_SET_IPM(sd->satadev_scr.sstatus,
1287 			    SSTATUS_IPM_ACTIVE);
1288 			SSTATUS_SET_DET(sd->satadev_scr.sstatus,
1289 			    SSTATUS_DET_DEVPRE_PHYCOM);
1290 			sd->satadev_type = nvp->nvp_type;
1291 			mutex_exit(&nvp->nvp_mutex);
1292 
1293 			return (SATA_SUCCESS);
1294 		}
1295 	}
1296 	/*
1297 	 * Just report the current port state
1298 	 */
1299 	sd->satadev_type = nvp->nvp_type;
1300 	sd->satadev_state = nvp->nvp_state | SATA_PSTATE_PWRON;
1301 	mutex_exit(&nvp->nvp_mutex);
1302 
1303 #ifdef SGPIO_SUPPORT
1304 	if (nvp->nvp_type == SATA_DTYPE_ATADISK) {
1305 		nv_sgp_drive_connect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
1306 		    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
1307 	} else {
1308 		nv_sgp_drive_disconnect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
1309 		    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
1310 	}
1311 #endif
1312 
1313 	return (SATA_SUCCESS);
1314 }
1315 
1316 
1317 /*
1318  * Called by sata module to start a new command.
1319  */
1320 static int
1321 nv_sata_start(dev_info_t *dip, sata_pkt_t *spkt)
1322 {
1323 	int cport = spkt->satapkt_device.satadev_addr.cport;
1324 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1325 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1326 	int ret;
1327 
1328 	NVLOG(NVDBG_ENTRY, nvc, nvp, "nv_sata_start: opmode: 0x%x cmd=%x",
1329 	    spkt->satapkt_op_mode, spkt->satapkt_cmd.satacmd_cmd_reg);
1330 
1331 	mutex_enter(&nvp->nvp_mutex);
1332 
1333 	/*
1334 	 * record number of commands for debugging
1335 	 */
1336 	nvp->nvp_seq++;
1337 
1338 	if ((nvp->nvp_state & NV_PORT_INIT) == 0) {
1339 		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1340 		NVLOG(NVDBG_ERRS, nvc, nvp,
1341 		    "nv_sata_start: port not yet initialized", NULL);
1342 		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1343 		mutex_exit(&nvp->nvp_mutex);
1344 
1345 		return (SATA_TRAN_PORT_ERROR);
1346 	}
1347 
1348 	if (nvp->nvp_state & NV_PORT_INACTIVE) {
1349 		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1350 		NVLOG(NVDBG_ERRS, nvc, nvp,
1351 		    "nv_sata_start: NV_PORT_INACTIVE", NULL);
1352 		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1353 		mutex_exit(&nvp->nvp_mutex);
1354 
1355 		return (SATA_TRAN_PORT_ERROR);
1356 	}
1357 
1358 	if (nvp->nvp_state & NV_PORT_FAILED) {
1359 		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1360 		NVLOG(NVDBG_ERRS, nvc, nvp,
1361 		    "nv_sata_start: NV_PORT_FAILED state", NULL);
1362 		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1363 		mutex_exit(&nvp->nvp_mutex);
1364 
1365 		return (SATA_TRAN_PORT_ERROR);
1366 	}
1367 
1368 	if (nvp->nvp_state & NV_PORT_RESET) {
1369 		NVLOG(NVDBG_ERRS, nvc, nvp,
1370 		    "still waiting for reset completion", NULL);
1371 		spkt->satapkt_reason = SATA_PKT_BUSY;
1372 		mutex_exit(&nvp->nvp_mutex);
1373 
1374 		/*
1375 		 * If in panic, timeouts do not occur, so fake one
1376 		 * so that the signature can be acquired to complete
1377 		 * the reset handling.
1378 		 */
1379 		if (ddi_in_panic()) {
1380 			nv_timeout(nvp);
1381 		}
1382 
1383 		return (SATA_TRAN_BUSY);
1384 	}
1385 
1386 	if (nvp->nvp_type == SATA_DTYPE_NONE) {
1387 		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1388 		NVLOG(NVDBG_ERRS, nvc, nvp,
1389 		    "nv_sata_start: SATA_DTYPE_NONE", NULL);
1390 		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1391 		mutex_exit(&nvp->nvp_mutex);
1392 
1393 		return (SATA_TRAN_PORT_ERROR);
1394 	}
1395 
1396 	if (spkt->satapkt_device.satadev_type == SATA_DTYPE_PMULT) {
1397 		ASSERT(nvp->nvp_type == SATA_DTYPE_PMULT);
1398 		nv_cmn_err(CE_WARN, nvc, nvp,
1399 		    "port multipliers not supported by controller");
1400 		spkt->satapkt_reason = SATA_PKT_CMD_UNSUPPORTED;
1401 		mutex_exit(&nvp->nvp_mutex);
1402 
1403 		return (SATA_TRAN_CMD_UNSUPPORTED);
1404 	}
1405 
1406 	/*
1407 	 * after a device reset, and then when sata module restore processing
1408 	 * is complete, the sata module will set sata_clear_dev_reset which
1409 	 * indicates that restore processing has completed and normal
1410 	 * non-restore related commands should be processed.
1411 	 */
1412 	if (spkt->satapkt_cmd.satacmd_flags.sata_clear_dev_reset) {
1413 		nvp->nvp_state &= ~NV_PORT_RESTORE;
1414 		NVLOG(NVDBG_RESET, nvc, nvp,
1415 		    "nv_sata_start: clearing NV_PORT_RESTORE", NULL);
1416 	}
1417 
1418 	/*
1419 	 * if the device was recently reset as indicated by NV_PORT_RESTORE,
1420 	 * only allow commands which restore device state.  The sata module
1421 	 * marks such commands with sata_ignore_dev_reset.
1422 	 *
1423 	 * during coredump, nv_reset is called but then the restore
1424 	 * doesn't happen.  For now, workaround by ignoring the wait for
1425 	 * restore if the system is panicing.
1426 	 */
1427 	if ((nvp->nvp_state & NV_PORT_RESTORE) &&
1428 	    !(spkt->satapkt_cmd.satacmd_flags.sata_ignore_dev_reset) &&
1429 	    (ddi_in_panic() == 0)) {
1430 		spkt->satapkt_reason = SATA_PKT_BUSY;
1431 		NVLOG(NVDBG_RESET, nvc, nvp,
1432 		    "nv_sata_start: waiting for restore ", NULL);
1433 		mutex_exit(&nvp->nvp_mutex);
1434 
1435 		return (SATA_TRAN_BUSY);
1436 	}
1437 
1438 	if (nvp->nvp_state & NV_PORT_ABORTING) {
1439 		spkt->satapkt_reason = SATA_PKT_BUSY;
1440 		NVLOG(NVDBG_ERRS, nvc, nvp,
1441 		    "nv_sata_start: NV_PORT_ABORTING", NULL);
1442 		mutex_exit(&nvp->nvp_mutex);
1443 
1444 		return (SATA_TRAN_BUSY);
1445 	}
1446 
1447 	/* Clear SError to be able to check errors after the command failure */
1448 	nv_put32(nvp->nvp_ctlp->nvc_bar_hdl[5], nvp->nvp_serror, 0xffffffff);
1449 
1450 	if (spkt->satapkt_op_mode &
1451 	    (SATA_OPMODE_POLLING|SATA_OPMODE_SYNCH)) {
1452 
1453 		ret = nv_start_sync(nvp, spkt);
1454 
1455 		mutex_exit(&nvp->nvp_mutex);
1456 
1457 		return (ret);
1458 	}
1459 
1460 	/*
1461 	 * start the command asynchronously
1462 	 */
1463 	ret = nv_start_async(nvp, spkt);
1464 
1465 	mutex_exit(&nvp->nvp_mutex);
1466 
1467 	return (ret);
1468 }
1469 
1470 
1471 /*
1472  * SATA_OPMODE_POLLING implies the driver is in a
1473  * synchronous mode, and SATA_OPMODE_SYNCH is also set.
1474  * If only SATA_OPMODE_SYNCH is set, the driver can use
1475  * interrupts and sleep wait on a cv.
1476  *
1477  * If SATA_OPMODE_POLLING is set, the driver can't use
1478  * interrupts and must busy wait and simulate the
1479  * interrupts by waiting for BSY to be cleared.
1480  *
1481  * Synchronous mode has to return BUSY if there are
1482  * any other commands already on the drive.
1483  */
1484 static int
1485 nv_start_sync(nv_port_t *nvp, sata_pkt_t *spkt)
1486 {
1487 	nv_ctl_t *nvc = nvp->nvp_ctlp;
1488 	int ret;
1489 
1490 	NVLOG(NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync: entry",
1491 	    NULL);
1492 
1493 	if (nvp->nvp_ncq_run != 0 || nvp->nvp_non_ncq_run != 0) {
1494 		spkt->satapkt_reason = SATA_PKT_BUSY;
1495 		NVLOG(NVDBG_SYNC, nvp->nvp_ctlp, nvp,
1496 		    "nv_sata_satapkt_sync: device is busy, sync cmd rejected"
1497 		    "ncq_run: %d non_ncq_run: %d  spkt: %p",
1498 		    nvp->nvp_ncq_run, nvp->nvp_non_ncq_run,
1499 		    (&(nvp->nvp_slot[0]))->nvslot_spkt);
1500 
1501 		return (SATA_TRAN_BUSY);
1502 	}
1503 
1504 	/*
1505 	 * if SYNC but not POLL, verify that this is not on interrupt thread.
1506 	 */
1507 	if (!(spkt->satapkt_op_mode & SATA_OPMODE_POLLING) &&
1508 	    servicing_interrupt()) {
1509 		spkt->satapkt_reason = SATA_PKT_BUSY;
1510 		NVLOG(NVDBG_SYNC, nvp->nvp_ctlp, nvp,
1511 		    "SYNC mode not allowed during interrupt", NULL);
1512 
1513 		return (SATA_TRAN_BUSY);
1514 
1515 	}
1516 
1517 	/*
1518 	 * disable interrupt generation if in polled mode
1519 	 */
1520 	if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1521 		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
1522 	}
1523 
1524 	if ((ret = nv_start_common(nvp, spkt)) != SATA_TRAN_ACCEPTED) {
1525 		if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1526 			(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1527 		}
1528 
1529 		return (ret);
1530 	}
1531 
1532 	if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1533 		mutex_exit(&nvp->nvp_mutex);
1534 		ret = nv_poll_wait(nvp, spkt);
1535 		mutex_enter(&nvp->nvp_mutex);
1536 
1537 		(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1538 
1539 		NVLOG(NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync:"
1540 		    " done % reason %d", ret);
1541 
1542 		return (ret);
1543 	}
1544 
1545 	/*
1546 	 * non-polling synchronous mode handling.  The interrupt will signal
1547 	 * when the IO is completed.
1548 	 */
1549 	cv_wait(&nvp->nvp_poll_cv, &nvp->nvp_mutex);
1550 
1551 	if (spkt->satapkt_reason != SATA_PKT_COMPLETED) {
1552 
1553 		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
1554 	}
1555 
1556 	NVLOG(NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync:"
1557 	    " done % reason %d", spkt->satapkt_reason);
1558 
1559 	return (SATA_TRAN_ACCEPTED);
1560 }
1561 
1562 
1563 static int
1564 nv_poll_wait(nv_port_t *nvp, sata_pkt_t *spkt)
1565 {
1566 	int ret;
1567 	nv_ctl_t *nvc = nvp->nvp_ctlp;
1568 #if ! defined(__lock_lint)
1569 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[0]); /* not NCQ aware */
1570 #endif
1571 
1572 	NVLOG(NVDBG_SYNC, nvc, nvp, "nv_poll_wait: enter", NULL);
1573 
1574 	for (;;) {
1575 
1576 		NV_DELAY_NSEC(400);
1577 
1578 		NVLOG(NVDBG_SYNC, nvc, nvp, "nv_poll_wait: before nv_wait",
1579 		    NULL);
1580 		if (nv_wait(nvp, 0, SATA_STATUS_BSY,
1581 		    NV_SEC2USEC(spkt->satapkt_time), NV_NOSLEEP) == B_FALSE) {
1582 			mutex_enter(&nvp->nvp_mutex);
1583 			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
1584 			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
1585 			nvp->nvp_state |= NV_PORT_RESET;
1586 			nvp->nvp_state &= ~(NV_PORT_RESTORE |
1587 			    NV_PORT_RESET_RETRY);
1588 			nv_reset(nvp, "poll_wait");
1589 			nv_complete_io(nvp, spkt, 0);
1590 			mutex_exit(&nvp->nvp_mutex);
1591 			NVLOG(NVDBG_SYNC, nvc, nvp, "nv_poll_wait: "
1592 			    "SATA_STATUS_BSY", NULL);
1593 
1594 			return (SATA_TRAN_ACCEPTED);
1595 		}
1596 
1597 		NVLOG(NVDBG_SYNC, nvc, nvp, "nv_poll_wait: before nvc_intr",
1598 		    NULL);
1599 
1600 		/*
1601 		 * Simulate interrupt.
1602 		 */
1603 		ret = (*(nvc->nvc_interrupt))((caddr_t)nvc, NULL);
1604 		NVLOG(NVDBG_SYNC, nvc, nvp, "nv_poll_wait: after nvc_intr",
1605 		    NULL);
1606 
1607 		if (ret != DDI_INTR_CLAIMED) {
1608 			NVLOG(NVDBG_SYNC, nvc, nvp, "nv_poll_wait:"
1609 			    " unclaimed -- resetting", NULL);
1610 			mutex_enter(&nvp->nvp_mutex);
1611 			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
1612 			nvp->nvp_state |= NV_PORT_RESET;
1613 			nvp->nvp_state &= ~(NV_PORT_RESTORE |
1614 			    NV_PORT_RESET_RETRY);
1615 			nv_reset(nvp, "poll_wait intr not claimed");
1616 			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
1617 			nv_complete_io(nvp, spkt, 0);
1618 			mutex_exit(&nvp->nvp_mutex);
1619 
1620 			return (SATA_TRAN_ACCEPTED);
1621 		}
1622 
1623 #if ! defined(__lock_lint)
1624 		if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
1625 			/*
1626 			 * packet is complete
1627 			 */
1628 			return (SATA_TRAN_ACCEPTED);
1629 		}
1630 #endif
1631 	}
1632 	/*NOTREACHED*/
1633 }
1634 
1635 
1636 /*
1637  * Called by sata module to abort outstanding packets.
1638  */
1639 /*ARGSUSED*/
1640 static int
1641 nv_sata_abort(dev_info_t *dip, sata_pkt_t *spkt, int flag)
1642 {
1643 	int cport = spkt->satapkt_device.satadev_addr.cport;
1644 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1645 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1646 	int c_a, ret;
1647 
1648 	ASSERT(cport < NV_MAX_PORTS(nvc));
1649 	NVLOG(NVDBG_ENTRY, nvc, nvp, "nv_sata_abort %d %p", flag, spkt);
1650 
1651 	mutex_enter(&nvp->nvp_mutex);
1652 
1653 	if (nvp->nvp_state & NV_PORT_INACTIVE) {
1654 		mutex_exit(&nvp->nvp_mutex);
1655 		nv_cmn_err(CE_WARN, nvc, nvp,
1656 		    "abort request failed: port inactive");
1657 
1658 		return (SATA_FAILURE);
1659 	}
1660 
1661 	/*
1662 	 * spkt == NULL then abort all commands
1663 	 */
1664 	c_a = nv_abort_active(nvp, spkt, SATA_PKT_ABORTED, B_TRUE);
1665 
1666 	if (c_a) {
1667 		NVLOG(NVDBG_ENTRY, nvc, nvp,
1668 		    "packets aborted running=%d", c_a);
1669 		ret = SATA_SUCCESS;
1670 	} else {
1671 		if (spkt == NULL) {
1672 			NVLOG(NVDBG_ENTRY, nvc, nvp, "no spkts to abort", NULL);
1673 		} else {
1674 			NVLOG(NVDBG_ENTRY, nvc, nvp,
1675 			    "can't find spkt to abort", NULL);
1676 		}
1677 		ret = SATA_FAILURE;
1678 	}
1679 
1680 	mutex_exit(&nvp->nvp_mutex);
1681 
1682 	return (ret);
1683 }
1684 
1685 
1686 /*
1687  * if spkt == NULL abort all pkts running, otherwise
1688  * abort the requested packet.  must be called with nv_mutex
1689  * held and returns with it held.  Not NCQ aware.
1690  */
1691 static int
1692 nv_abort_active(nv_port_t *nvp, sata_pkt_t *spkt, int abort_reason, int flag)
1693 {
1694 	int aborted = 0, i, reset_once = B_FALSE;
1695 	struct nv_slot *nv_slotp;
1696 	sata_pkt_t *spkt_slot;
1697 
1698 	ASSERT(MUTEX_HELD(&nvp->nvp_mutex));
1699 
1700 	/*
1701 	 * return if the port is not configured
1702 	 */
1703 	if (nvp->nvp_slot == NULL) {
1704 		NVLOG(NVDBG_ENTRY, nvp->nvp_ctlp, nvp,
1705 		    "nv_abort_active: not configured so returning", NULL);
1706 
1707 		return (0);
1708 	}
1709 
1710 	NVLOG(NVDBG_ENTRY, nvp->nvp_ctlp, nvp, "nv_abort_active", NULL);
1711 
1712 	nvp->nvp_state |= NV_PORT_ABORTING;
1713 
1714 	for (i = 0; i < nvp->nvp_queue_depth; i++) {
1715 
1716 		nv_slotp = &(nvp->nvp_slot[i]);
1717 		spkt_slot = nv_slotp->nvslot_spkt;
1718 
1719 		/*
1720 		 * skip if not active command in slot
1721 		 */
1722 		if (spkt_slot == NULL) {
1723 			continue;
1724 		}
1725 
1726 		/*
1727 		 * if a specific packet was requested, skip if
1728 		 * this is not a match
1729 		 */
1730 		if ((spkt != NULL) && (spkt != spkt_slot)) {
1731 			continue;
1732 		}
1733 
1734 		/*
1735 		 * stop the hardware.  This could need reworking
1736 		 * when NCQ is enabled in the driver.
1737 		 */
1738 		if (reset_once == B_FALSE) {
1739 			ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
1740 
1741 			/*
1742 			 * stop DMA engine
1743 			 */
1744 			nv_put8(bmhdl, nvp->nvp_bmicx,  0);
1745 
1746 			/*
1747 			 * Reset only if explicitly specified by the arg flag
1748 			 */
1749 			if (flag == B_TRUE) {
1750 				reset_once = B_TRUE;
1751 				nvp->nvp_state |= NV_PORT_RESET;
1752 				nvp->nvp_state &= ~(NV_PORT_RESTORE |
1753 				    NV_PORT_RESET_RETRY);
1754 				nv_reset(nvp, "abort_active");
1755 			}
1756 		}
1757 
1758 		spkt_slot->satapkt_reason = abort_reason;
1759 		nv_complete_io(nvp, spkt_slot, i);
1760 		aborted++;
1761 	}
1762 
1763 	nvp->nvp_state &= ~NV_PORT_ABORTING;
1764 
1765 	return (aborted);
1766 }
1767 
1768 
1769 /*
1770  * Called by sata module to reset a port, device, or the controller.
1771  */
1772 static int
1773 nv_sata_reset(dev_info_t *dip, sata_device_t *sd)
1774 {
1775 	int cport = sd->satadev_addr.cport;
1776 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1777 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1778 	int ret = SATA_SUCCESS;
1779 
1780 	ASSERT(cport < NV_MAX_PORTS(nvc));
1781 
1782 	NVLOG(NVDBG_ENTRY, nvc, nvp, "nv_sata_reset", NULL);
1783 
1784 	mutex_enter(&nvp->nvp_mutex);
1785 
1786 	switch (sd->satadev_addr.qual) {
1787 
1788 	case SATA_ADDR_CPORT:
1789 		/*FALLTHROUGH*/
1790 	case SATA_ADDR_DCPORT:
1791 		nvp->nvp_state |= NV_PORT_RESET;
1792 		nvp->nvp_state &= ~NV_PORT_RESTORE;
1793 		nv_reset(nvp, "sata_reset");
1794 		(void) nv_abort_active(nvp, NULL, SATA_PKT_RESET, B_FALSE);
1795 
1796 		break;
1797 	case SATA_ADDR_CNTRL:
1798 		NVLOG(NVDBG_ENTRY, nvc, nvp,
1799 		    "nv_sata_reset: conroller reset not supported", NULL);
1800 
1801 		break;
1802 	case SATA_ADDR_PMPORT:
1803 	case SATA_ADDR_DPMPORT:
1804 		NVLOG(NVDBG_ENTRY, nvc, nvp,
1805 		    "nv_sata_reset: port multipliers not supported", NULL);
1806 		/*FALLTHROUGH*/
1807 	default:
1808 		/*
1809 		 * unsupported case
1810 		 */
1811 		ret = SATA_FAILURE;
1812 		break;
1813 	}
1814 
1815 	if (ret == SATA_SUCCESS) {
1816 		/*
1817 		 * If the port is inactive, do a quiet reset and don't attempt
1818 		 * to wait for reset completion or do any post reset processing
1819 		 */
1820 		if (nvp->nvp_state & NV_PORT_INACTIVE) {
1821 			nvp->nvp_state &= ~NV_PORT_RESET;
1822 			nvp->nvp_reset_time = 0;
1823 		}
1824 
1825 		/*
1826 		 * clear the port failed flag
1827 		 */
1828 		nvp->nvp_state &= ~NV_PORT_FAILED;
1829 	}
1830 
1831 	mutex_exit(&nvp->nvp_mutex);
1832 
1833 	return (ret);
1834 }
1835 
1836 
1837 /*
1838  * Sata entry point to handle port activation.  cfgadm -c connect
1839  */
1840 static int
1841 nv_sata_activate(dev_info_t *dip, sata_device_t *sd)
1842 {
1843 	int cport = sd->satadev_addr.cport;
1844 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1845 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1846 
1847 	ASSERT(cport < NV_MAX_PORTS(nvc));
1848 	NVLOG(NVDBG_ENTRY, nvc, nvp, "nv_sata_activate", NULL);
1849 
1850 	mutex_enter(&nvp->nvp_mutex);
1851 
1852 	sd->satadev_state = SATA_STATE_READY;
1853 
1854 	nv_copy_registers(nvp, sd, NULL);
1855 
1856 	(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1857 
1858 	nvp->nvp_state &= ~NV_PORT_INACTIVE;
1859 	/* Initiate link probing and device signature acquisition */
1860 	nvp->nvp_type = SATA_DTYPE_NONE;
1861 	nvp->nvp_signature = 0;
1862 	nvp->nvp_state |= NV_PORT_RESET; /* | NV_PORT_PROBE; */
1863 	nvp->nvp_state &= ~(NV_PORT_RESTORE | NV_PORT_RESET_RETRY);
1864 	nv_reset(nvp, "sata_activate");
1865 
1866 	mutex_exit(&nvp->nvp_mutex);
1867 
1868 	return (SATA_SUCCESS);
1869 }
1870 
1871 
1872 /*
1873  * Sata entry point to handle port deactivation.  cfgadm -c disconnect
1874  */
1875 static int
1876 nv_sata_deactivate(dev_info_t *dip, sata_device_t *sd)
1877 {
1878 	int cport = sd->satadev_addr.cport;
1879 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1880 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1881 
1882 	ASSERT(cport < NV_MAX_PORTS(nvc));
1883 	NVLOG(NVDBG_ENTRY, nvc, nvp, "nv_sata_deactivate", NULL);
1884 
1885 	mutex_enter(&nvp->nvp_mutex);
1886 
1887 	(void) nv_abort_active(nvp, NULL, SATA_PKT_ABORTED, B_FALSE);
1888 
1889 	/*
1890 	 * make the device inaccessible
1891 	 */
1892 	nvp->nvp_state |= NV_PORT_INACTIVE;
1893 
1894 	/*
1895 	 * disable the interrupts on port
1896 	 */
1897 	(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
1898 
1899 	sd->satadev_state = SATA_PSTATE_SHUTDOWN;
1900 	nv_copy_registers(nvp, sd, NULL);
1901 
1902 	mutex_exit(&nvp->nvp_mutex);
1903 
1904 	return (SATA_SUCCESS);
1905 }
1906 
1907 
1908 /*
1909  * find an empty slot in the driver's queue, increment counters,
1910  * and then invoke the appropriate PIO or DMA start routine.
1911  */
1912 static int
1913 nv_start_common(nv_port_t *nvp, sata_pkt_t *spkt)
1914 {
1915 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
1916 	int on_bit = 0x01, slot, sactive, ret, ncq = 0;
1917 	uint8_t cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
1918 	int direction = sata_cmdp->satacmd_flags.sata_data_direction;
1919 	nv_ctl_t *nvc = nvp->nvp_ctlp;
1920 	nv_slot_t *nv_slotp;
1921 	boolean_t dma_cmd;
1922 
1923 	NVLOG(NVDBG_DELIVER, nvc, nvp, "nv_start_common  entered: cmd: 0x%x",
1924 	    sata_cmdp->satacmd_cmd_reg);
1925 
1926 	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
1927 	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
1928 		nvp->nvp_ncq_run++;
1929 		/*
1930 		 * Search for an empty NCQ slot.  By this time, the caller
1931 		 * has already determined that there is room on the
1932 		 * queue.
1933 		 */
1934 		for (slot = 0; slot < nvp->nvp_queue_depth; slot++,
1935 		    on_bit <<= 1) {
1936 			if ((nvp->nvp_sactive_cache & on_bit) == 0) {
1937 				break;
1938 			}
1939 		}
1940 
1941 		/*
1942 		 * The first empty slot found should not exceed the queue
1943 		 * depth of the drive.  If it does, it's an error.
1944 		 */
1945 		ASSERT(slot != nvp->nvp_queue_depth);
1946 
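		/*
		 * Mark the chosen tag in the hardware SACTIVE register and
		 * mirror it in nvp_sactive_cache; completion processing uses
		 * the cache to work out which tag finished.
		 */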
1947 		sactive = nv_get32(nvc->nvc_bar_hdl[5],
1948 		    nvp->nvp_sactive);
1949 		ASSERT((sactive & on_bit) == 0);
1950 		nv_put32(nvc->nvc_bar_hdl[5], nvp->nvp_sactive, on_bit);
1951 		NVLOG(NVDBG_DELIVER, nvc, nvp, "setting SACTIVE onbit: %X",
1952 		    on_bit);
1953 		nvp->nvp_sactive_cache |= on_bit;
1954 
1955 		ncq = NVSLOT_NCQ;
1956 
1957 	} else {
1958 		nvp->nvp_non_ncq_run++;
1959 		slot = 0;
1960 	}
1961 
1962 	nv_slotp = (nv_slot_t *)&nvp->nvp_slot[slot];
1963 
1964 	ASSERT(nv_slotp->nvslot_spkt == NULL);
1965 
1966 	nv_slotp->nvslot_spkt = spkt;
1967 	nv_slotp->nvslot_flags = ncq;
1968 
1969 	/*
1970 	 * the sata module doesn't indicate which commands utilize the
1971 	 * DMA engine, so find out using this switch table.
1972 	 */
1973 	switch (spkt->satapkt_cmd.satacmd_cmd_reg) {
1974 	case SATAC_READ_DMA_EXT:
1975 	case SATAC_WRITE_DMA_EXT:
1976 	case SATAC_WRITE_DMA:
1977 	case SATAC_READ_DMA:
1978 	case SATAC_READ_DMA_QUEUED:
1979 	case SATAC_READ_DMA_QUEUED_EXT:
1980 	case SATAC_WRITE_DMA_QUEUED:
1981 	case SATAC_WRITE_DMA_QUEUED_EXT:
1982 	case SATAC_READ_FPDMA_QUEUED:
1983 	case SATAC_WRITE_FPDMA_QUEUED:
1984 		dma_cmd = B_TRUE;
1985 		break;
1986 	default:
1987 		dma_cmd = B_FALSE;
1988 	}
1989 
1990 	if (sata_cmdp->satacmd_num_dma_cookies != 0 && dma_cmd == B_TRUE) {
1991 		NVLOG(NVDBG_DELIVER, nvc,  nvp, "DMA command", NULL);
1992 		nv_slotp->nvslot_start = nv_start_dma;
1993 		nv_slotp->nvslot_intr = nv_intr_dma;
1994 	} else if (spkt->satapkt_cmd.satacmd_cmd_reg == SATAC_PACKET) {
1995 		NVLOG(NVDBG_DELIVER, nvc,  nvp, "packet command", NULL);
1996 		nv_slotp->nvslot_start = nv_start_pkt_pio;
1997 		nv_slotp->nvslot_intr = nv_intr_pkt_pio;
1998 		if ((direction == SATA_DIR_READ) ||
1999 		    (direction == SATA_DIR_WRITE)) {
2000 			nv_slotp->nvslot_byte_count =
2001 			    spkt->satapkt_cmd.satacmd_bp->b_bcount;
2002 			nv_slotp->nvslot_v_addr =
2003 			    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
2004 			/*
2005 			 * Freeing DMA resources allocated by the framework
2006 			 * now to avoid buffer overwrite (dma sync) problems
2007 			 * when the buffer is released at command completion.
2008 			 * Primarily an issue on systems with more than
2009 			 * 4GB of memory.
2010 			 */
2011 			sata_free_dma_resources(spkt);
2012 		}
2013 	} else if (direction == SATA_DIR_NODATA_XFER) {
2014 		NVLOG(NVDBG_DELIVER, nvc, nvp, "non-data command", NULL);
2015 		nv_slotp->nvslot_start = nv_start_nodata;
2016 		nv_slotp->nvslot_intr = nv_intr_nodata;
2017 	} else if (direction == SATA_DIR_READ) {
2018 		NVLOG(NVDBG_DELIVER, nvc, nvp, "pio in command", NULL);
2019 		nv_slotp->nvslot_start = nv_start_pio_in;
2020 		nv_slotp->nvslot_intr = nv_intr_pio_in;
2021 		nv_slotp->nvslot_byte_count =
2022 		    spkt->satapkt_cmd.satacmd_bp->b_bcount;
2023 		nv_slotp->nvslot_v_addr =
2024 		    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
2025 		/*
2026 		 * Freeing DMA resources allocated by the framework now to
2027 		 * avoid buffer overwrite (dma sync) problems when the buffer
2028 		 * is released at command completion.  This is not an issue
2029 		 * for write because write does not update the buffer.
2030 		 * Primarily an issue on systems with more than 4GB of memory.
2031 		 */
2032 		sata_free_dma_resources(spkt);
2033 	} else if (direction == SATA_DIR_WRITE) {
2034 		NVLOG(NVDBG_DELIVER, nvc, nvp, "pio out command", NULL);
2035 		nv_slotp->nvslot_start = nv_start_pio_out;
2036 		nv_slotp->nvslot_intr = nv_intr_pio_out;
2037 		nv_slotp->nvslot_byte_count =
2038 		    spkt->satapkt_cmd.satacmd_bp->b_bcount;
2039 		nv_slotp->nvslot_v_addr =
2040 		    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
2041 	} else {
2042 		nv_cmn_err(CE_WARN, nvc, nvp, "malformed command: direction"
2043 		    " %d cookies %d cmd %x",
2044 		    sata_cmdp->satacmd_flags.sata_data_direction,
2045 		    sata_cmdp->satacmd_num_dma_cookies,  cmd);
2046 		spkt->satapkt_reason = SATA_PKT_CMD_UNSUPPORTED;
2047 		ret = SATA_TRAN_CMD_UNSUPPORTED;
2048 
2049 		goto fail;
2050 	}
2051 
2052 	if ((ret = (*nv_slotp->nvslot_start)(nvp, slot)) ==
2053 	    SATA_TRAN_ACCEPTED) {
2054 #ifdef SGPIO_SUPPORT
2055 		nv_sgp_drive_active(nvp->nvp_ctlp,
2056 		    (nvp->nvp_ctlp->nvc_ctlr_num * 2) + nvp->nvp_port_num);
2057 #endif
2058 		nv_slotp->nvslot_stime = ddi_get_lbolt();
2059 
2060 		/*
2061 		 * start timer if it's not already running and this packet
2062 		 * is not requesting polled mode.
2063 		 */
2064 		if ((nvp->nvp_timeout_id == 0) &&
2065 		    ((spkt->satapkt_op_mode & SATA_OPMODE_POLLING) == 0)) {
2066 			nv_setup_timeout(nvp, NV_ONE_SEC);
2067 		}
2068 
2069 		nvp->nvp_previous_cmd = nvp->nvp_last_cmd;
2070 		nvp->nvp_last_cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
2071 
2072 		return (SATA_TRAN_ACCEPTED);
2073 	}
2074 
2075 	fail:
2076 
2077 	spkt->satapkt_reason = SATA_TRAN_PORT_ERROR;
2078 
2079 	if (ncq == NVSLOT_NCQ) {
2080 		nvp->nvp_ncq_run--;
2081 		nvp->nvp_sactive_cache &= ~on_bit;
2082 	} else {
2083 		nvp->nvp_non_ncq_run--;
2084 	}
2085 	nv_slotp->nvslot_spkt = NULL;
2086 	nv_slotp->nvslot_flags = 0;
2087 
2088 	return (ret);
2089 }
2090 
2091 
2092 /*
2093  * Check if the signature is ready and if non-zero translate
2094  * it into a solaris sata defined type.
2095  */
2096 static void
2097 nv_read_signature(nv_port_t *nvp)
2098 {
2099 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
2100 	int retry_once = 0;
2101 
2102 	retry:
2103 	/*
2104 	 * Task file error register bit 0 set to 1 indicates that the drive
2105 	 * is ready and has sent a D2H FIS with a signature.
2106 	 */
2107 	if (nv_check_tfr_error != 0) {
2108 		uint8_t tfr_error = nv_get8(cmdhdl, nvp->nvp_error);
2109 		if (!(tfr_error & SATA_ERROR_ILI)) {
2110 			NVLOG(NVDBG_VERBOSE, nvp->nvp_ctlp, nvp,
2111 			    "nv_read_signature: signature not ready", NULL);
2112 
2113 			return;
2114 		}
2115 	}
2116 
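	/*
	 * Assemble the 32-bit device signature from the task file registers
	 * latched from the initial D2H Register FIS: sector count in bits
	 * 7:0, LBA low in 15:8, LBA mid in 23:16 and LBA high in 31:24.
	 */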
2117 	nvp->nvp_signature = nv_get8(cmdhdl, nvp->nvp_count);
2118 	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_sect) << 8);
2119 	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_lcyl) << 16);
2120 	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_hcyl) << 24);
2121 
2122 	NVLOG(NVDBG_VERBOSE, nvp->nvp_ctlp, nvp,
2123 	    "nv_read_signature: 0x%x ", nvp->nvp_signature);
2124 
2125 	switch (nvp->nvp_signature) {
2126 
2127 	case NV_SIG_DISK:
2128 		NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp, "drive is a disk", NULL);
2129 		nvp->nvp_type = SATA_DTYPE_ATADISK;
2130 		break;
2131 	case NV_SIG_ATAPI:
2132 		NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
2133 		    "drive is an optical device", NULL);
2134 		nvp->nvp_type = SATA_DTYPE_ATAPICD;
2135 		break;
2136 	case NV_SIG_PM:
2137 		NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
2138 		    "device is a port multiplier", NULL);
2139 		nvp->nvp_type = SATA_DTYPE_PMULT;
2140 		break;
2141 	case NV_SIG_NOTREADY:
2142 		NVLOG(NVDBG_VERBOSE, nvp->nvp_ctlp, nvp,
2143 		    "signature not ready", NULL);
2144 		nvp->nvp_type = SATA_DTYPE_UNKNOWN;
2145 		break;
2146 	default:
2147 		if (retry_once++ == 0) {
2148 			/*
2149 			 * this is a rare corner case where the controller
2150 			 * was in the middle of updating the registers while
2151 			 * the driver was reading them.  If this happens, wait a
2152 			 * bit and retry, but just once.
2153 			 */
2154 			NV_DELAY_NSEC(1000000);
2155 
2156 			goto retry;
2157 		}
2158 
2159 		nv_cmn_err(CE_WARN, nvp->nvp_ctlp, nvp, "signature %X not"
2160 		    " recognized", nvp->nvp_signature);
2161 		nvp->nvp_type = SATA_DTYPE_UNKNOWN;
2162 		break;
2163 	}
2164 
2165 	if (nvp->nvp_signature) {
2166 		nvp->nvp_state &= ~(NV_PORT_RESET_RETRY | NV_PORT_RESET);
2167 	}
2168 
2169 #ifdef SGPIO_SUPPORT
2170 	if (nvp->nvp_signature == NV_SIG_DISK) {
2171 		nv_sgp_drive_connect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
2172 		    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
2173 	} else {
2174 		nv_sgp_drive_disconnect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
2175 		    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
2176 	}
2177 #endif
2178 }
2179 
2180 
2181 /*
2182  * Set up a new timeout or complete a timeout.
2183  * The timeout value has to be specified in microseconds and must be
2184  * non-zero.
2185  * Must be called at the end of the timeout routine.
2186  */
2187 static void
2188 nv_setup_timeout(nv_port_t *nvp, int time)
2189 {
2190 	clock_t old_duration = nvp->nvp_timeout_duration;
2191 
2192 	ASSERT(time != 0);
2193 
2194 	if (nvp->nvp_timeout_id != 0 && nvp->nvp_timeout_duration == 0) {
2195 		/*
2196 		 * Since we are dropping the mutex for untimeout,
2197 		 * the timeout may be executed while we are trying to
2198 		 * untimeout and setting up a new timeout.
2199 		 * If nvp_timeout_duration is 0, then this function
2200 		 * was re-entered. Just exit.
2201 		 */
2202 		cmn_err(CE_WARN, "nv_setup_timeout re-entered");
2203 		return;
2204 	}
2205 	nvp->nvp_timeout_duration = 0;
2206 	if (nvp->nvp_timeout_id == 0) {
2207 		/* Start new timer */
2208 		nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
2209 		    drv_usectohz(time));
2210 	} else {
2211 		/*
2212 		 * If the currently running timeout is due later than the
2213 		 * requested one, restart it with a new expiration.
2214 		 * Our timeouts do not need to be accurate - we are just
2215 		 * checking that the specified time has been exceeded.
2216 		 */
2217 		if (old_duration > time) {
2218 			mutex_exit(&nvp->nvp_mutex);
2219 			(void) untimeout(nvp->nvp_timeout_id);
2220 			mutex_enter(&nvp->nvp_mutex);
2221 			nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
2222 			    drv_usectohz(time));
2223 		}
2224 	}
2225 	nvp->nvp_timeout_duration = time;
2226 }
2227 
2228 
2229 
2230 int nv_reset_length = NV_RESET_LENGTH;
2231 
2232 /*
2233  * Reset the port
2234  *
2235  * Entered with nvp mutex held
2236  */
2237 static void
2238 nv_reset(nv_port_t *nvp, char *reason)
2239 {
2240 	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
2241 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
2242 	nv_ctl_t *nvc = nvp->nvp_ctlp;
2243 	uint32_t sctrl, serr, sstatus;
2244 	uint8_t bmicx;
2245 	int i, j, reset = 0;
2246 
2247 	ASSERT(mutex_owned(&nvp->nvp_mutex));
2248 
2249 	serr = nv_get32(bar5_hdl, nvp->nvp_serror);
2250 
2251 	/*
2252 	 * stop DMA engine.
2253 	 */
2254 	bmicx = nv_get8(nvp->nvp_bm_hdl, nvp->nvp_bmicx);
2255 	nv_put8(nvp->nvp_bm_hdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
2256 
2257 	nvp->nvp_state |= NV_PORT_RESET;
2258 	nvp->nvp_reset_time = ddi_get_lbolt();
2259 	nvp->nvp_reset_count++;
2260 
2261 	if (strcmp(reason, "attach") != 0) {
2262 		nv_cmn_err(CE_NOTE, nvc, nvp, "nv_reset: reason: %s serr 0x%x",
2263 		    reason, serr);
2264 		/*
2265 		 * keep a record of why the first reset occurred, for debugging
2266 		 */
2267 		if (nvp->nvp_first_reset_reason[0] == '\0') {
2268 			(void) strncpy(nvp->nvp_first_reset_reason,
2269 			    reason, NV_REASON_LEN);
2270 			nvp->nvp_first_reset_reason[NV_REASON_LEN - 1] = '\0';
2271 		}
2272 	}
2273 
2274 	NVLOG(NVDBG_RESET, nvc, nvp, "nv_reset_count: %d",
2275 	    nvp->nvp_reset_count);
2276 
2277 	(void) strncpy(nvp->nvp_reset_reason, reason, NV_REASON_LEN);
2278 
2279 	/*
2280 	 * ensure there is a terminating NULL
2281 	 */
2282 	nvp->nvp_reset_reason[NV_REASON_LEN - 1] = '\0';
2283 
2284 	/*
2285 	 * Issue hardware reset; retry if necessary.
2286 	 */
2287 	for (i = 0; i < NV_RESET_ATTEMPTS; i++) {
2288 		/*
2289 		 * Clear signature registers
2290 		 */
2291 		nv_put8(cmdhdl, nvp->nvp_sect, 0);
2292 		nv_put8(cmdhdl, nvp->nvp_lcyl, 0);
2293 		nv_put8(cmdhdl, nvp->nvp_hcyl, 0);
2294 		nv_put8(cmdhdl, nvp->nvp_count, 0);
2295 
2296 		/* Clear task file error register */
2297 		nv_put8(nvp->nvp_cmd_hdl, nvp->nvp_error, 0);
2298 
2299 		/*
2300 		 * assert reset in the PHY by writing a 1 to bit 0 of SControl
2301 		 */
2302 		sctrl = nv_get32(bar5_hdl, nvp->nvp_sctrl);
2303 		nv_put32(bar5_hdl, nvp->nvp_sctrl,
2304 		    sctrl | SCONTROL_DET_COMRESET);
2305 
2306 		/* Wait at least 1ms, as required by the spec */
2307 		drv_usecwait(nv_reset_length);
2308 
2309 		/* Reset all accumulated error bits */
2310 		nv_put32(bar5_hdl, nvp->nvp_serror, 0xffffffff);
2311 
2312 		sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
2313 		sctrl = nv_get32(bar5_hdl, nvp->nvp_sctrl);
2314 		NVLOG(NVDBG_RESET, nvc, nvp, "nv_reset: applied (%d); "
2315 		    "sctrl 0x%x, sstatus 0x%x", i, sctrl, sstatus);
2316 
2317 		/* de-assert reset in PHY */
2318 		nv_put32(bar5_hdl, nvp->nvp_sctrl,
2319 		    sctrl & ~SCONTROL_DET_COMRESET);
2320 
2321 		/*
2322 		 * Wait up to 10ms for COMINIT to arrive, indicating that
2323 		 * the device recognized COMRESET.
2324 		 */
2325 		for (j = 0; j < 10; j++) {
2326 			drv_usecwait(NV_ONE_MSEC);
2327 			sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
2328 			if ((SSTATUS_GET_IPM(sstatus) == SSTATUS_IPM_ACTIVE) &&
2329 			    (SSTATUS_GET_DET(sstatus) ==
2330 			    SSTATUS_DET_DEVPRE_PHYCOM)) {
2331 				reset = 1;
2332 				break;
2333 			}
2334 		}
2335 		if (reset == 1)
2336 			break;
2337 	}
2338 	serr = nv_get32(bar5_hdl, nvp->nvp_serror);
2339 	if (reset == 0) {
2340 		NVLOG(NVDBG_RESET, nvc, nvp, "nv_reset did not succeed "
2341 		    "(serr 0x%x) after %d attempts", serr, i);
2342 	} else {
2343 		NVLOG(NVDBG_RESET, nvc, nvp, "nv_reset succeeded (serr 0x%x) "
2344 		    "after %dms", serr, TICK_TO_MSEC(ddi_get_lbolt() -
2345 		    nvp->nvp_reset_time));
2346 	}
2347 	nvp->nvp_reset_time = ddi_get_lbolt();
2348 
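	/*
	 * When running in interrupt context it is not possible to block
	 * waiting for the reset to complete, so schedule the 1ms timeout to
	 * poll for it.  Otherwise, unless this is a reset retry, monitor
	 * the reset synchronously via nv_monitor_reset().
	 */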
2349 	if (servicing_interrupt()) {
2350 		nv_setup_timeout(nvp, NV_ONE_MSEC);
2351 	} else if (!(nvp->nvp_state & NV_PORT_RESET_RETRY)) {
2352 		nv_monitor_reset(nvp);
2353 	}
2354 }
2355 
2356 
2357 /*
2358  * Initialize register handling specific to mcp51/mcp55
2359  */
2360 /* ARGSUSED */
2361 static void
2362 mcp5x_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2363 {
2364 	nv_port_t *nvp;
2365 	uchar_t *bar5  = nvc->nvc_bar_addr[5];
2366 	uint8_t off, port;
#ifdef NCQ
	uint32_t flags;
#endif
2367 
2368 	nvc->nvc_mcp5x_ctl = (uint32_t *)(bar5 + MCP5X_CTL);
2369 	nvc->nvc_mcp5x_ncq = (uint32_t *)(bar5 + MCP5X_NCQ);
2370 
2371 	for (port = 0, off = 0; port < NV_MAX_PORTS(nvc); port++, off += 2) {
2372 		nvp = &(nvc->nvc_port[port]);
2373 		nvp->nvp_mcp5x_int_status =
2374 		    (uint16_t *)(bar5 + MCP5X_INT_STATUS + off);
2375 		nvp->nvp_mcp5x_int_ctl =
2376 		    (uint16_t *)(bar5 + MCP5X_INT_CTL + off);
2377 
2378 		/*
2379 		 * clear any previous interrupts asserted
2380 		 */
2381 		nv_put16(nvc->nvc_bar_hdl[5], nvp->nvp_mcp5x_int_status,
2382 		    MCP5X_INT_CLEAR);
2383 
2384 		/*
2385 		 * These are the interrupts to accept for now.  The spec
2386 		 * says these are enable bits, but nvidia has indicated
2387 		 * these are masking bits.  Even though they may be masked
2388 		 * out to prevent asserting the main interrupt, they can
2389 		 * still be asserted while reading the interrupt status
2390 		 * register, so that needs to be considered in the interrupt
2391 		 * handler.
2392 		 */
2393 		nv_put16(nvc->nvc_bar_hdl[5], nvp->nvp_mcp5x_int_ctl,
2394 		    ~(MCP5X_INT_IGNORE));
2395 	}
2396 
2397 	/*
2398 	 * Allow the driver to program the BM on the first command instead
2399 	 * of waiting for an interrupt.
2400 	 */
2401 #ifdef NCQ
2402 	flags = MCP_SATA_AE_NCQ_PDEV_FIRST_CMD | MCP_SATA_AE_NCQ_SDEV_FIRST_CMD;
2403 	nv_put32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp5x_ncq, flags);
2404 	flags = MCP_SATA_AE_CTL_PRI_SWNCQ | MCP_SATA_AE_CTL_SEC_SWNCQ;
2405 	nv_put32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp5x_ctl, flags);
2406 #endif
2407 
2408 	/*
2409 	 * mcp55 rev A03 and above supports 40-bit physical addressing.
2410 	 * Enable DMA to take advantage of that.
2412 	 */
2413 	if (nvc->nvc_revid >= 0xa3) {
2414 		if (nv_sata_40bit_dma == B_TRUE) {
2415 			uint32_t reg32;
2416 			NVLOG(NVDBG_INIT, nvp->nvp_ctlp, nvp,
2417 			    "rev id is %X.  40-bit DMA addressing"
2418 			    " enabled", nvc->nvc_revid);
2419 			nvc->dma_40bit = B_TRUE;
2420 
2421 			reg32 = pci_config_get32(pci_conf_handle,
2422 			    NV_SATA_CFG_20);
2423 			pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
2424 			    reg32 | NV_40BIT_PRD);
2425 
2426 			/*
2427 			 * CFG_23 bits 0-7 contain the top 8 bits (of 40
2428 			 * bits) for the primary PRD table, and bits 8-15
2429 			 * contain the top 8 bits for the secondary.  Set
2430 			 * to zero because the DMA attribute table for PRD
2431 			 * allocation forces it into 32 bit address space
2432 			 * anyway.
2433 			 */
2434 			reg32 = pci_config_get32(pci_conf_handle,
2435 			    NV_SATA_CFG_23);
2436 			pci_config_put32(pci_conf_handle, NV_SATA_CFG_23,
2437 			    reg32 & 0xffff0000);
2438 		} else {
2439 			NVLOG(NVDBG_INIT, nvp->nvp_ctlp, nvp,
2440 			    "40-bit DMA disabled by nv_sata_40bit_dma", NULL);
2441 		}
2442 	} else {
2443 		nv_cmn_err(CE_NOTE, nvp->nvp_ctlp, nvp, "rev id is %X and is "
2444 		    "not capable of 40-bit DMA addressing", nvc->nvc_revid);
2445 	}
2446 }
2447 
2448 
2449 /*
2450  * Initialize register handling specific to ck804
2451  */
2452 static void
2453 ck804_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2454 {
2455 	uchar_t *bar5  = nvc->nvc_bar_addr[5];
2456 	uint32_t reg32;
2457 	uint16_t reg16;
2458 	nv_port_t *nvp;
2459 	int j;
2460 
2461 	/*
2462 	 * delay hotplug interrupts until PHYRDY.
2463 	 */
2464 	reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_42);
2465 	pci_config_put32(pci_conf_handle, NV_SATA_CFG_42,
2466 	    reg32 | CK804_CFG_DELAY_HOTPLUG_INTR);
2467 
2468 	/*
2469 	 * enable hot plug interrupts for channel x and y
2470 	 */
2471 	reg16 = nv_get16(nvc->nvc_bar_hdl[5],
2472 	    (uint16_t *)(bar5 + NV_ADMACTL_X));
2473 	nv_put16(nvc->nvc_bar_hdl[5], (uint16_t *)(bar5 + NV_ADMACTL_X),
2474 	    NV_HIRQ_EN | reg16);
2475 
2476 
2477 	reg16 = nv_get16(nvc->nvc_bar_hdl[5],
2478 	    (uint16_t *)(bar5 + NV_ADMACTL_Y));
2479 	nv_put16(nvc->nvc_bar_hdl[5], (uint16_t *)(bar5 + NV_ADMACTL_Y),
2480 	    NV_HIRQ_EN | reg16);
2481 
2482 	nvc->nvc_ck804_int_status = (uint8_t *)(bar5 + CK804_SATA_INT_STATUS);
2483 
2484 	/*
2485 	 * clear any existing pending interrupts, then enable
2486 	 */
2487 	for (j = 0; j < NV_MAX_PORTS(nvc); j++) {
2488 		nvp = &(nvc->nvc_port[j]);
2489 		mutex_enter(&nvp->nvp_mutex);
2490 		(*(nvp->nvp_ctlp->nvc_set_intr))(nvp,
2491 		    NV_INTR_CLEAR_ALL|NV_INTR_ENABLE);
2492 		mutex_exit(&nvp->nvp_mutex);
2493 	}
2494 }
2495 
2496 
2497 /*
2498  * Initialize the controller and set up driver data structures.
2499  * Determine whether the controller is ck804 or mcp5x class.
2500  */
2501 static int
2502 nv_init_ctl(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2503 {
2504 	struct sata_hba_tran stran;
2505 	nv_port_t *nvp;
2506 	int j, ck804;
2507 	uchar_t *cmd_addr, *ctl_addr, *bm_addr;
2508 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2509 	uchar_t *bar5  = nvc->nvc_bar_addr[5];
2510 	uint32_t reg32;
2511 	uint8_t reg8, reg8_save;
2512 
2513 	NVLOG(NVDBG_INIT, nvc, NULL, "nv_init_ctl entered", NULL);
2514 
2515 	ck804 = B_TRUE;
2516 #ifdef SGPIO_SUPPORT
2517 	nvc->nvc_mcp5x_flag = B_FALSE;
2518 #endif
2519 
2520 	/*
2521 	 * Need to set bit 2 to 1 at config offset 0x50
2522 	 * to enable access to the bar5 registers.
2523 	 */
2524 	reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
2525 	if (!(reg32 & NV_BAR5_SPACE_EN)) {
2526 		pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
2527 		    reg32 | NV_BAR5_SPACE_EN);
2528 	}
2529 
2530 	/*
2531 	 * Determine if this is ck804 or mcp5x.  ck804 maps the task file
2532 	 * registers into bar5 while mcp5x doesn't, so the corresponding
2533 	 * offset in mcp5x's space is unused.  Check one of the task file
2534 	 * registers to see if it is writable and reads back what was
2535 	 * written.  If it's mcp5x it will return 0xff, whereas ck804 will
2536 	 * return the value written.
2537 	 */
2538 	reg8_save = nv_get8(bar5_hdl,
2539 	    (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X));
2540 
2541 
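	/*
	 * Two different values are written and verified so that a register
	 * which happens to already contain the test value cannot cause a
	 * false identification.
	 */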
2542 	for (j = 1; j < 3; j++) {
2543 
2544 		nv_put8(bar5_hdl, (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X), j);
2545 		reg8 = nv_get8(bar5_hdl,
2546 		    (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X));
2547 
2548 		if (reg8 != j) {
2549 			ck804 = B_FALSE;
2550 			nvc->nvc_mcp5x_flag = B_TRUE;
2551 			break;
2552 		}
2553 	}
2554 
2555 	nv_put8(bar5_hdl, (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X), reg8_save);
2556 
2557 	if (ck804 == B_TRUE) {
2558 		NVLOG(NVDBG_INIT, nvc, NULL, "controller is CK804", NULL);
2559 		nvc->nvc_interrupt = ck804_intr;
2560 		nvc->nvc_reg_init = ck804_reg_init;
2561 		nvc->nvc_set_intr = ck804_set_intr;
2562 	} else {
2563 		NVLOG(NVDBG_INIT, nvc, NULL, "controller is MCP51/MCP55", NULL);
2564 		nvc->nvc_interrupt = mcp5x_intr;
2565 		nvc->nvc_reg_init = mcp5x_reg_init;
2566 		nvc->nvc_set_intr = mcp5x_set_intr;
2567 	}
2568 
2569 
2570 	stran.sata_tran_hba_rev = SATA_TRAN_HBA_REV;
2571 	stran.sata_tran_hba_dip = nvc->nvc_dip;
2572 	stran.sata_tran_hba_num_cports = NV_NUM_CPORTS;
2573 	stran.sata_tran_hba_features_support =
2574 	    SATA_CTLF_HOTPLUG | SATA_CTLF_ASN | SATA_CTLF_ATAPI;
2575 	stran.sata_tran_hba_qdepth = NV_QUEUE_SLOTS;
2576 	stran.sata_tran_probe_port = nv_sata_probe;
2577 	stran.sata_tran_start = nv_sata_start;
2578 	stran.sata_tran_abort = nv_sata_abort;
2579 	stran.sata_tran_reset_dport = nv_sata_reset;
2580 	stran.sata_tran_selftest = NULL;
2581 	stran.sata_tran_hotplug_ops = &nv_hotplug_ops;
2582 	stran.sata_tran_pwrmgt_ops = NULL;
2583 	stran.sata_tran_ioctl = NULL;
2584 	nvc->nvc_sata_hba_tran = stran;
2585 
2586 	nvc->nvc_port = kmem_zalloc(sizeof (nv_port_t) * NV_MAX_PORTS(nvc),
2587 	    KM_SLEEP);
2588 
2589 	/*
2590 	 * initialize registers common to all chipsets
2591 	 */
2592 	nv_common_reg_init(nvc);
2593 
2594 	for (j = 0; j < NV_MAX_PORTS(nvc); j++) {
2595 		nvp = &(nvc->nvc_port[j]);
2596 
2597 		cmd_addr = nvp->nvp_cmd_addr;
2598 		ctl_addr = nvp->nvp_ctl_addr;
2599 		bm_addr = nvp->nvp_bm_addr;
2600 
2601 		mutex_init(&nvp->nvp_mutex, NULL, MUTEX_DRIVER,
2602 		    DDI_INTR_PRI(nvc->nvc_intr_pri));
2603 
2604 		cv_init(&nvp->nvp_poll_cv, NULL, CV_DRIVER, NULL);
2605 
2606 		nvp->nvp_data	= cmd_addr + NV_DATA;
2607 		nvp->nvp_error	= cmd_addr + NV_ERROR;
2608 		nvp->nvp_feature = cmd_addr + NV_FEATURE;
2609 		nvp->nvp_count	= cmd_addr + NV_COUNT;
2610 		nvp->nvp_sect	= cmd_addr + NV_SECT;
2611 		nvp->nvp_lcyl	= cmd_addr + NV_LCYL;
2612 		nvp->nvp_hcyl	= cmd_addr + NV_HCYL;
2613 		nvp->nvp_drvhd	= cmd_addr + NV_DRVHD;
2614 		nvp->nvp_status	= cmd_addr + NV_STATUS;
2615 		nvp->nvp_cmd	= cmd_addr + NV_CMD;
2616 		nvp->nvp_altstatus = ctl_addr + NV_ALTSTATUS;
2617 		nvp->nvp_devctl	= ctl_addr + NV_DEVCTL;
2618 
2619 		nvp->nvp_bmicx	= bm_addr + BMICX_REG;
2620 		nvp->nvp_bmisx	= bm_addr + BMISX_REG;
2621 		nvp->nvp_bmidtpx = (uint32_t *)(bm_addr + BMIDTPX_REG);
2622 
2623 		nvp->nvp_state = 0;
2624 
2625 		/*
2626 		 * Initialize dma handles, etc.
2627 		 * If it fails, the port is in inactive state.
2628 		 */
2629 		(void) nv_init_port(nvp);
2630 	}
2631 
2632 	/*
2633 	 * initialize registers by calling chip-specific reg initialization
2634 	 */
2635 	(*(nvc->nvc_reg_init))(nvc, pci_conf_handle);
2636 
2637 	/* initialize the hba dma attribute */
2638 	if (nvc->dma_40bit == B_TRUE)
2639 		nvc->nvc_sata_hba_tran.sata_tran_hba_dma_attr =
2640 		    &buffer_dma_40bit_attr;
2641 	else
2642 		nvc->nvc_sata_hba_tran.sata_tran_hba_dma_attr =
2643 		    &buffer_dma_attr;
2644 
2645 	return (NV_SUCCESS);
2646 }
2647 
2648 
2649 /*
2650  * Initialize data structures with enough slots to handle queuing, if
2651  * enabled.  NV_QUEUE_SLOTS will be set to 1 or 32, depending on whether
2652  * NCQ support is built into the driver and enabled.  It might have been
2653  * better to derive the true size from the drive itself, but the sata
2654  * module only sends down that information on the first NCQ command,
2655  * which means possibly re-sizing the structures on an interrupt stack,
2656  * making error handling more messy.  The easy way is to just allocate
2657  * all 32 slots, which is what most drives support anyway.
2658  */
2659 static int
2660 nv_init_port(nv_port_t *nvp)
2661 {
2662 	nv_ctl_t *nvc = nvp->nvp_ctlp;
2663 	size_t	prd_size = sizeof (prde_t) * NV_DMA_NSEGS;
2664 	dev_info_t *dip = nvc->nvc_dip;
2665 	ddi_device_acc_attr_t dev_attr;
2666 	size_t buf_size;
2667 	ddi_dma_cookie_t cookie;
2668 	uint_t count;
2669 	int rc, i;
2670 
2671 	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
2672 	dev_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
2673 	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
2674 
2675 	if (nvp->nvp_state & NV_PORT_INIT) {
2676 		NVLOG(NVDBG_INIT, nvc, nvp,
2677 		    "nv_init_port previously initialized", NULL);
2678 
2679 		return (NV_SUCCESS);
2680 	} else {
2681 		NVLOG(NVDBG_INIT, nvc, nvp, "nv_init_port initializing", NULL);
2682 	}
2683 
2684 	nvp->nvp_sg_dma_hdl = kmem_zalloc(sizeof (ddi_dma_handle_t) *
2685 	    NV_QUEUE_SLOTS, KM_SLEEP);
2686 
2687 	nvp->nvp_sg_acc_hdl = kmem_zalloc(sizeof (ddi_acc_handle_t) *
2688 	    NV_QUEUE_SLOTS, KM_SLEEP);
2689 
2690 	nvp->nvp_sg_addr = kmem_zalloc(sizeof (caddr_t) *
2691 	    NV_QUEUE_SLOTS, KM_SLEEP);
2692 
2693 	nvp->nvp_sg_paddr = kmem_zalloc(sizeof (uint32_t) *
2694 	    NV_QUEUE_SLOTS, KM_SLEEP);
2695 
2696 	nvp->nvp_slot = kmem_zalloc(sizeof (nv_slot_t) * NV_QUEUE_SLOTS,
2697 	    KM_SLEEP);
2698 
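	/*
	 * Allocate and bind one PRD (scatter/gather) table per queue slot so
	 * that each outstanding command has its own DMA-able table.  Each
	 * table must bind to a single cookie with a 32-bit physical address,
	 * which the ASSERTs below verify.
	 */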
2699 	for (i = 0; i < NV_QUEUE_SLOTS; i++) {
2700 
2701 		rc = ddi_dma_alloc_handle(dip, &nv_prd_dma_attr,
2702 		    DDI_DMA_SLEEP, NULL, &(nvp->nvp_sg_dma_hdl[i]));
2703 
2704 		if (rc != DDI_SUCCESS) {
2705 			nv_uninit_port(nvp);
2706 
2707 			return (NV_FAILURE);
2708 		}
2709 
2710 		rc = ddi_dma_mem_alloc(nvp->nvp_sg_dma_hdl[i], prd_size,
2711 		    &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
2712 		    NULL, &(nvp->nvp_sg_addr[i]), &buf_size,
2713 		    &(nvp->nvp_sg_acc_hdl[i]));
2714 
2715 		if (rc != DDI_SUCCESS) {
2716 			nv_uninit_port(nvp);
2717 
2718 			return (NV_FAILURE);
2719 		}
2720 
2721 		rc = ddi_dma_addr_bind_handle(nvp->nvp_sg_dma_hdl[i], NULL,
2722 		    nvp->nvp_sg_addr[i], buf_size,
2723 		    DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
2724 		    DDI_DMA_SLEEP, NULL, &cookie, &count);
2725 
2726 		if (rc != DDI_DMA_MAPPED) {
2727 			nv_uninit_port(nvp);
2728 
2729 			return (NV_FAILURE);
2730 		}
2731 
2732 		ASSERT(count == 1);
2733 		ASSERT((cookie.dmac_address & (sizeof (int) - 1)) == 0);
2734 
2735 		ASSERT(cookie.dmac_laddress <= UINT32_MAX);
2736 
2737 		nvp->nvp_sg_paddr[i] = cookie.dmac_address;
2738 	}
2739 
2740 	/*
2741 	 * nvp_queue_depth represents the actual drive queue depth, not the
2742 	 * number of slots allocated in the structures (which may be more).
2743 	 * Actual queue depth is only learned after the first NCQ command, so
2744 	 * initialize it to 1 for now.
2745 	 */
2746 	nvp->nvp_queue_depth = 1;
2747 
2748 	/*
2749 	 * Port is initialized whether the device is attached or not.
2750 	 * Link processing and device identification will be started later,
2751 	 * after interrupts are initialized.
2752 	 */
2753 	nvp->nvp_type = SATA_DTYPE_NONE;
2754 	nvp->nvp_signature = 0;
2755 
2756 	nvp->nvp_state |= NV_PORT_INIT;
2757 
2758 	return (NV_SUCCESS);
2759 }
2760 
2761 
2762 /*
2763  * Establish initial link & device type
2764  * Called only from nv_attach
2765  * Loops up to approximately 210ms; can exit earlier.
2766  * The time includes waiting for the link to come up and completion of the
2767  * initial signature gathering operation.
2768  */
2769 static void
2770 nv_init_port_link_processing(nv_ctl_t *nvc)
2771 {
2772 	ddi_acc_handle_t bar5_hdl;
2773 	nv_port_t *nvp;
2774 	volatile uint32_t sstatus;
2775 	int port, links_up, ready_ports, i;
2776 
2777 
2778 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2779 		nvp = &(nvc->nvc_port[port]);
2780 		if (nvp != NULL && (nvp->nvp_state & NV_PORT_INIT)) {
2781 			/*
2782 			 * Initiate device identification, if any is attached
2783 			 * and reset was not already applied by hot-plug
2784 			 * event processing.
2785 			 */
2786 			mutex_enter(&nvp->nvp_mutex);
2787 			if (!(nvp->nvp_state & NV_PORT_RESET)) {
2788 				nvp->nvp_state |= NV_PORT_RESET | NV_PORT_PROBE;
2789 				nv_reset(nvp, "attach");
2790 			}
2791 			mutex_exit(&nvp->nvp_mutex);
2792 		}
2793 	}
2794 	/*
2795 	 * Wait up to 10ms for links up.
2796 	 * Spec says that link should be up in 1ms.
2797 	 */
2798 	for (i = 0; i < 10; i++) {
2799 		drv_usecwait(NV_ONE_MSEC);
2800 		links_up = 0;
2801 		for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2802 			nvp = &(nvc->nvc_port[port]);
2803 			mutex_enter(&nvp->nvp_mutex);
2804 			bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
2805 			sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
2806 			if ((SSTATUS_GET_IPM(sstatus) == SSTATUS_IPM_ACTIVE) &&
2807 			    (SSTATUS_GET_DET(sstatus) ==
2808 			    SSTATUS_DET_DEVPRE_PHYCOM)) {
2809 				if ((nvp->nvp_state & NV_PORT_RESET) &&
2810 				    nvp->nvp_type == SATA_DTYPE_NONE) {
2811 					nvp->nvp_type = SATA_DTYPE_UNKNOWN;
2812 				}
2813 				NVLOG(NVDBG_INIT, nvc, nvp,
2814 				    "nv_init_port_link_processing() "
2815 				    "link up; time from reset %dms",
2816 				    TICK_TO_MSEC(ddi_get_lbolt() -
2817 				    nvp->nvp_reset_time));
2818 				links_up++;
2819 			}
2820 			mutex_exit(&nvp->nvp_mutex);
2821 		}
2822 		if (links_up == NV_MAX_PORTS(nvc)) {
2823 			break;
2824 		}
2825 	}
2826 	NVLOG(NVDBG_RESET, nvc, nvp, "nv_init_port_link_processing(): "
2827 	    "%d links up", links_up);
2828 	/*
2829 	 * At this point, if any device is attached, the link is established.
2830 	 * Wait till devices are ready to be accessed, no more than 200ms.
2831 	 * 200ms is an empirical time in which a signature should be available.
2832 	 */
2833 	for (i = 0; i < 200; i++) {
2834 		ready_ports = 0;
2835 		for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2836 			nvp = &(nvc->nvc_port[port]);
2837 			mutex_enter(&nvp->nvp_mutex);
2838 			bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
2839 			sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
2840 			if ((SSTATUS_GET_IPM(sstatus) == SSTATUS_IPM_ACTIVE) &&
2841 			    (SSTATUS_GET_DET(sstatus) ==
2842 			    SSTATUS_DET_DEVPRE_PHYCOM) &&
2843 			    !(nvp->nvp_state & (NV_PORT_RESET |
2844 			    NV_PORT_RESET_RETRY))) {
2845 				/*
2846 				 * Reset already processed
2847 				 */
2848 				NVLOG(NVDBG_RESET, nvc, nvp,
2849 				    "nv_init_port_link_processing() "
2850 				    "device ready; port state %x; "
2851 				    "time from reset %dms", nvp->nvp_state,
2852 				    TICK_TO_MSEC(ddi_get_lbolt() -
2853 				    nvp->nvp_reset_time));
2854 
2855 				ready_ports++;
2856 			}
2857 			mutex_exit(&nvp->nvp_mutex);
2858 		}
2859 		if (ready_ports == links_up) {
2860 			break;
2861 		}
2862 		drv_usecwait(NV_ONE_MSEC);
2863 	}
2864 	NVLOG(NVDBG_RESET, nvc, nvp, "nv_init_port_link_processing(): "
2865 	    "%d devices ready", ready_ports);
2866 }
2867 
2868 /*
2869  * Free dynamically allocated structures for port.
2870  */
2871 static void
2872 nv_uninit_port(nv_port_t *nvp)
2873 {
2874 	int i;
2875 
2876 	/*
2877 	 * It is possible to reach here before a port has been initialized or
2878 	 * after it has already been uninitialized.  Just return in that case.
2879 	 */
2880 	if (nvp->nvp_slot == NULL) {
2881 
2882 		return;
2883 	}
2884 	/*
2885 	 * Mark port unusable now.
2886 	 */
2887 	nvp->nvp_state &= ~NV_PORT_INIT;
2888 
2889 	NVLOG(NVDBG_INIT, nvp->nvp_ctlp, nvp,
2890 	    "nv_uninit_port uninitializing", NULL);
2891 
2892 #ifdef SGPIO_SUPPORT
2893 	if (nvp->nvp_type == SATA_DTYPE_ATADISK) {
2894 		nv_sgp_drive_disconnect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
2895 		    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
2896 	}
2897 #endif
2898 
2899 	nvp->nvp_type = SATA_DTYPE_NONE;
2900 
2901 	for (i = 0; i < NV_QUEUE_SLOTS; i++) {
2902 		if (nvp->nvp_sg_paddr[i]) {
2903 			(void) ddi_dma_unbind_handle(nvp->nvp_sg_dma_hdl[i]);
2904 		}
2905 
2906 		if (nvp->nvp_sg_acc_hdl[i] != NULL) {
2907 			ddi_dma_mem_free(&(nvp->nvp_sg_acc_hdl[i]));
2908 		}
2909 
2910 		if (nvp->nvp_sg_dma_hdl[i] != NULL) {
2911 			ddi_dma_free_handle(&(nvp->nvp_sg_dma_hdl[i]));
2912 		}
2913 	}
2914 
2915 	kmem_free(nvp->nvp_slot, sizeof (nv_slot_t) * NV_QUEUE_SLOTS);
2916 	nvp->nvp_slot = NULL;
2917 
2918 	kmem_free(nvp->nvp_sg_dma_hdl,
2919 	    sizeof (ddi_dma_handle_t) * NV_QUEUE_SLOTS);
2920 	nvp->nvp_sg_dma_hdl = NULL;
2921 
2922 	kmem_free(nvp->nvp_sg_acc_hdl,
2923 	    sizeof (ddi_acc_handle_t) * NV_QUEUE_SLOTS);
2924 	nvp->nvp_sg_acc_hdl = NULL;
2925 
2926 	kmem_free(nvp->nvp_sg_addr, sizeof (caddr_t) * NV_QUEUE_SLOTS);
2927 	nvp->nvp_sg_addr = NULL;
2928 
2929 	kmem_free(nvp->nvp_sg_paddr, sizeof (uint32_t) * NV_QUEUE_SLOTS);
2930 	nvp->nvp_sg_paddr = NULL;
2931 }
2932 
2933 
2934 /*
2935  * Cache register offsets and access handles to frequently accessed registers
2936  * which are common to both chipsets.
2937  */
2938 static void
2939 nv_common_reg_init(nv_ctl_t *nvc)
2940 {
2941 	uchar_t *bar5_addr = nvc->nvc_bar_addr[5];
2942 	uchar_t *bm_addr_offset, *sreg_offset;
2943 	uint8_t bar, port;
2944 	nv_port_t *nvp;
2945 
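	/*
	 * Each port's legacy task file registers live in separate BARs:
	 * BAR 0/1 for port 0 and BAR 2/3 for port 1.  The ports share BAR 4
	 * for the bus master registers (the second channel at offset 8) and
	 * BAR 5 for the SStatus/SError/SActive/SControl registers.
	 */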
2946 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2947 		if (port == 0) {
2948 			bar = NV_BAR_0;
2949 			bm_addr_offset = 0;
2950 			sreg_offset = (uchar_t *)(CH0_SREG_OFFSET + bar5_addr);
2951 		} else {
2952 			bar = NV_BAR_2;
2953 			bm_addr_offset = (uchar_t *)8;
2954 			sreg_offset = (uchar_t *)(CH1_SREG_OFFSET + bar5_addr);
2955 		}
2956 
2957 		nvp = &(nvc->nvc_port[port]);
2958 		nvp->nvp_ctlp = nvc;
2959 		nvp->nvp_port_num = port;
2960 		NVLOG(NVDBG_INIT, nvc, nvp, "setting up port mappings", NULL);
2961 
2962 		nvp->nvp_cmd_hdl = nvc->nvc_bar_hdl[bar];
2963 		nvp->nvp_cmd_addr = nvc->nvc_bar_addr[bar];
2964 		nvp->nvp_ctl_hdl = nvc->nvc_bar_hdl[bar + 1];
2965 		nvp->nvp_ctl_addr = nvc->nvc_bar_addr[bar + 1];
2966 		nvp->nvp_bm_hdl = nvc->nvc_bar_hdl[NV_BAR_4];
2967 		nvp->nvp_bm_addr = nvc->nvc_bar_addr[NV_BAR_4] +
2968 		    (long)bm_addr_offset;
2969 
2970 		nvp->nvp_sstatus = (uint32_t *)(sreg_offset + NV_SSTATUS);
2971 		nvp->nvp_serror = (uint32_t *)(sreg_offset + NV_SERROR);
2972 		nvp->nvp_sactive = (uint32_t *)(sreg_offset + NV_SACTIVE);
2973 		nvp->nvp_sctrl = (uint32_t *)(sreg_offset + NV_SCTRL);
2974 	}
2975 }
2976 
2977 
2978 static void
2979 nv_uninit_ctl(nv_ctl_t *nvc)
2980 {
2981 	int port;
2982 	nv_port_t *nvp;
2983 
2984 	NVLOG(NVDBG_INIT, nvc, NULL, "nv_uninit_ctl entered", NULL);
2985 
2986 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2987 		nvp = &(nvc->nvc_port[port]);
2988 		mutex_enter(&nvp->nvp_mutex);
2989 		NVLOG(NVDBG_INIT, nvc, nvp, "uninitializing port", NULL);
2990 		nv_uninit_port(nvp);
2991 		mutex_exit(&nvp->nvp_mutex);
2992 		mutex_destroy(&nvp->nvp_mutex);
2993 		cv_destroy(&nvp->nvp_poll_cv);
2994 	}
2995 
2996 	kmem_free(nvc->nvc_port, NV_MAX_PORTS(nvc) * sizeof (nv_port_t));
2997 	nvc->nvc_port = NULL;
2998 }
2999 
3000 
3001 /*
3002  * ck804 interrupt.  This is a wrapper around ck804_intr_process so
3003  * that interrupts from other devices can be disregarded while dtracing.
3004  */
3005 /* ARGSUSED */
3006 static uint_t
3007 ck804_intr(caddr_t arg1, caddr_t arg2)
3008 {
3009 	nv_ctl_t *nvc = (nv_ctl_t *)arg1;
3010 	uint8_t intr_status;
3011 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
3012 
3013 	if (nvc->nvc_state & NV_CTRL_SUSPEND)
3014 		return (DDI_INTR_UNCLAIMED);
3015 
3016 	intr_status = ddi_get8(bar5_hdl, nvc->nvc_ck804_int_status);
3017 
3018 	if (intr_status == 0) {
3019 
3020 		return (DDI_INTR_UNCLAIMED);
3021 	}
3022 
3023 	ck804_intr_process(nvc, intr_status);
3024 
3025 	return (DDI_INTR_CLAIMED);
3026 }
3027 
3028 
3029 /*
3030  * Main interrupt handler for ck804.  Handles normal device
3031  * interrupts as well as port hot plug and remove interrupts.
3032  *
3033  */
3034 static void
3035 ck804_intr_process(nv_ctl_t *nvc, uint8_t intr_status)
3036 {
3037 
3038 	int port, i;
3039 	nv_port_t *nvp;
3040 	nv_slot_t *nv_slotp;
3041 	uchar_t	status;
3042 	sata_pkt_t *spkt;
3043 	uint8_t bmstatus, clear_bits;
3044 	ddi_acc_handle_t bmhdl;
3045 	int nvcleared = 0;
3046 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
3047 	uint32_t sstatus;
3048 	int port_mask_hot[] = {
3049 		CK804_INT_PDEV_HOT, CK804_INT_SDEV_HOT,
3050 	};
3051 	int port_mask_pm[] = {
3052 		CK804_INT_PDEV_PM, CK804_INT_SDEV_PM,
3053 	};
3054 
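	/*
	 * The arrays above map the port index (0 = primary, 1 = secondary)
	 * to the corresponding hot-plug and power-management bits in the
	 * ck804 interrupt status register.
	 */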
3055 	NVLOG(NVDBG_INTR, nvc, NULL,
3056 	    "ck804_intr_process entered intr_status=%x", intr_status);
3057 
3058 	/*
3059 	 * For a command completion interrupt, an explicit clear is not
3060 	 * required.  However, for the error cases an explicit clear is performed.
3061 	 */
3062 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
3063 
3064 		int port_mask[] = {CK804_INT_PDEV_INT, CK804_INT_SDEV_INT};
3065 
3066 		if ((port_mask[port] & intr_status) == 0) {
3067 			continue;
3068 		}
3069 
3070 		NVLOG(NVDBG_INTR, nvc, NULL,
3071 		    "ck804_intr_process interrupt on port %d", port);
3072 
3073 		nvp = &(nvc->nvc_port[port]);
3074 
3075 		mutex_enter(&nvp->nvp_mutex);
3076 
3077 		/*
3078 		 * A corner case was found where an interrupt
3079 		 * arrived before nvp_slot was set.  Should
3080 		 * probably track down why that happens, try
3081 		 * to eliminate that source, and then get rid of this
3082 		 * check.
3083 		 */
3084 		if (nvp->nvp_slot == NULL) {
3085 			status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_status);
3086 			NVLOG(NVDBG_ALWAYS, nvc, nvp, "spurious interrupt "
3087 			    "received before initialization "
3088 			    "completed status=%x", status);
3089 			mutex_exit(&nvp->nvp_mutex);
3090 
3091 			/*
3092 			 * clear interrupt bits
3093 			 */
3094 			nv_put8(bar5_hdl, nvc->nvc_ck804_int_status,
3095 			    port_mask[port]);
3096 
3097 			continue;
3098 		}
3099 
3100 		if ((&(nvp->nvp_slot[0]))->nvslot_spkt == NULL)  {
3101 			status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_status);
3102 			NVLOG(NVDBG_ALWAYS, nvc, nvp, "spurious interrupt "
3103 			    "no command in progress status=%x", status);
3104 			mutex_exit(&nvp->nvp_mutex);
3105 
3106 			/*
3107 			 * clear interrupt bits
3108 			 */
3109 			nv_put8(bar5_hdl, nvc->nvc_ck804_int_status,
3110 			    port_mask[port]);
3111 
3112 			continue;
3113 		}
3114 
3115 		bmhdl = nvp->nvp_bm_hdl;
3116 		bmstatus = nv_get8(bmhdl, nvp->nvp_bmisx);
3117 
3118 		if (!(bmstatus & BMISX_IDEINTS)) {
3119 			mutex_exit(&nvp->nvp_mutex);
3120 
3121 			continue;
3122 		}
3123 
3124 		status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
3125 
3126 		if (status & SATA_STATUS_BSY) {
3127 			mutex_exit(&nvp->nvp_mutex);
3128 
3129 			continue;
3130 		}
3131 
3132 		nv_slotp = &(nvp->nvp_slot[0]);
3133 
3134 		ASSERT(nv_slotp);
3135 
3136 		spkt = nv_slotp->nvslot_spkt;
3137 
3138 		if (spkt == NULL) {
3139 			mutex_exit(&nvp->nvp_mutex);
3140 
3141 			continue;
3142 		}
3143 
3144 		(*nv_slotp->nvslot_intr)(nvp, nv_slotp);
3145 
3146 		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
3147 
3148 		if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
3149 
3150 			nv_complete_io(nvp, spkt, 0);
3151 		}
3152 
3153 		mutex_exit(&nvp->nvp_mutex);
3154 	}
3155 
3156 	/*
3157 	 * ck804 often doesn't correctly distinguish hot add/remove
3158 	 * interrupts.  Frequently both the ADD and the REMOVE bits
3159 	 * are asserted, whether it was a remove or add.  Use sstatus
3160 	 * to distinguish hot add from hot remove.
3161 	 */
3162 
3163 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
3164 		clear_bits = 0;
3165 
3166 		nvp = &(nvc->nvc_port[port]);
3167 		mutex_enter(&nvp->nvp_mutex);
3168 
3169 		if ((port_mask_pm[port] & intr_status) != 0) {
3170 			clear_bits = port_mask_pm[port];
3171 			NVLOG(NVDBG_HOT, nvc, nvp,
3172 			    "clearing PM interrupt bit: %x",
3173 			    intr_status & port_mask_pm[port]);
3174 		}
3175 
3176 		if ((port_mask_hot[port] & intr_status) == 0) {
3177 			if (clear_bits != 0) {
3178 				goto clear;
3179 			} else {
3180 				mutex_exit(&nvp->nvp_mutex);
3181 				continue;
3182 			}
3183 		}
3184 
3185 		/*
3186 		 * reaching here means there was a hot add or remove.
3187 		 */
3188 		clear_bits |= port_mask_hot[port];
3189 
3190 		ASSERT(nvc->nvc_port[port].nvp_sstatus);
3191 
3192 		sstatus = nv_get32(bar5_hdl,
3193 		    nvc->nvc_port[port].nvp_sstatus);
3194 
3195 		if ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) ==
3196 		    SSTATUS_DET_DEVPRE_PHYCOM) {
3197 			nv_report_add_remove(nvp, 0);
3198 		} else {
3199 			nv_report_add_remove(nvp, NV_PORT_HOTREMOVED);
3200 		}
3201 	clear:
3202 		/*
3203 		 * clear interrupt bits.  explicit interrupt clear is
3204 		 * required for hotplug interrupts.
3205 		 */
3206 		nv_put8(bar5_hdl, nvc->nvc_ck804_int_status, clear_bits);
3207 
3208 		/*
3209 		 * make sure it's flushed and cleared.  If not try
3210 		 * again.  Sometimes it has been observed to not clear
3211 		 * on the first try.
3212 		 */
3213 		intr_status = nv_get8(bar5_hdl, nvc->nvc_ck804_int_status);
3214 
3215 		/*
3216 		 * make 10 additional attempts to clear the interrupt
3217 		 */
3218 		for (i = 0; (intr_status & clear_bits) && (i < 10); i++) {
3219 			NVLOG(NVDBG_ALWAYS, nvc, nvp, "intr_status=%x "
3220 			    "still not clear try=%d", intr_status,
3221 			    ++nvcleared);
3222 			nv_put8(bar5_hdl, nvc->nvc_ck804_int_status,
3223 			    clear_bits);
3224 			intr_status = nv_get8(bar5_hdl,
3225 			    nvc->nvc_ck804_int_status);
3226 		}
3227 
3228 		/*
3229 		 * if still not clear, log a message and disable the
3230 		 * port. highly unlikely that this path is taken, but it
3231 		 * gives protection against a wedged interrupt.
3232 		 */
3233 		if (intr_status & clear_bits) {
3234 			(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
3235 			nv_port_state_change(nvp, SATA_EVNT_PORT_FAILED,
3236 			    SATA_ADDR_CPORT, SATA_PSTATE_FAILED);
3237 			nvp->nvp_state |= NV_PORT_FAILED;
3238 			(void) nv_abort_active(nvp, NULL, SATA_PKT_DEV_ERROR,
3239 			    B_TRUE);
3240 			nv_cmn_err(CE_WARN, nvc, nvp, "unable to clear "
3241 			    "interrupt.  disabling port intr_status=%X",
3242 			    intr_status);
3243 		}
3244 
3245 		mutex_exit(&nvp->nvp_mutex);
3246 	}
3247 }
3248 
3249 
3250 /*
3251  * Interrupt handler for mcp5x.  It is invoked by the wrapper for each port
3252  * on the controller, to handle completion and hot plug and remove events.
3253  *
3254  */
3255 static uint_t
3256 mcp5x_intr_port(nv_port_t *nvp)
3257 {
3258 	nv_ctl_t *nvc = nvp->nvp_ctlp;
3259 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
3260 	uint8_t clear = 0, intr_cycles = 0;
3261 	int ret = DDI_INTR_UNCLAIMED;
3262 	uint16_t int_status;
3263 	clock_t intr_time;
3264 	int loop_cnt = 0;
3265 
3266 	nvp->intr_start_time = ddi_get_lbolt();
3267 
3268 	NVLOG(NVDBG_INTR, nvc, nvp, "mcp55_intr_port entered", NULL);
3269 
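	/*
	 * Keep processing until the per-port interrupt status register reads
	 * back clear, so events that arrive during processing are handled in
	 * the same invocation.  The loop is bounded by nv_max_intr_loops as
	 * protection against a stuck interrupt source.
	 */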
3270 	do {
3271 		/*
3272 		 * read current interrupt status
3273 		 */
3274 		int_status = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_status);
3275 
3276 		NVLOG(NVDBG_INTR, nvc, nvp, "int_status = %x", int_status);
3277 
3278 		/*
3279 		 * MCP5X_INT_IGNORE interrupts will show up in the status,
3280 		 * but are masked out from causing an interrupt to be generated
3281 		 * to the processor.  Ignore them here by masking them out.
3282 		 */
3283 		int_status &= ~(MCP5X_INT_IGNORE);
3284 
3285 		/*
3286 		 * exit the loop when no more interrupts to process
3287 		 */
3288 		if (int_status == 0) {
3289 
3290 			break;
3291 		}
3292 
3293 		if (int_status & MCP5X_INT_COMPLETE) {
3294 			NVLOG(NVDBG_INTR, nvc, nvp,
3295 			    "mcp5x_packet_complete_intr", NULL);
3296 			/*
3297 			 * since int_status was set, return DDI_INTR_CLAIMED
3298 			 * from the DDI's perspective even though the packet
3299 			 * completion may not have succeeded.  If it fails,
3300 			 * need to manually clear the interrupt, otherwise
3301 			 * clearing is implicit.
3302 			 */
3303 			ret = DDI_INTR_CLAIMED;
3304 			if (mcp5x_packet_complete_intr(nvc, nvp) ==
3305 			    NV_FAILURE) {
3306 				clear |= MCP5X_INT_COMPLETE;
3307 			} else {
3308 				intr_cycles = 0;
3309 			}
3310 		}
3311 
3312 		if (int_status & MCP5X_INT_DMA_SETUP) {
3313 			NVLOG(NVDBG_INTR, nvc, nvp, "mcp5x_dma_setup_intr",
3314 			    NULL);
3315 
3316 			/*
3317 			 * Needs to be cleared before starting the BM, so do it
3318 			 * now.  Make sure this is still working.
3319 			 */
3320 			nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status,
3321 			    MCP5X_INT_DMA_SETUP);
3322 #ifdef NCQ
3323 			ret = mcp5x_dma_setup_intr(nvc, nvp);
3324 #endif
3325 		}
3326 
3327 		if (int_status & MCP5X_INT_REM) {
3328 			clear |= MCP5X_INT_REM;
3329 			ret = DDI_INTR_CLAIMED;
3330 
3331 			mutex_enter(&nvp->nvp_mutex);
3332 			nv_report_add_remove(nvp, NV_PORT_HOTREMOVED);
3333 			mutex_exit(&nvp->nvp_mutex);
3334 
3335 		} else if (int_status & MCP5X_INT_ADD) {
3336 			clear |= MCP5X_INT_ADD;
3337 			ret = DDI_INTR_CLAIMED;
3338 
3339 			mutex_enter(&nvp->nvp_mutex);
3340 			nv_report_add_remove(nvp, 0);
3341 			mutex_exit(&nvp->nvp_mutex);
3342 		}
3343 		if (clear) {
3344 			nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status, clear);
3345 			clear = 0;
3346 		}
3347 		/* Protect against a stuck interrupt */
3348 		if (intr_cycles++ == NV_MAX_INTR_LOOP) {
3349 			nv_cmn_err(CE_WARN, nvc, nvp, "excessive interrupt "
3350 			    "processing.  Disabling port int_status=%X"
3351 			    " clear=%X", int_status, clear);
3352 			mutex_enter(&nvp->nvp_mutex);
3353 			(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
3354 			nv_port_state_change(nvp, SATA_EVNT_PORT_FAILED,
3355 			    SATA_ADDR_CPORT, SATA_PSTATE_FAILED);
3356 			nvp->nvp_state |= NV_PORT_FAILED;
3357 			(void) nv_abort_active(nvp, NULL, SATA_PKT_DEV_ERROR,
3358 			    B_TRUE);
3359 			mutex_exit(&nvp->nvp_mutex);
3360 		}
3361 
3362 	} while (loop_cnt++ < nv_max_intr_loops);
3363 
3364 	if (loop_cnt > nvp->intr_loop_cnt) {
3365 		NVLOG(NVDBG_INTR, nvp->nvp_ctlp, nvp,
3366 		    "Exiting with multiple intr loop count %d", loop_cnt);
3367 		nvp->intr_loop_cnt = loop_cnt;
3368 	}
3369 
3370 	if ((nv_debug_flags & (NVDBG_INTR | NVDBG_VERBOSE)) ==
3371 	    (NVDBG_INTR | NVDBG_VERBOSE)) {
3372 		uint8_t status, bmstatus;
3373 		uint16_t int_status2;
3374 
3375 		if (int_status & MCP5X_INT_COMPLETE) {
3376 			status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
3377 			bmstatus = nv_get8(nvp->nvp_bm_hdl, nvp->nvp_bmisx);
3378 			int_status2 = nv_get16(nvp->nvp_ctlp->nvc_bar_hdl[5],
3379 			    nvp->nvp_mcp5x_int_status);
3380 			NVLOG(NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
3381 			    "mcp55_intr_port: Exiting with altstatus %x, "
3382 			    "bmicx %x, int_status2 %X, int_status %X, ret %x,"
3383 			    " loop_cnt %d ", status, bmstatus, int_status2,
3384 			    int_status, ret, loop_cnt);
3385 		}
3386 	}
3387 
3388 	NVLOG(NVDBG_INTR, nvc, nvp, "mcp55_intr_port: finished ret=%d", ret);
3389 
3390 	/*
3391 	 * To facilitate debugging, keep track of the length of time spent in
3392 	 * the port interrupt routine.
3393 	 */
3394 	intr_time = ddi_get_lbolt() - nvp->intr_start_time;
3395 	if (intr_time > nvp->intr_duration)
3396 		nvp->intr_duration = intr_time;
3397 
3398 	return (ret);
3399 }
3400 
3401 
3402 /* ARGSUSED */
3403 static uint_t
3404 mcp5x_intr(caddr_t arg1, caddr_t arg2)
3405 {
3406 	nv_ctl_t *nvc = (nv_ctl_t *)arg1;
3407 	int ret;
3408 
3409 	if (nvc->nvc_state & NV_CTRL_SUSPEND)
3410 		return (DDI_INTR_UNCLAIMED);
3411 
3412 	ret = mcp5x_intr_port(&(nvc->nvc_port[0]));
3413 	ret |= mcp5x_intr_port(&(nvc->nvc_port[1]));
3414 
3415 	return (ret);
3416 }
3417 
3418 
3419 #ifdef NCQ
3420 /*
3421  * with software driven NCQ on mcp5x, an interrupt occurs right
3422  * before the drive is ready to do a DMA transfer.  At this point,
3423  * the PRD table needs to be programmed and the DMA engine enabled
3424  * and ready to go.
3425  *
3426  * -- MCP_SATA_AE_INT_STATUS_SDEV_DMA_SETUP indicates the interrupt
3427  * -- MCP_SATA_AE_NCQ_PDEV_DMA_SETUP_TAG shows which command is ready
3428  * -- clear bit 0 of master command reg
3429  * -- program PRD
3430  * -- clear the interrupt status bit for the DMA Setup FIS
3431  * -- set bit 0 of the bus master command register
3432  */
3433 static int
3434 mcp5x_dma_setup_intr(nv_ctl_t *nvc, nv_port_t *nvp)
3435 {
3436 	int slot;
3437 	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3438 	uint8_t bmicx;
3439 	int port = nvp->nvp_port_num;
3440 	uint8_t tag_shift[] = {MCP_SATA_AE_NCQ_PDEV_DMA_SETUP_TAG_SHIFT,
3441 	    MCP_SATA_AE_NCQ_SDEV_DMA_SETUP_TAG_SHIFT};
3442 
3443 	nv_cmn_err(CE_PANIC, nvc, nvp,
3444 	    "this should not be executed at all until NCQ is enabled");
3445 
3446 	mutex_enter(&nvp->nvp_mutex);
3447 
3448 	slot = nv_get32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp5x_ncq);
3449 
3450 	slot = (slot >> tag_shift[port]) & MCP_SATA_AE_NCQ_DMA_SETUP_TAG_MASK;
3451 
3452 	NVLOG(NVDBG_INTR, nvc, nvp, "mcp5x_dma_setup_intr slot %d"
3453 	    " nvp_slot_sactive %X", slot, nvp->nvp_sactive_cache);
3454 
3455 	/*
3456 	 * halt the DMA engine.  This step is necessary according to
3457 	 * the mcp5x spec, probably since there may have been a "first" packet
3458 	 * that already programmed the DMA engine, but may not turn out to
3459 	 * be the first one processed.
3460 	 */
3461 	bmicx = nv_get8(bmhdl, nvp->nvp_bmicx);
3462 
3463 	if (bmicx & BMICX_SSBM) {
3464 		NVLOG(NVDBG_INTR, nvc, nvp, "BM was already enabled for "
3465 		    "another packet.  Cancelling and reprogramming", NULL);
3466 		nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
3467 	}
3468 	nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
3469 
3470 	nv_start_dma_engine(nvp, slot);
3471 
3472 	mutex_exit(&nvp->nvp_mutex);
3473 
3474 	return (DDI_INTR_CLAIMED);
3475 }
3476 #endif /* NCQ */
3477 
3478 
3479 /*
3480  * packet completion interrupt.  If the packet is complete, invoke
3481  * the packet completion callback.
3482  */
3483 static int
3484 mcp5x_packet_complete_intr(nv_ctl_t *nvc, nv_port_t *nvp)
3485 {
3486 	uint8_t status, bmstatus;
3487 	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3488 	int sactive;
3489 	int active_pkt_bit = 0, active_pkt = 0, ncq_command = B_FALSE;
3490 	sata_pkt_t *spkt;
3491 	nv_slot_t *nv_slotp;
3492 
3493 	mutex_enter(&nvp->nvp_mutex);
3494 
3495 	bmstatus = nv_get8(bmhdl, nvp->nvp_bmisx);
3496 
3497 	if (!(bmstatus & (BMISX_IDEINTS | BMISX_IDERR))) {
3498 		NVLOG(NVDBG_INTR, nvc, nvp, "BMISX_IDEINTS not set", NULL);
3499 		mutex_exit(&nvp->nvp_mutex);
3500 
3501 		return (NV_FAILURE);
3502 	}
3503 
3504 	/*
3505 	 * Commands may have been processed by abort or timeout before
3506 	 * interrupt processing acquired the mutex. So we may be processing
3507 	 * an interrupt for packets that were already removed.
3508 	 * For functioning NCQ processing all slots may be checked, but
3509 	 * with NCQ disabled (current code), relying on *_run flags is OK.
3510 	 */
3511 	if (nvp->nvp_non_ncq_run) {
3512 		/*
3513 		 * If the just completed item is a non-ncq command, the busy
3514 		 * bit should not be set
3515 		 */
3516 		status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
3517 		if (status & SATA_STATUS_BSY) {
3518 			nv_cmn_err(CE_WARN, nvc, nvp,
3519 			    "unexpected SATA_STATUS_BSY set");
3520 			mutex_exit(&nvp->nvp_mutex);
3521 			/*
3522 			 * The calling function will clear the interrupt.  Then
3523 			 * the real interrupt will either arrive or the
3524 			 * packet timeout handling will take over and
3525 			 * reset.
3526 			 */
3527 			return (NV_FAILURE);
3528 		}
3529 		ASSERT(nvp->nvp_ncq_run == 0);
3530 	} else {
3531 		ASSERT(nvp->nvp_non_ncq_run == 0);
3532 		/*
3533 		 * Pre-NCQ code!
3534 		 * Nothing to do. The packet for the command that just
3535 		 * completed is already gone. Just clear the interrupt.
3536 		 */
3537 		(void) nv_bm_status_clear(nvp);
3538 		(void) nv_get8(nvp->nvp_cmd_hdl, nvp->nvp_status);
3539 		mutex_exit(&nvp->nvp_mutex);
3540 		return (NV_SUCCESS);
3541 
3542 		/*
3543 		 * NCQ: check for BSY here and wait if still busy before
3544 		 * continuing.  Rather than waiting for it to be cleared
3545 		 * when starting a packet and wasting CPU time, the starting
3546 		 * thread can exit immediately, but might have to spin here
3547 		 * for a bit.  Needs more work and experimentation.
3548 		 *
3549 		 */
3550 	}
3551 
3552 	/*
3553 	 * active_pkt_bit will represent the bitmap of the single completed
3554 	 * packet.  Because of the nature of sw assisted NCQ, only one
3555 	 * command will complete per interrupt.
3556 	 */
3557 
3558 	if (ncq_command == B_FALSE) {
3559 		active_pkt = 0;
3560 	} else {
3561 		/*
3562 		 * NCQ: determine which command just completed, by examining
3563 		 * which bit cleared in the register since last written.
3564 		 */
3565 		sactive = nv_get32(nvc->nvc_bar_hdl[5], nvp->nvp_sactive);
3566 
3567 		active_pkt_bit = ~sactive & nvp->nvp_sactive_cache;
3568 
3569 		ASSERT(active_pkt_bit);
3570 
3571 
3572 		/*
3573 		 * this failure path needs more work to handle the
3574 		 * error condition and recovery.
3575 		 */
3576 		if (active_pkt_bit == 0) {
3577 			ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3578 
3579 			nv_cmn_err(CE_CONT, nvc, nvp, "ERROR sactive = %X  "
3580 			    "nvp->nvp_sactive %X", sactive,
3581 			    nvp->nvp_sactive_cache);
3582 
3583 			(void) nv_get8(cmdhdl, nvp->nvp_status);
3584 
3585 			mutex_exit(&nvp->nvp_mutex);
3586 
3587 			return (NV_FAILURE);
3588 		}
3589 
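		/*
		 * Convert the single cleared-tag bit into a slot number by
		 * shifting right until the bit reaches position 0.
		 */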
3590 		for (active_pkt = 0; (active_pkt_bit & 0x1) != 0x1;
3591 		    active_pkt++, active_pkt_bit >>= 1) {
3592 		}
3593 
3594 		/*
3595 		 * make sure only one bit is ever turned on
3596 		 */
3597 		ASSERT(active_pkt_bit == 1);
3598 
3599 		nvp->nvp_sactive_cache &= ~(0x01 << active_pkt);
3600 	}
3601 
3602 	nv_slotp = &(nvp->nvp_slot[active_pkt]);
3603 
3604 	spkt = nv_slotp->nvslot_spkt;
3605 
3606 	ASSERT(spkt != NULL);
3607 
3608 	(*nv_slotp->nvslot_intr)(nvp, nv_slotp);
3609 
3610 	nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
3611 
3612 	if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
3613 
3614 		nv_complete_io(nvp, spkt, active_pkt);
3615 	}
3616 
3617 	mutex_exit(&nvp->nvp_mutex);
3618 
3619 	return (NV_SUCCESS);
3620 }
3621 
3622 
3623 static void
3624 nv_complete_io(nv_port_t *nvp, sata_pkt_t *spkt, int slot)
3625 {
3626 
3627 	ASSERT(MUTEX_HELD(&nvp->nvp_mutex));
3628 
3629 	if ((&(nvp->nvp_slot[slot]))->nvslot_flags & NVSLOT_NCQ) {
3630 		nvp->nvp_ncq_run--;
3631 	} else {
3632 		nvp->nvp_non_ncq_run--;
3633 	}
3634 
3635 	/*
3636 	 * Mark the packet slot idle before calling satapkt_comp so that
3637 	 * the slot can be reused.
3638 	 */
3639 	(&(nvp->nvp_slot[slot]))->nvslot_spkt = NULL;
3640 
3641 	if (spkt->satapkt_op_mode & SATA_OPMODE_SYNCH) {
3642 		/*
3643 		 * If this is not a polled mode cmd, which has an
3644 		 * active thread monitoring for completion, then signal
3645 		 * the sleeping thread that the cmd is complete.
3646 		 */
3647 		if ((spkt->satapkt_op_mode & SATA_OPMODE_POLLING) == 0) {
3648 			cv_signal(&nvp->nvp_poll_cv);
3649 		}
3650 
3651 		return;
3652 	}
3653 
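	/*
	 * Drop the port mutex while invoking the completion callback since
	 * the callback may call back into the driver and try to acquire it.
	 */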
3654 	if (spkt->satapkt_comp != NULL) {
3655 		mutex_exit(&nvp->nvp_mutex);
3656 		(*spkt->satapkt_comp)(spkt);
3657 		mutex_enter(&nvp->nvp_mutex);
3658 	}
3659 }
3660 
3661 
3662 /*
3663  * Check whether the packet is an NCQ command or not.  For an NCQ command,
3664  * start it if there is still room on the queue.  For a non-NCQ command,
3665  * only start it if no other command is running.
3666  */
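/*
 * Summary of the admission rules implemented below:
 *	- non-NCQ command: rejected (SATA_TRAN_QUEUE_FULL) if a non-NCQ
 *	  command or any NCQ command is already running; otherwise started.
 *	- NCQ command: rejected (SATA_TRAN_QUEUE_FULL) if a non-NCQ command
 *	  is running or nvp_ncq_run has reached nvp_queue_depth; otherwise
 *	  started.
 */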
3667 static int
3668 nv_start_async(nv_port_t *nvp, sata_pkt_t *spkt)
3669 {
3670 	uint8_t cmd, ncq;
3671 
3672 	NVLOG(NVDBG_ENTRY, nvp->nvp_ctlp, nvp, "nv_start_async: entry", NULL);
3673 
3674 	cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
3675 
3676 	ncq = ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
3677 	    (cmd == SATAC_READ_FPDMA_QUEUED));
3678 
3679 	if (ncq == B_FALSE) {
3680 
3681 		if ((nvp->nvp_non_ncq_run == 1) ||
3682 		    (nvp->nvp_ncq_run > 0)) {
3683 			/*
3684 			 * next command is non-ncq which can't run
3685 			 * concurrently.  exit and return queue full.
3686 			 */
3687 			spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3688 
3689 			return (SATA_TRAN_QUEUE_FULL);
3690 		}
3691 
3692 		return (nv_start_common(nvp, spkt));
3693 	}
3694 
3695 	/*
3696 	 * ncq == B_TRUE
3697 	 */
3698 	if (nvp->nvp_non_ncq_run == 1) {
3699 		/*
3700 		 * cannot start any NCQ commands when there
3701 		 * is a non-NCQ command running.
3702 		 */
3703 		spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3704 
3705 		return (SATA_TRAN_QUEUE_FULL);
3706 	}
3707 
3708 #ifdef NCQ
3709 	/*
3710 	 * this is not compiled in for now, as satapkt_device.satadev_qdepth
3711 	 * is being pulled out until NCQ support is addressed later
3712 	 *
3713 	 * nvp_queue_depth is initialized by the first NCQ command
3714 	 * received.
3715 	 */
3716 	if (nvp->nvp_queue_depth == 1) {
3717 		nvp->nvp_queue_depth =
3718 		    spkt->satapkt_device.satadev_qdepth;
3719 
3720 		ASSERT(nvp->nvp_queue_depth > 1);
3721 
3722 		NVLOG(NVDBG_ENTRY, nvp->nvp_ctlp, nvp,
3723 		    "nv_process_queue: nvp_queue_depth set to %d",
3724 		    nvp->nvp_queue_depth);
3725 	}
3726 #endif
3727 
3728 	if (nvp->nvp_ncq_run >= nvp->nvp_queue_depth) {
3729 		/*
3730 		 * max number of NCQ commands already active
3731 		 */
3732 		spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3733 
3734 		return (SATA_TRAN_QUEUE_FULL);
3735 	}
3736 
3737 	return (nv_start_common(nvp, spkt));
3738 }
3739 
3740 
3741 /*
3742  * configure INTx and legacy interrupts
3743  */
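/*
 * This follows the usual DDI fixed-interrupt setup sequence:
 * ddi_intr_get_nintrs() -> ddi_intr_alloc() -> ddi_intr_get_pri() ->
 * ddi_intr_add_handler() -> ddi_intr_enable(), unwinding with
 * ddi_intr_free() and kmem_free() if any step fails.
 */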
3744 static int
3745 nv_add_legacy_intrs(nv_ctl_t *nvc)
3746 {
3747 	dev_info_t	*devinfo = nvc->nvc_dip;
3748 	int		actual, count = 0;
3749 	int		x, y, rc, inum = 0;
3750 
3751 	NVLOG(NVDBG_INIT, nvc, NULL, "nv_add_legacy_intrs", NULL);
3752 
3753 	/*
3754 	 * get number of interrupts
3755 	 */
3756 	rc = ddi_intr_get_nintrs(devinfo, DDI_INTR_TYPE_FIXED, &count);
3757 	if ((rc != DDI_SUCCESS) || (count == 0)) {
3758 		NVLOG(NVDBG_INIT, nvc, NULL,
3759 		    "ddi_intr_get_nintrs() failed, "
3760 		    "rc %d count %d", rc, count);
3761 
3762 		return (DDI_FAILURE);
3763 	}
3764 
3765 	/*
3766 	 * allocate an array of interrupt handles
3767 	 */
3768 	nvc->nvc_intr_size = count * sizeof (ddi_intr_handle_t);
3769 	nvc->nvc_htable = kmem_zalloc(nvc->nvc_intr_size, KM_SLEEP);
3770 
3771 	/*
3772 	 * call ddi_intr_alloc()
3773 	 */
3774 	rc = ddi_intr_alloc(devinfo, nvc->nvc_htable, DDI_INTR_TYPE_FIXED,
3775 	    inum, count, &actual, DDI_INTR_ALLOC_STRICT);
3776 
3777 	if ((rc != DDI_SUCCESS) || (actual == 0)) {
3778 		nv_cmn_err(CE_WARN, nvc, NULL,
3779 		    "ddi_intr_alloc() failed, rc %d", rc);
3780 		kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3781 
3782 		return (DDI_FAILURE);
3783 	}
3784 
3785 	if (actual < count) {
3786 		nv_cmn_err(CE_WARN, nvc, NULL,
3787 		    "ddi_intr_alloc: requested: %d, received: %d",
3788 		    count, actual);
3789 
3790 		goto failure;
3791 	}
3792 
3793 	nvc->nvc_intr_cnt = actual;
3794 
3795 	/*
3796 	 * get intr priority
3797 	 */
3798 	if (ddi_intr_get_pri(nvc->nvc_htable[0], &nvc->nvc_intr_pri) !=
3799 	    DDI_SUCCESS) {
3800 		nv_cmn_err(CE_WARN, nvc, NULL, "ddi_intr_get_pri() failed");
3801 
3802 		goto failure;
3803 	}
3804 
3805 	/*
3806 	 * Test for high level mutex
3807 	 */
3808 	if (nvc->nvc_intr_pri >= ddi_intr_get_hilevel_pri()) {
3809 		nv_cmn_err(CE_WARN, nvc, NULL,
3810 		    "nv_add_legacy_intrs: high level intr not supported");
3811 
3812 		goto failure;
3813 	}
3814 
3815 	for (x = 0; x < actual; x++) {
3816 		if (ddi_intr_add_handler(nvc->nvc_htable[x],
3817 		    nvc->nvc_interrupt, (caddr_t)nvc, NULL) != DDI_SUCCESS) {
3818 			nv_cmn_err(CE_WARN, nvc, NULL,
3819 			    "ddi_intr_add_handler() failed");
3820 
3821 			goto failure;
3822 		}
3823 	}
3824 
3825 	/*
3826 	 * call ddi_intr_enable() for legacy interrupts
3827 	 */
3828 	for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3829 		(void) ddi_intr_enable(nvc->nvc_htable[x]);
3830 	}
3831 
3832 	return (DDI_SUCCESS);
3833 
3834 	failure:
3835 	/*
3836 	 * free allocated intr and nvc_htable
3837 	 */
3838 	for (y = 0; y < actual; y++) {
3839 		(void) ddi_intr_free(nvc->nvc_htable[y]);
3840 	}
3841 
3842 	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3843 
3844 	return (DDI_FAILURE);
3845 }
3846 
3847 #ifdef	NV_MSI_SUPPORTED
3848 /*
3849  * configure MSI interrupts
3850  */
3851 static int
3852 nv_add_msi_intrs(nv_ctl_t *nvc)
3853 {
3854 	dev_info_t	*devinfo = nvc->nvc_dip;
3855 	int		count, avail, actual;
3856 	int		x, y, rc, inum = 0;
3857 
3858 	NVLOG(NVDBG_INIT, nvc, NULL, "nv_add_msi_intrs", NULL);
3859 
3860 	/*
3861 	 * get number of interrupts
3862 	 */
3863 	rc = ddi_intr_get_nintrs(devinfo, DDI_INTR_TYPE_MSI, &count);
3864 	if ((rc != DDI_SUCCESS) || (count == 0)) {
3865 		nv_cmn_err(CE_WARN, nvc, NULL,
3866 		    "ddi_intr_get_nintrs() failed, "
3867 		    "rc %d count %d", rc, count);
3868 
3869 		return (DDI_FAILURE);
3870 	}
3871 
3872 	/*
3873 	 * get number of available interrupts
3874 	 */
3875 	rc = ddi_intr_get_navail(devinfo, DDI_INTR_TYPE_MSI, &avail);
3876 	if ((rc != DDI_SUCCESS) || (avail == 0)) {
3877 		nv_cmn_err(CE_WARN, nvc, NULL,
3878 		    "ddi_intr_get_navail() failed, "
3879 		    "rc %d avail %d", rc, avail);
3880 
3881 		return (DDI_FAILURE);
3882 	}
3883 
3884 	if (avail < count) {
3885 		nv_cmn_err(CE_WARN, nvc, NULL,
3886 		    "ddi_intr_get_navail returned %d ddi_intr_get_nintrs: %d",
3887 		    avail, count);
3888 	}
3889 
3890 	/*
3891 	 * allocate an array of interrupt handles
3892 	 */
3893 	nvc->nvc_intr_size = count * sizeof (ddi_intr_handle_t);
3894 	nvc->nvc_htable = kmem_alloc(nvc->nvc_intr_size, KM_SLEEP);
3895 
3896 	rc = ddi_intr_alloc(devinfo, nvc->nvc_htable, DDI_INTR_TYPE_MSI,
3897 	    inum, count, &actual, DDI_INTR_ALLOC_NORMAL);
3898 
3899 	if ((rc != DDI_SUCCESS) || (actual == 0)) {
3900 		nv_cmn_err(CE_WARN, nvc, NULL,
3901 		    "ddi_intr_alloc() failed, rc %d", rc);
3902 		kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3903 
3904 		return (DDI_FAILURE);
3905 	}
3906 
3907 	/*
3908 	 * Use interrupt count returned or abort?
3909 	 */
3910 	if (actual < count) {
3911 		NVLOG(NVDBG_INIT, nvc, NULL,
3912 		    "Requested: %d, Received: %d", count, actual);
3913 	}
3914 
3915 	nvc->nvc_intr_cnt = actual;
3916 
3917 	/*
3918 	 * get priority for first msi, assume remaining are all the same
3919 	 */
3920 	if (ddi_intr_get_pri(nvc->nvc_htable[0], &nvc->nvc_intr_pri) !=
3921 	    DDI_SUCCESS) {
3922 		nv_cmn_err(CE_WARN, nvc, NULL, "ddi_intr_get_pri() failed");
3923 
3924 		goto failure;
3925 	}
3926 
3927 	/*
3928 	 * test for high level mutex
3929 	 */
3930 	if (nvc->nvc_intr_pri >= ddi_intr_get_hilevel_pri()) {
3931 		nv_cmn_err(CE_WARN, nvc, NULL,
3932 		    "nv_add_msi_intrs: high level intr not supported");
3933 
3934 		goto failure;
3935 	}
3936 
3937 	/*
3938 	 * Call ddi_intr_add_handler()
3939 	 */
3940 	for (x = 0; x < actual; x++) {
3941 		if (ddi_intr_add_handler(nvc->nvc_htable[x],
3942 		    nvc->nvc_interrupt, (caddr_t)nvc, NULL) != DDI_SUCCESS) {
3943 			nv_cmn_err(CE_WARN, nvc, NULL,
3944 			    "ddi_intr_add_handler() failed");
3945 
3946 			goto failure;
3947 		}
3948 	}
3949 
3950 	(void) ddi_intr_get_cap(nvc->nvc_htable[0], &nvc->nvc_intr_cap);
3951 
3952 	if (nvc->nvc_intr_cap & DDI_INTR_FLAG_BLOCK) {
3953 		(void) ddi_intr_block_enable(nvc->nvc_htable,
3954 		    nvc->nvc_intr_cnt);
3955 	} else {
3956 		/*
3957 		 * Call ddi_intr_enable() for MSI non block enable
3958 		 */
3959 		for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3960 			(void) ddi_intr_enable(nvc->nvc_htable[x]);
3961 		}
3962 	}
3963 
3964 	return (DDI_SUCCESS);
3965 
3966 	failure:
3967 	/*
3968 	 * free allocated intr and nvc_htable
3969 	 */
3970 	for (y = 0; y < actual; y++) {
3971 		(void) ddi_intr_free(nvc->nvc_htable[y]);
3972 	}
3973 
3974 	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3975 
3976 	return (DDI_FAILURE);
3977 }
3978 #endif
3979 
3980 
3981 static void
3982 nv_rem_intrs(nv_ctl_t *nvc)
3983 {
3984 	int x, i;
3985 	nv_port_t *nvp;
3986 
3987 	NVLOG(NVDBG_INIT, nvc, NULL, "nv_rem_intrs", NULL);
3988 
3989 	/*
3990 	 * prevent controller from generating interrupts by
3991 	 * masking them out.  This is an extra precaution.
3992 	 */
3993 	for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
3994 		nvp = (&nvc->nvc_port[i]);
3995 		mutex_enter(&nvp->nvp_mutex);
3996 		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
3997 		mutex_exit(&nvp->nvp_mutex);
3998 	}
3999 
4000 	/*
4001 	 * disable all interrupts
4002 	 */
4003 	if ((nvc->nvc_intr_type == DDI_INTR_TYPE_MSI) &&
4004 	    (nvc->nvc_intr_cap & DDI_INTR_FLAG_BLOCK)) {
4005 		(void) ddi_intr_block_disable(nvc->nvc_htable,
4006 		    nvc->nvc_intr_cnt);
4007 	} else {
4008 		for (x = 0; x < nvc->nvc_intr_cnt; x++) {
4009 			(void) ddi_intr_disable(nvc->nvc_htable[x]);
4010 		}
4011 	}
4012 
4013 	for (x = 0; x < nvc->nvc_intr_cnt; x++) {
4014 		(void) ddi_intr_remove_handler(nvc->nvc_htable[x]);
4015 		(void) ddi_intr_free(nvc->nvc_htable[x]);
4016 	}
4017 
4018 	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
4019 }
4020 
4021 
4022 /*
4023  * variable argument wrapper for cmn_err.  prefixes the instance and port
4024  * number if possible
4025  */
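/*
 * For example, a message logged with a controller pointer for instance 0
 * and a port pointer for port 2 is emitted with the prefix:
 *
 *	nv_sata inst 0 port2: <message>
 */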
4026 static void
4027 nv_vcmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, va_list ap)
4028 {
4029 	char port[NV_STR_LEN];
4030 	char inst[NV_STR_LEN];
4031 	dev_info_t *dip = NULL;
4032 
4033 	if (nvc) {
4034 		(void) snprintf(inst, NV_STR_LEN, "inst %d",
4035 		    ddi_get_instance(nvc->nvc_dip));
4036 		dip = nvc->nvc_dip;
4037 	} else {
4038 		inst[0] = '\0';
4039 	}
4040 
4041 	if (nvp) {
4042 		(void) sprintf(port, "port%d", nvp->nvp_port_num);
4043 		dip = nvp->nvp_ctlp->nvc_dip;
4044 	} else {
4045 		port[0] = '\0';
4046 	}
4047 
4048 	mutex_enter(&nv_log_mutex);
4049 
4050 	(void) sprintf(nv_log_buf, "nv_sata %s %s%s", inst, port,
4051 	    (inst[0]|port[0] ? ": " :""));
4052 
4053 	(void) vsnprintf(&nv_log_buf[strlen(nv_log_buf)],
4054 	    NV_LOGBUF_LEN - strlen(nv_log_buf), fmt, ap);
4055 
4056 	/*
4057 	 * Log to the console or only to the system log, depending on
4058 	 * the nv_log_to_console setting.
4059 	 */
4060 	if (nv_log_to_console) {
4061 		if (nv_prom_print) {
4062 			prom_printf("%s\n", nv_log_buf);
4063 		} else {
4064 			cmn_err(ce, "%s", nv_log_buf);
4065 		}
4066 
4067 
4068 	} else {
4069 		cmn_err(ce, "!%s", nv_log_buf);
4070 	}
4071 
4072 
4073 	(void) sprintf(nv_log_buf, "%s%s", port, (port[0] ? ": " :""));
4074 
4075 	(void) vsnprintf(&nv_log_buf[strlen(nv_log_buf)],
4076 	    NV_LOGBUF_LEN - strlen(nv_log_buf), fmt, ap);
4077 
4078 	sata_trace_debug(dip, nv_log_buf);
4079 
4080 
4081 	mutex_exit(&nv_log_mutex);
4082 }
4083 
4084 
4085 /*
4086  * wrapper for cmn_err
4087  */
4088 static void
4089 nv_cmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...)
4090 {
4091 	va_list ap;
4092 
4093 	va_start(ap, fmt);
4094 	nv_vcmn_err(ce, nvc, nvp, fmt, ap);
4095 	va_end(ap);
4096 }
4097 
4098 
4099 static void
4100 nv_log(nv_ctl_t *nvc, nv_port_t *nvp, const char *fmt, ...)
4101 {
4102 	va_list ap;
4103 
4104 	va_start(ap, fmt);
4105 
4106 	if (nvp == NULL && nvc == NULL) {
4107 		sata_vtrace_debug(NULL, fmt, ap);
4108 		va_end(ap);
4109 
4110 		return;
4111 	}
4112 
4113 	if (nvp == NULL && nvc != NULL) {
4114 		sata_vtrace_debug(nvc->nvc_dip, fmt, ap);
4115 		va_end(ap);
4116 
4117 		return;
4118 	}
4119 
4120 	/*
4121 	 * nvp is not NULL, but nvc might be.  Reference nvp for both
4122 	 * port and dip.
4123 	 */
4124 	mutex_enter(&nv_log_mutex);
4125 
4126 	(void) snprintf(nv_log_buf, NV_LOGBUF_LEN, "port%d: %s",
4127 	    nvp->nvp_port_num, fmt);
4128 
4129 	sata_vtrace_debug(nvp->nvp_ctlp->nvc_dip, nv_log_buf, ap);
4130 
4131 	mutex_exit(&nv_log_mutex);
4132 
4133 	va_end(ap);
4134 }
4135 
4136 
4137 /*
4138  * program registers which are common to all commands
4139  */
4140 static void
4141 nv_program_taskfile_regs(nv_port_t *nvp, int slot)
4142 {
4143 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4144 	sata_pkt_t *spkt;
4145 	sata_cmd_t *satacmd;
4146 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4147 	uint8_t cmd, ncq = B_FALSE;
4148 
4149 	spkt = nv_slotp->nvslot_spkt;
4150 	satacmd = &spkt->satapkt_cmd;
4151 	cmd = satacmd->satacmd_cmd_reg;
4152 
4153 	ASSERT(nvp->nvp_slot);
4154 
4155 	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
4156 	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
4157 		ncq = B_TRUE;
4158 	}
4159 
4160 	/*
4161 	 * select the drive
4162 	 */
4163 	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);
4164 
4165 	/*
4166 	 * make certain the drive is selected
4167 	 */
4168 	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
4169 	    NV_SEC2USEC(5), 0) == B_FALSE) {
4170 
4171 		return;
4172 	}
4173 
4174 	switch (spkt->satapkt_cmd.satacmd_addr_type) {
4175 
4176 	case ATA_ADDR_LBA:
4177 		NVLOG(NVDBG_DELIVER, nvp->nvp_ctlp, nvp, "ATA_ADDR_LBA mode",
4178 		    NULL);
4179 
4180 		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
4181 		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
4182 		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
4183 		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
4184 		nv_put8(cmdhdl, nvp->nvp_feature,
4185 		    satacmd->satacmd_features_reg);
4186 
4187 		break;
4188 
4189 	case ATA_ADDR_LBA28:
4190 		NVLOG(NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
4191 		    "ATA_ADDR_LBA28 mode", NULL);
4192 		/*
4193 		 * NCQ only uses 48-bit addressing
4194 		 */
4195 		ASSERT(ncq != B_TRUE);
4196 
4197 		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
4198 		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
4199 		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
4200 		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
4201 		nv_put8(cmdhdl, nvp->nvp_feature,
4202 		    satacmd->satacmd_features_reg);
4203 
4204 		break;
4205 
4206 	case ATA_ADDR_LBA48:
4207 		NVLOG(NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
4208 		    "ATA_ADDR_LBA48 mode", NULL);
4209 
4210 		/*
4211 		 * for NCQ, tag goes into count register and real sector count
4212 		 * into features register.  The sata module does the translation
4213 		 * in the satacmd.
4214 		 */
4215 		if (ncq == B_TRUE) {
4216 			nv_put8(cmdhdl, nvp->nvp_count, slot << 3);
4217 		} else {
4218 			nv_put8(cmdhdl, nvp->nvp_count,
4219 			    satacmd->satacmd_sec_count_msb);
4220 			nv_put8(cmdhdl, nvp->nvp_count,
4221 			    satacmd->satacmd_sec_count_lsb);
4222 		}
4223 		nv_put8(cmdhdl, nvp->nvp_feature,
4224 		    satacmd->satacmd_features_reg_ext);
4225 		nv_put8(cmdhdl, nvp->nvp_feature,
4226 		    satacmd->satacmd_features_reg);
4227 
4228 		/*
4229 		 * send the high-order half first
4230 		 */
4231 		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_msb);
4232 		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_msb);
4233 		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_msb);
4234 		/*
4235 		 * Send the low-order half
4236 		 */
4237 		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
4238 		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
4239 		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
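		/*
		 * Illustrative example (hypothetical LBA, assuming the sata
		 * framework's usual msb/lsb split): for LBA 0x12345678 the
		 * writes above load nvp_sect with 0x12 then 0x78, nvp_lcyl
		 * with 0x00 then 0x56, and nvp_hcyl with 0x00 then 0x34;
		 * the high-order byte is latched as the "previous" register
		 * content and the low-order byte as the "current" content,
		 * as LBA48 requires.  For NCQ, the tag written to the count
		 * register earlier is slot << 3, e.g. slot 5 becomes 0x28.
		 */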
4240 
4241 		break;
4242 
4243 	case 0:
4244 		/*
4245 		 * non-media access commands such as identify and features
4246 		 * take this path.
4247 		 */
4248 		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
4249 		nv_put8(cmdhdl, nvp->nvp_feature,
4250 		    satacmd->satacmd_features_reg);
4251 		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
4252 		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
4253 		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
4254 
4255 		break;
4256 
4257 	default:
4258 		break;
4259 	}
4260 
4261 	ASSERT(nvp->nvp_slot);
4262 }
4263 
4264 
4265 /*
4266  * start a command that involves no media access
4267  */
4268 static int
4269 nv_start_nodata(nv_port_t *nvp, int slot)
4270 {
4271 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4272 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4273 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4274 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4275 
4276 	nv_program_taskfile_regs(nvp, slot);
4277 
4278 	/*
4279 	 * This next one sets the controller in motion
4280 	 */
4281 	nv_put8(cmdhdl, nvp->nvp_cmd, sata_cmdp->satacmd_cmd_reg);
4282 
4283 	return (SATA_TRAN_ACCEPTED);
4284 }
4285 
4286 
4287 static int
4288 nv_bm_status_clear(nv_port_t *nvp)
4289 {
4290 	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
4291 	uchar_t	status, ret;
4292 
4293 	/*
4294 	 * Get the current BM status
4295 	 */
4296 	ret = status = nv_get8(bmhdl, nvp->nvp_bmisx);
4297 
4298 	status = (status & BMISX_MASK) | BMISX_IDERR | BMISX_IDEINTS;
4299 
4300 	/*
4301 	 * Clear the latches (and preserve the other bits)
4302 	 */
4303 	nv_put8(bmhdl, nvp->nvp_bmisx, status);
4304 
4305 	return (ret);
4306 }
4307 
4308 
4309 /*
4310  * program the bus master DMA engine with the PRD address for
4311  * the active slot command, and start the DMA engine.
4312  */
4313 static void
4314 nv_start_dma_engine(nv_port_t *nvp, int slot)
4315 {
4316 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4317 	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
4318 	uchar_t direction;
4319 
4320 	ASSERT(nv_slotp->nvslot_spkt != NULL);
4321 
4322 	if (nv_slotp->nvslot_spkt->satapkt_cmd.satacmd_flags.sata_data_direction
4323 	    == SATA_DIR_READ) {
4324 		direction = BMICX_RWCON_WRITE_TO_MEMORY;
4325 	} else {
4326 		direction = BMICX_RWCON_READ_FROM_MEMORY;
4327 	}
4328 
4329 	NVLOG(NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
4330 	    "nv_start_dma_engine entered", NULL);
4331 
4332 #if NOT_USED
4333 	/*
4334 	 * NOT NEEDED.  Left here for historical reasons.
4335 	 * Reset the controller's interrupt and error status bits.
4336 	 */
4337 	(void) nv_bm_status_clear(nvp);
4338 #endif
4339 	/*
4340 	 * program the PRD table physical start address
4341 	 */
4342 	nv_put32(bmhdl, nvp->nvp_bmidtpx, nvp->nvp_sg_paddr[slot]);
4343 
4344 	/*
4345 	 * set the direction control and start the DMA controller
4346 	 */
4347 	nv_put8(bmhdl, nvp->nvp_bmicx, direction | BMICX_SSBM);
4348 }
4349 
4350 /*
4351  * start dma command, either in or out
4352  */
4353 static int
4354 nv_start_dma(nv_port_t *nvp, int slot)
4355 {
4356 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4357 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4358 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4359 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4360 	uint8_t cmd = sata_cmdp->satacmd_cmd_reg;
4361 #ifdef NCQ
4362 	uint8_t ncq = B_FALSE;
4363 #endif
4364 	ddi_acc_handle_t sghdl = nvp->nvp_sg_acc_hdl[slot];
4365 	uint_t *dstp = (uint_t *)nvp->nvp_sg_addr[slot];
4366 	int sg_count = sata_cmdp->satacmd_num_dma_cookies, idx;
4367 	ddi_dma_cookie_t  *srcp = sata_cmdp->satacmd_dma_cookie_list;
4368 
4369 	ASSERT(sg_count != 0);
4370 
4371 	if (sata_cmdp->satacmd_num_dma_cookies > NV_DMA_NSEGS) {
4372 		nv_cmn_err(CE_WARN, nvp->nvp_ctlp, nvp, "NV_DMA_NSEGS=%d <"
4373 		    " satacmd_num_dma_cookies=%d", NV_DMA_NSEGS,
4374 		    sata_cmdp->satacmd_num_dma_cookies);
4375 
4376 		return (NV_FAILURE);
4377 	}
4378 
4379 	nv_program_taskfile_regs(nvp, slot);
4380 
4381 	/*
4382 	 * start the drive in motion
4383 	 */
4384 	nv_put8(cmdhdl, nvp->nvp_cmd, cmd);
4385 
4386 	/*
4387 	 * the drive starts processing the transaction when the cmd register
4388 	 * is written.  This is done here before programming the DMA engine to
4389 	 * parallelize and save some time.  In the event that the drive is ready
4390 	 * before DMA, it will wait.
4391 	 */
4392 #ifdef NCQ
4393 	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
4394 	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
4395 		ncq = B_TRUE;
4396 	}
4397 #endif
4398 
4399 	/*
4400 	 * copy the PRD list to PRD table in DMA accessible memory
4401 	 * so that the controller can access it.
4402 	 */
4403 	for (idx = 0; idx < sg_count; idx++, srcp++) {
4404 		uint32_t size;
4405 
4406 		nv_put32(sghdl, dstp++, srcp->dmac_address);
4407 
4408 		/* Set the number of bytes to transfer, 0 implies 64KB */
4409 		size = srcp->dmac_size;
4410 		if (size == 0x10000)
4411 			size = 0;
4412 
4413 		/*
4414 		 * If this is a 40-bit address, copy bits 32-39 of the
4415 		 * physical address to bits 16-23 of the PRD count.
4416 		 */
4417 		if (srcp->dmac_laddress > UINT32_MAX) {
4418 			size |= ((srcp->dmac_laddress & 0xff00000000) >> 16);
4419 		}
4420 
4421 		/*
4422 		 * set the end of table flag for the last entry
4423 		 */
4424 		if (idx == (sg_count - 1)) {
4425 			size |= PRDE_EOT;
4426 		}
4427 
4428 		nv_put32(sghdl, dstp++, size);
4429 	}
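	/*
	 * Resulting PRD table layout, shown for an illustrative two-cookie
	 * transfer (values are hypothetical):
	 *
	 *	entry 0: addr 0x1fe45000  count 0x1000             (4 KB)
	 *	entry 1: addr 0x1ff00000  count 0x800 | PRDE_EOT   (2 KB, last)
	 *
	 * A count field of 0 means 64 KB, and for addresses above 4 GB
	 * bits 32-39 of the physical address are folded into bits 16-23
	 * of the count word, as done in the loop above.
	 */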
4430 
4431 	(void) ddi_dma_sync(nvp->nvp_sg_dma_hdl[slot], 0,
4432 	    sizeof (prde_t) * NV_DMA_NSEGS, DDI_DMA_SYNC_FORDEV);
4433 
4434 	nv_start_dma_engine(nvp, slot);
4435 
4436 #ifdef NCQ
4437 	/*
4438 	 * optimization:  for SWNCQ, start DMA engine if this is the only
4439 	 * command running.  Preliminary NCQ efforts indicated this needs
4440 	 * more debugging.
4441 	 *
4442 	 * if (nvp->nvp_ncq_run <= 1)
4443 	 */
4444 
4445 	if (ncq == B_FALSE) {
4446 		NVLOG(NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
4447 		    "NOT NCQ so starting DMA NOW non_ncq_commands=%d"
4448 		    " cmd = %X", non_ncq_commands++, cmd);
4449 		nv_start_dma_engine(nvp, slot);
4450 	} else {
4451 		NVLOG(NVDBG_DELIVER, nvp->nvp_ctlp, nvp, "NCQ, so program "
4452 		    "DMA later ncq_commands=%d cmd = %X", ncq_commands++, cmd);
4453 	}
4454 #endif /* NCQ */
4455 
4456 	return (SATA_TRAN_ACCEPTED);
4457 }
4458 
4459 
4460 /*
4461  * start a PIO data-in ATA command
4462  */
4463 static int
4464 nv_start_pio_in(nv_port_t *nvp, int slot)
4465 {
4466 
4467 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4468 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4469 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4470 
4471 	nv_program_taskfile_regs(nvp, slot);
4472 
4473 	/*
4474 	 * This next one sets the drive in motion
4475 	 */
4476 	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
4477 
4478 	return (SATA_TRAN_ACCEPTED);
4479 }
4480 
4481 
4482 /*
4483  * start a PIO data-out ATA command
4484  */
4485 static int
4486 nv_start_pio_out(nv_port_t *nvp, int slot)
4487 {
4488 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4489 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4490 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4491 
4492 	nv_program_taskfile_regs(nvp, slot);
4493 
4494 	/*
4495 	 * this next one sets the drive in motion
4496 	 */
4497 	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
4498 
4499 	/*
4500 	 * wait for the busy bit to settle
4501 	 */
4502 	NV_DELAY_NSEC(400);
4503 
4504 	/*
4505 	 * wait for the drive to assert DRQ to send the first chunk
4506 	 * of data. Have to busy wait because there's no interrupt for
4507 	 * the first chunk. This is bad... uses a lot of cycles if the
4508 	 * drive responds too slowly or if the wait loop granularity
4509 	 * is too large. It's even worse if the drive is defective and
4510 	 * the loop times out.
4511 	 */
4512 	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
4513 	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
4514 	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
4515 	    4000000, 0) == B_FALSE) {
4516 		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4517 
4518 		goto error;
4519 	}
4520 
4521 	/*
4522 	 * send the first block.
4523 	 */
4524 	nv_intr_pio_out(nvp, nv_slotp);
4525 
4526 	/*
4527 	 * If nvslot_flags is not set to COMPLETE yet, then processing
4528 	 * is OK so far, so return.  Otherwise, fall into error handling
4529 	 * below.
4530 	 */
4531 	if (nv_slotp->nvslot_flags != NVSLOT_COMPLETE) {
4532 
4533 		return (SATA_TRAN_ACCEPTED);
4534 	}
4535 
4536 	error:
4537 	/*
4538 	 * there was an error so reset the device and complete the packet.
4539 	 */
4540 	nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4541 	nv_complete_io(nvp, spkt, 0);
4542 	nvp->nvp_state |= NV_PORT_RESET;
4543 	nvp->nvp_state &= ~(NV_PORT_RESTORE | NV_PORT_RESET_RETRY);
4544 	nv_reset(nvp, "pio_out");
4545 
4546 	return (SATA_TRAN_PORT_ERROR);
4547 }
4548 
4549 
4550 /*
4551  * start an ATAPI PACKET command (PIO data in or out)
4552  */
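/*
 * The function walks the ATAPI PIO protocol states annotated below:
 * select the drive, program the task file and write the PACKET command
 * (HP0: Check_Status_A), wait for DRQ, send the command packet through
 * the data register (HP1: Send_Packet), then return and let
 * nv_intr_pkt_pio() finish the transfer (HP3: INTRQ_wait).
 */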
4553 static int
4554 nv_start_pkt_pio(nv_port_t *nvp, int slot)
4555 {
4556 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4557 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4558 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4559 	sata_cmd_t *satacmd = &spkt->satapkt_cmd;
4560 
4561 	NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4562 	    "nv_start_pkt_pio: start", NULL);
4563 
4564 	/*
4565 	 * Write the PACKET command to the command register.  Normally
4566 	 * this would be done through nv_program_taskfile_regs().  It
4567 	 * is done here because some values need to be overridden.
4568 	 */
4569 
4570 	/* select the drive */
4571 	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);
4572 
4573 	/* make certain the drive is selected */
4574 	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
4575 	    NV_SEC2USEC(5), 0) == B_FALSE) {
4576 		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4577 		    "nv_start_pkt_pio: drive select failed", NULL);
4578 		return (SATA_TRAN_PORT_ERROR);
4579 	}
4580 
4581 	/*
4582 	 * The command is always sent via PIO, regardless of what the SATA
4583 	 * framework sets in the command.  Overwrite the DMA bit to do this.
4584 	 * Also, overwrite the overlay bit to be safe (it shouldn't be set).
4585 	 */
4586 	nv_put8(cmdhdl, nvp->nvp_feature, 0);	/* deassert DMA and OVL */
4587 
4588 	/* set appropriately by the sata framework */
4589 	nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
4590 	nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
4591 	nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
4592 	nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
4593 
4594 	/* initiate the command by writing the command register last */
4595 	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
4596 
4597 	/* Give the host controller time to do its thing */
4598 	NV_DELAY_NSEC(400);
4599 
4600 	/*
4601 	 * Wait for the device to indicate that it is ready for the command
4602 	 * ATAPI protocol state - HP0: Check_Status_A
4603 	 */
4604 
4605 	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
4606 	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
4607 	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
4608 	    4000000, 0) == B_FALSE) {
4609 		/*
4610 		 * Either an error or device fault occurred or the wait
4611 		 * timed out.  According to the ATAPI protocol, command
4612 		 * completion is also possible.  Other implementations of
4613 		 * this protocol don't handle this last case, so neither
4614 		 * does this code.
4615 		 */
4616 
4617 		if (nv_get8(cmdhdl, nvp->nvp_status) &
4618 		    (SATA_STATUS_ERR | SATA_STATUS_DF)) {
4619 			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4620 
4621 			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4622 			    "nv_start_pkt_pio: device error (HP0)", NULL);
4623 		} else {
4624 			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4625 
4626 			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4627 			    "nv_start_pkt_pio: timeout (HP0)", NULL);
4628 		}
4629 
4630 		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4631 		nv_complete_io(nvp, spkt, 0);
4632 		nvp->nvp_state |= NV_PORT_RESET;
4633 		nvp->nvp_state &= ~(NV_PORT_RESTORE | NV_PORT_RESET_RETRY);
4634 		nv_reset(nvp, "start_pkt_pio");
4635 
4636 		return (SATA_TRAN_PORT_ERROR);
4637 	}
4638 
4639 	/*
4640 	 * Put the ATAPI command in the data register
4641 	 * ATAPI protocol state - HP1: Send_Packet
4642 	 */
4643 
4644 	ddi_rep_put16(cmdhdl, (ushort_t *)spkt->satapkt_cmd.satacmd_acdb,
4645 	    (ushort_t *)nvp->nvp_data,
4646 	    (spkt->satapkt_cmd.satacmd_acdb_len >> 1), DDI_DEV_NO_AUTOINCR);
4647 
4648 	/*
4649 	 * See you in nv_intr_pkt_pio.
4650 	 * ATAPI protocol state - HP3: INTRQ_wait
4651 	 */
4652 
4653 	NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4654 	    "nv_start_pkt_pio: exiting into HP3", NULL);
4655 
4656 	return (SATA_TRAN_ACCEPTED);
4657 }
4658 
4659 
4660 /*
4661  * Interrupt processing for a non-data ATA command.
4662  */
4663 static void
4664 nv_intr_nodata(nv_port_t *nvp, nv_slot_t *nv_slotp)
4665 {
4666 	uchar_t status;
4667 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4668 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4669 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4670 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4671 
4672 	NVLOG(NVDBG_INTR, nvp->nvp_ctlp, nvp, "nv_intr_nodata entered", NULL);
4673 
4674 	status = nv_get8(cmdhdl, nvp->nvp_status);
4675 
4676 	/*
4677 	 * check for errors
4678 	 */
4679 	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
4680 		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4681 		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4682 		    nvp->nvp_altstatus);
4683 		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4684 	} else {
4685 		spkt->satapkt_reason = SATA_PKT_COMPLETED;
4686 	}
4687 
4688 	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4689 }
4690 
4691 
4692 /*
4693  * ATA command, PIO data in
4694  */
4695 static void
4696 nv_intr_pio_in(nv_port_t *nvp, nv_slot_t *nv_slotp)
4697 {
4698 	uchar_t	status;
4699 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4700 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4701 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4702 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4703 	int count;
4704 
4705 	status = nv_get8(cmdhdl, nvp->nvp_status);
4706 
4707 	if (status & SATA_STATUS_BSY) {
4708 		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4709 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4710 		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4711 		    nvp->nvp_altstatus);
4712 		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4713 		nvp->nvp_state |= NV_PORT_RESET;
4714 		nvp->nvp_state &= ~(NV_PORT_RESTORE | NV_PORT_RESET_RETRY);
4715 		nv_reset(nvp, "intr_pio_in");
4716 
4717 		return;
4718 	}
4719 
4720 	/*
4721 	 * check for errors
4722 	 */
4723 	if ((status & (SATA_STATUS_DRQ | SATA_STATUS_DF |
4724 	    SATA_STATUS_ERR)) != SATA_STATUS_DRQ) {
4725 		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4726 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4727 		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4728 
4729 		return;
4730 	}
4731 
4732 	/*
4733 	 * read the next chunk of data (if any)
4734 	 */
4735 	count = min(nv_slotp->nvslot_byte_count, NV_BYTES_PER_SEC);
4736 
4737 	/*
4738 	 * read count bytes
4739 	 */
4740 	ASSERT(count != 0);
4741 
4742 	ddi_rep_get16(cmdhdl, (ushort_t *)nv_slotp->nvslot_v_addr,
4743 	    (ushort_t *)nvp->nvp_data, (count >> 1), DDI_DEV_NO_AUTOINCR);
4744 
4745 	nv_slotp->nvslot_v_addr += count;
4746 	nv_slotp->nvslot_byte_count -= count;
4747 
4748 
4749 	if (nv_slotp->nvslot_byte_count != 0) {
4750 		/*
4751 		 * more to transfer.  Wait for next interrupt.
4752 		 */
4753 		return;
4754 	}
4755 
4756 	/*
4757 	 * transfer is complete. wait for the busy bit to settle.
4758 	 */
4759 	NV_DELAY_NSEC(400);
4760 
4761 	spkt->satapkt_reason = SATA_PKT_COMPLETED;
4762 	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4763 }
4764 
4765 
4766 /*
4767  * ATA command PIO data out
4768  */
4769 static void
4770 nv_intr_pio_out(nv_port_t *nvp, nv_slot_t *nv_slotp)
4771 {
4772 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4773 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4774 	uchar_t status;
4775 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4776 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4777 	int count;
4778 
4779 	/*
4780 	 * clear the IRQ
4781 	 */
4782 	status = nv_get8(cmdhdl, nvp->nvp_status);
4783 
4784 	if (status & SATA_STATUS_BSY) {
4785 		/*
4786 		 * this should not happen
4787 		 */
4788 		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4789 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4790 		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4791 		    nvp->nvp_altstatus);
4792 		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4793 
4794 		return;
4795 	}
4796 
4797 	/*
4798 	 * check for errors
4799 	 */
4800 	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
4801 		nv_copy_registers(nvp,  &spkt->satapkt_device, spkt);
4802 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4803 		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4804 
4805 		return;
4806 	}
4807 
4808 	/*
4809 	 * This is the condition which signals the drive is no longer
4810 	 * ready to transfer.  The transfer most likely completed
4811 	 * successfully, but check that byte_count is zero to be
4812 	 * certain.
4813 	 */
4814 	if ((status & SATA_STATUS_DRQ) == 0) {
4815 
4816 		if (nv_slotp->nvslot_byte_count == 0) {
4817 			/*
4818 			 * complete; successful transfer
4819 			 */
4820 			spkt->satapkt_reason = SATA_PKT_COMPLETED;
4821 		} else {
4822 			/*
4823 			 * error condition, incomplete transfer
4824 			 */
4825 			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4826 			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4827 		}
4828 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4829 
4830 		return;
4831 	}
4832 
4833 	/*
4834 	 * write the next chunk of data
4835 	 */
4836 	count = min(nv_slotp->nvslot_byte_count, NV_BYTES_PER_SEC);
4837 
4838 	/*
4839 	 * write count bytes
4840 	 */
4841 
4842 	ASSERT(count != 0);
4843 
4844 	ddi_rep_put16(cmdhdl, (ushort_t *)nv_slotp->nvslot_v_addr,
4845 	    (ushort_t *)nvp->nvp_data, (count >> 1), DDI_DEV_NO_AUTOINCR);
4846 
4847 	nv_slotp->nvslot_v_addr += count;
4848 	nv_slotp->nvslot_byte_count -= count;
4849 }
4850 
4851 
4852 /*
4853  * ATAPI PACKET command, PIO in/out interrupt
4854  *
4855  * Under normal circumstances, one of four different interrupt scenarios
4856  * will result in this function being called:
4857  *
4858  * 1. Packet command data transfer
4859  * 2. Packet command completion
4860  * 3. Request sense data transfer
4861  * 4. Request sense command completion
4862  */
4863 static void
4864 nv_intr_pkt_pio(nv_port_t *nvp, nv_slot_t *nv_slotp)
4865 {
4866 	uchar_t	status;
4867 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4868 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4869 	int direction = sata_cmdp->satacmd_flags.sata_data_direction;
4870 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4871 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4872 	uint16_t ctlr_count;
4873 	int count;
4874 
4875 	/* ATAPI protocol state - HP2: Check_Status_B */
4876 
4877 	status = nv_get8(cmdhdl, nvp->nvp_status);
4878 	NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4879 	    "nv_intr_pkt_pio: status 0x%x", status);
4880 
4881 	if (status & SATA_STATUS_BSY) {
4882 		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) != 0) {
4883 			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4884 			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4885 		} else {
4886 			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4887 			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4888 			nvp->nvp_state |= NV_PORT_RESET;
4889 			nvp->nvp_state &= ~(NV_PORT_RESTORE |
4890 			    NV_PORT_RESET_RETRY);
4891 			nv_reset(nvp, "intr_pkt_pio");
4892 		}
4893 
4894 		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4895 		    "nv_intr_pkt_pio: busy - status 0x%x", status);
4896 
4897 		return;
4898 	}
4899 
4900 	if ((status & SATA_STATUS_DF) != 0) {
4901 		/*
4902 		 * On device fault, just clean up and bail.  Request sense
4903 		 * will just default to its NO SENSE initialized value.
4904 		 */
4905 
4906 		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) == 0) {
4907 			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4908 		}
4909 
4910 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4911 		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4912 
4913 		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4914 		    nvp->nvp_altstatus);
4915 		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl,
4916 		    nvp->nvp_error);
4917 
4918 		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4919 		    "nv_intr_pkt_pio: device fault", NULL);
4920 
4921 		return;
4922 	}
4923 
4924 	if ((status & SATA_STATUS_ERR) != 0) {
4925 		/*
4926 		 * On command error, figure out whether we are processing a
4927 		 * request sense.  If so, clean up and bail.  Otherwise,
4928 		 * do a REQUEST SENSE.
4929 		 */
4930 
4931 		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) == 0) {
4932 			nv_slotp->nvslot_flags |= NVSLOT_RQSENSE;
4933 			if (nv_start_rqsense_pio(nvp, nv_slotp) ==
4934 			    NV_FAILURE) {
4935 				nv_copy_registers(nvp, &spkt->satapkt_device,
4936 				    spkt);
4937 				nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4938 				spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4939 			}
4940 
4941 			sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4942 			    nvp->nvp_altstatus);
4943 			sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl,
4944 			    nvp->nvp_error);
4945 		} else {
4946 			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4947 			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4948 
4949 			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4950 		}
4951 
4952 		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4953 		    "nv_intr_pkt_pio: error (status 0x%x)", status);
4954 
4955 		return;
4956 	}
4957 
4958 	if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) != 0) {
4959 		/*
4960 		 * REQUEST SENSE command processing
4961 		 */
4962 
4963 		if ((status & (SATA_STATUS_DRQ)) != 0) {
4964 			/* ATAPI state - HP4: Transfer_Data */
4965 
4966 			/* read the byte count from the controller */
4967 			ctlr_count =
4968 			    (uint16_t)nv_get8(cmdhdl, nvp->nvp_hcyl) << 8;
4969 			ctlr_count |= nv_get8(cmdhdl, nvp->nvp_lcyl);
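			/*
			 * For example (hypothetical register values), hcyl
			 * 0x00 and lcyl 0x24 yield a controller byte count
			 * of 0x0024, i.e. 36 bytes.
			 */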
4970 
4971 			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4972 			    "nv_intr_pkt_pio: ctlr byte count - %d",
4973 			    ctlr_count);
4974 
4975 			if (ctlr_count == 0) {
4976 				/* no data to transfer - some devices do this */
4977 
4978 				spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4979 				nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4980 
4981 				NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4982 				    "nv_intr_pkt_pio: done (no data)", NULL);
4983 
4984 				return;
4985 			}
4986 
4987 			count = min(ctlr_count, SATA_ATAPI_RQSENSE_LEN);
4988 
4989 			/* transfer the data */
4990 			ddi_rep_get16(cmdhdl,
4991 			    (ushort_t *)nv_slotp->nvslot_rqsense_buff,
4992 			    (ushort_t *)nvp->nvp_data, (count >> 1),
4993 			    DDI_DEV_NO_AUTOINCR);
4994 
4995 			/* consume residual bytes */
4996 			ctlr_count -= count;
4997 
4998 			if (ctlr_count > 0) {
4999 				for (; ctlr_count > 0; ctlr_count -= 2)
5000 					(void) ddi_get16(cmdhdl,
5001 					    (ushort_t *)nvp->nvp_data);
5002 			}
5003 
5004 			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5005 			    "nv_intr_pkt_pio: transition to HP2", NULL);
5006 		} else {
5007 			/* still in ATAPI state - HP2 */
5008 
5009 			/*
5010 			 * In order to avoid clobbering the rqsense data
5011 			 * set by the SATA framework, the sense data read
5012 			 * from the device is put in a separate buffer and
5013 			 * copied into the packet after the request sense
5014 			 * command successfully completes.
5015 			 */
5016 			bcopy(nv_slotp->nvslot_rqsense_buff,
5017 			    spkt->satapkt_cmd.satacmd_rqsense,
5018 			    SATA_ATAPI_RQSENSE_LEN);
5019 
5020 			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
5021 			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
5022 
5023 			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5024 			    "nv_intr_pkt_pio: request sense done", NULL);
5025 		}
5026 
5027 		return;
5028 	}
5029 
5030 	/*
5031 	 * Normal command processing
5032 	 */
5033 
5034 	if ((status & (SATA_STATUS_DRQ)) != 0) {
5035 		/* ATAPI protocol state - HP4: Transfer_Data */
5036 
5037 		/* read the byte count from the controller */
5038 		ctlr_count = (uint16_t)nv_get8(cmdhdl, nvp->nvp_hcyl) << 8;
5039 		ctlr_count |= nv_get8(cmdhdl, nvp->nvp_lcyl);
5040 
5041 		if (ctlr_count == 0) {
5042 			/* no data to transfer - some devices do this */
5043 
5044 			spkt->satapkt_reason = SATA_PKT_COMPLETED;
5045 			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
5046 
5047 			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5048 			    "nv_intr_pkt_pio: done (no data)", NULL);
5049 
5050 			return;
5051 		}
5052 
5053 		count = min(ctlr_count, nv_slotp->nvslot_byte_count);
5054 
5055 		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5056 		    "nv_intr_pkt_pio: drive_bytes 0x%x", ctlr_count);
5057 
5058 		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5059 		    "nv_intr_pkt_pio: byte_count 0x%x",
5060 		    nv_slotp->nvslot_byte_count);
5061 
5062 		/* transfer the data */
5063 
5064 		if (direction == SATA_DIR_READ) {
5065 			ddi_rep_get16(cmdhdl,
5066 			    (ushort_t *)nv_slotp->nvslot_v_addr,
5067 			    (ushort_t *)nvp->nvp_data, (count >> 1),
5068 			    DDI_DEV_NO_AUTOINCR);
5069 
5070 			ctlr_count -= count;
5071 
5072 			if (ctlr_count > 0) {
5073 				/* consume remaining bytes */
5074 
5075 				for (; ctlr_count > 0;
5076 				    ctlr_count -= 2)
5077 					(void) ddi_get16(cmdhdl,
5078 					    (ushort_t *)nvp->nvp_data);
5079 
5080 				NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5081 				    "nv_intr_pkt_pio: bytes remained", NULL);
5082 			}
5083 		} else {
5084 			ddi_rep_put16(cmdhdl,
5085 			    (ushort_t *)nv_slotp->nvslot_v_addr,
5086 			    (ushort_t *)nvp->nvp_data, (count >> 1),
5087 			    DDI_DEV_NO_AUTOINCR);
5088 		}
5089 
5090 		nv_slotp->nvslot_v_addr += count;
5091 		nv_slotp->nvslot_byte_count -= count;
5092 
5093 		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5094 		    "nv_intr_pkt_pio: transition to HP2", NULL);
5095 	} else {
5096 		/* still in ATAPI state - HP2 */
5097 
5098 		spkt->satapkt_reason = SATA_PKT_COMPLETED;
5099 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
5100 
5101 		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5102 		    "nv_intr_pkt_pio: done", NULL);
5103 	}
5104 }
5105 
5106 /*
5107  * ATA command, DMA data in/out
5108  */
5109 static void
5110 nv_intr_dma(nv_port_t *nvp, struct nv_slot *nv_slotp)
5111 {
5112 	uchar_t status;
5113 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
5114 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
5115 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
5116 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
5117 	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
5118 	uchar_t	bmicx;
5119 	uchar_t bm_status;
5120 
5121 	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
5122 
5123 	/*
5124 	 * stop DMA engine.
5125 	 */
5126 	bmicx = nv_get8(bmhdl, nvp->nvp_bmicx);
5127 	nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
5128 
5129 	/*
5130 	 * get the status and clear the IRQ, and check for DMA error
5131 	 */
5132 	status = nv_get8(cmdhdl, nvp->nvp_status);
5133 
5134 	/*
5135 	 * check for drive errors
5136 	 */
5137 	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
5138 		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
5139 		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
5140 		(void) nv_bm_status_clear(nvp);
5141 
5142 		return;
5143 	}
5144 
5145 	bm_status = nv_bm_status_clear(nvp);
5146 
5147 	/*
5148 	 * check for bus master errors
5149 	 */
5150 	if (bm_status & BMISX_IDERR) {
5151 		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
5152 		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
5153 		    nvp->nvp_altstatus);
5154 		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
5155 		nvp->nvp_state |= NV_PORT_RESET;
5156 		nvp->nvp_state &= ~(NV_PORT_RESTORE | NV_PORT_RESET_RETRY);
5157 		nv_reset(nvp, "intr_dma");
5158 
5159 		return;
5160 	}
5161 
5162 	spkt->satapkt_reason = SATA_PKT_COMPLETED;
5163 }
5164 
5165 
5166 /*
5167  * Wait for a register of a controller to achieve a specific state.
5168  * To return normally, all the bits in the first sub-mask must be ON,
5169  * all the bits in the second sub-mask must be OFF.
5170  * If timeout_usec microseconds pass without the controller achieving
5171  * the desired bit configuration, return B_FALSE, else B_TRUE.
5172  *
5173  * hybrid waiting algorithm: if not in interrupt context, busy looping will
5174  * occur for the first 250 us, then switch over to a sleeping wait.
5175  *
5176  */
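/*
 * Typical usage, as in nv_program_taskfile_regs(): wait up to five seconds
 * for DRDY to be set and BSY to be clear before proceeding:
 *
 *	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
 *	    NV_SEC2USEC(5), 0) == B_FALSE)
 *		return;		(drive never became ready)
 */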
5177 int
5178 nv_wait(nv_port_t *nvp, uchar_t onbits, uchar_t offbits, uint_t timeout_usec,
5179     int type_wait)
5180 {
5181 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
5182 	hrtime_t end, cur, start_sleep, start;
5183 	int first_time = B_TRUE;
5184 	ushort_t val;
5185 
5186 	for (;;) {
5187 		val = nv_get8(ctlhdl, nvp->nvp_altstatus);
5188 
5189 		if ((val & onbits) == onbits && (val & offbits) == 0) {
5190 
5191 			return (B_TRUE);
5192 		}
5193 
5194 		cur = gethrtime();
5195 
5196 		/*
5197 		 * store the start time and calculate the end
5198 		 * time.  also calculate "start_sleep" which is
5199 		 * the point after which the driver will stop busy
5200 		 * waiting and change to sleep waiting.
5201 		 */
5202 		if (first_time) {
5203 			first_time = B_FALSE;
5204 			/*
5205 			 * start and end are in nanoseconds
5206 			 */
5207 			start = cur;
5208 			end = start + timeout_usec * 1000;
5209 			/*
5210 			 * add 250 us (250000 ns) to start
5211 			 */
5212 			start_sleep =  start + 250000;
5213 
5214 			if (servicing_interrupt()) {
5215 				type_wait = NV_NOSLEEP;
5216 			}
5217 		}
5218 
5219 		if (cur > end) {
5220 
5221 			break;
5222 		}
5223 
5224 		if ((type_wait != NV_NOSLEEP) && (cur > start_sleep)) {
5225 #if ! defined(__lock_lint)
5226 			delay(1);
5227 #endif
5228 		} else {
5229 			drv_usecwait(nv_usec_delay);
5230 		}
5231 	}
5232 
5233 	return (B_FALSE);
5234 }
5235 
5236 
5237 /*
5238  * This is a slightly more complicated version that checks
5239  * for error conditions and bails out rather than looping
5240  * until the timeout is exceeded.
5241  *
5242  * hybrid waiting algorithm: if not in interrupt context, busy looping will
5243  * occur for the first 250 us, then switch over to a sleeping wait.
5244  */
5245 int
5246 nv_wait3(
5247 	nv_port_t	*nvp,
5248 	uchar_t		onbits1,
5249 	uchar_t		offbits1,
5250 	uchar_t		failure_onbits2,
5251 	uchar_t		failure_offbits2,
5252 	uchar_t		failure_onbits3,
5253 	uchar_t		failure_offbits3,
5254 	uint_t		timeout_usec,
5255 	int		type_wait)
5256 {
5257 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
5258 	hrtime_t end, cur, start_sleep, start;
5259 	int first_time = B_TRUE;
5260 	ushort_t val;
5261 
5262 	for (;;) {
5263 		val = nv_get8(ctlhdl, nvp->nvp_altstatus);
5264 
5265 		/*
5266 		 * check for expected condition
5267 		 */
5268 		if ((val & onbits1) == onbits1 && (val & offbits1) == 0) {
5269 
5270 			return (B_TRUE);
5271 		}
5272 
5273 		/*
5274 		 * check for error conditions
5275 		 */
5276 		if ((val & failure_onbits2) == failure_onbits2 &&
5277 		    (val & failure_offbits2) == 0) {
5278 
5279 			return (B_FALSE);
5280 		}
5281 
5282 		if ((val & failure_onbits3) == failure_onbits3 &&
5283 		    (val & failure_offbits3) == 0) {
5284 
5285 			return (B_FALSE);
5286 		}
5287 
5288 		/*
5289 		 * store the start time and calculate the end
5290 		 * time.  also calculate "start_sleep" which is
5291 		 * the point after which the driver will stop busy
5292 		 * waiting and change to sleep waiting.
5293 		 */
5294 		if (first_time) {
5295 			first_time = B_FALSE;
5296 			/*
5297 			 * start and end are in nanoseconds
5298 			 */
5299 			cur = start = gethrtime();
5300 			end = start + timeout_usec * 1000;
5301 			/*
5302 			 * add 250 us (250000 ns) to start
5303 			 */
5304 			start_sleep =  start + 250000;
5305 
5306 			if (servicing_interrupt()) {
5307 				type_wait = NV_NOSLEEP;
5308 			}
5309 		} else {
5310 			cur = gethrtime();
5311 		}
5312 
5313 		if (cur > end) {
5314 
5315 			break;
5316 		}
5317 
5318 		if ((type_wait != NV_NOSLEEP) && (cur > start_sleep)) {
5319 #if ! defined(__lock_lint)
5320 			delay(1);
5321 #endif
5322 		} else {
5323 			drv_usecwait(nv_usec_delay);
5324 		}
5325 	}
5326 
5327 	return (B_FALSE);
5328 }
5329 
5330 
5331 /*
5332  * nv_port_state_change() reports the state of the port to the
5333  * sata module by calling sata_hba_event_notify().  This
5334  * function is called any time the state of the port is changed
5335  */
5336 static void
5337 nv_port_state_change(nv_port_t *nvp, int event, uint8_t addr_type, int state)
5338 {
5339 	sata_device_t sd;
5340 
5341 	NVLOG(NVDBG_EVENT, nvp->nvp_ctlp, nvp,
5342 	    "nv_port_state_change: event 0x%x type 0x%x state 0x%x "
5343 	    "time %ld (ticks)", event, addr_type, state, ddi_get_lbolt());
5344 
5345 	bzero((void *)&sd, sizeof (sata_device_t));
5346 	sd.satadev_rev = SATA_DEVICE_REV;
5347 	nv_copy_registers(nvp, &sd, NULL);
5348 
5349 	/*
5350 	 * When NCQ is implemented, the sactive and snotific fields need
5351 	 * to be updated.
5352 	 */
5353 	sd.satadev_addr.cport = nvp->nvp_port_num;
5354 	sd.satadev_addr.qual = addr_type;
5355 	sd.satadev_state = state;
5356 
5357 	sata_hba_event_notify(nvp->nvp_ctlp->nvc_dip, &sd, event);
5358 }
5359 
5360 
5361 
5362 /*
5363  * Monitor reset progress and signature gathering.
5364  * This function may loop, so it should not be called from interrupt
5365  * context.
5366  *
5367  * Entered with nvp mutex held.
5368  */
5369 static void
5370 nv_monitor_reset(nv_port_t *nvp)
5371 {
5372 	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
5373 	uint32_t sstatus;
5374 	int send_notification = B_FALSE;
5375 	uint8_t dev_type;
5376 
5377 	sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
5378 
5379 	/*
5380 	 * We do not know the reason for the port reset here.
5381 	 * Check the link status.  The link needs to be active before
5382 	 * we can proceed with signature acquisition.
5383 	 */
5384 	if ((SSTATUS_GET_IPM(sstatus) != SSTATUS_IPM_ACTIVE) ||
5385 	    (SSTATUS_GET_DET(sstatus) != SSTATUS_DET_DEVPRE_PHYCOM)) {
5386 		/*
5387 		 * Either the link is not active or there is no device.
5388 		 * If the link remains down for more than NV_LINK_DOWN_TIMEOUT
5389 		 * (milliseconds), abort signature acquisition and complete
5390 		 * reset processing.
5391 		 * The link goes down when COMRESET is sent by nv_reset(), so
5392 		 * the down time is measured from nvp_reset_time.
5393 		 */
5394 
5395 		if (TICK_TO_MSEC(ddi_get_lbolt() - nvp->nvp_reset_time) >=
5396 		    NV_LINK_DOWN_TIMEOUT) {
5397 			NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
5398 			    "nv_monitor_reset: no link - ending signature "
5399 			    "acquisition; time after reset %ldms",
5400 			    TICK_TO_MSEC(ddi_get_lbolt() -
5401 			    nvp->nvp_reset_time));
5402 		}
5403 		nvp->nvp_state &= ~(NV_PORT_RESET | NV_PORT_RESET_RETRY |
5404 		    NV_PORT_PROBE | NV_PORT_HOTPLUG_DELAY);
5405 		/*
5406 		 * Else, if the link was lost (i.e. was present before)
5407 		 * the controller should generate a 'remove' interrupt
5408 		 * that will cause the appropriate event notification.
5409 		 */
5410 		return;
5411 	}
5412 
5413 	NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
5414 	    "nv_monitor_reset: link up after reset; time %ldms",
5415 	    TICK_TO_MSEC(ddi_get_lbolt() - nvp->nvp_reset_time));
5416 
5417 sig_read:
5418 	if (nvp->nvp_signature != 0) {
5419 		/*
5420 		 * The link is up. The signature was acquired before (device
5421 		 * was present).
5422 		 * But we may need to wait for the signature (D2H FIS) before
5423 		 * accessing the drive.
5424 		 */
5425 		if (nv_wait_for_signature != 0) {
5426 			uint32_t old_signature;
5427 			uint8_t old_type;
5428 
5429 			old_signature = nvp->nvp_signature;
5430 			old_type = nvp->nvp_type;
5431 			nvp->nvp_signature = 0;
5432 			nv_read_signature(nvp);
5433 			if (nvp->nvp_signature == 0) {
5434 				nvp->nvp_signature = old_signature;
5435 				nvp->nvp_type = old_type;
5436 
5437 #ifdef NV_DEBUG
5438 				/* FOR DEBUGGING */
5439 				if (nv_wait_here_forever) {
5440 					drv_usecwait(1000);
5441 					goto sig_read;
5442 				}
5443 #endif
5444 				/*
5445 				 * Wait, but not endlessly.
5446 				 */
5447 				if (TICK_TO_MSEC(ddi_get_lbolt() -
5448 				    nvp->nvp_reset_time) <
5449 				    nv_sig_acquisition_time) {
5450 					drv_usecwait(1000);
5451 					goto sig_read;
5452 				} else if (!(nvp->nvp_state &
5453 				    NV_PORT_RESET_RETRY)) {
5454 					/*
5455 					 * Retry reset.
5456 					 */
5457 					NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
5458 					    "nv_monitor_reset: retrying reset "
5459 					    "time after first reset: %ldms",
5460 					    TICK_TO_MSEC(ddi_get_lbolt() -
5461 					    nvp->nvp_reset_time));
5462 					nvp->nvp_state |= NV_PORT_RESET_RETRY;
5463 					nv_reset(nvp, "monitor_reset 1");
5464 					goto sig_read;
5465 				}
5466 
5467 				NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
5468 				    "nv_monitor_reset: terminating signature "
5469 				    "acquisition (1); time after reset: %ldms",
5470 				    TICK_TO_MSEC(ddi_get_lbolt() -
5471 				    nvp->nvp_reset_time));
5472 			} else {
5473 				NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
5474 				    "nv_monitor_reset: signature acquired; "
5475 				    "time after reset: %ldms",
5476 				    TICK_TO_MSEC(ddi_get_lbolt() -
5477 				    nvp->nvp_reset_time));
5478 			}
5479 		}
5480 		/*
5481 		 * Clear reset state, set device reset recovery state
5482 		 */
5483 		nvp->nvp_state &= ~(NV_PORT_RESET | NV_PORT_RESET_RETRY |
5484 		    NV_PORT_PROBE);
5485 		nvp->nvp_state |= NV_PORT_RESTORE;
5486 
5487 		/*
5488 		 * Need to send reset event notification
5489 		 */
5490 		send_notification = B_TRUE;
5491 	} else {
5492 		/*
5493 		 * The link is up. The signature was not acquired before.
5494 		 * We can try to fetch a device signature.
5495 		 */
5496 		dev_type = nvp->nvp_type;
5497 
5498 acquire_signature:
5499 		nv_read_signature(nvp);
5500 		if (nvp->nvp_signature != 0) {
5501 			/*
5502 			 * Got device signature.
5503 			 */
5504 			NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
5505 			    "nv_monitor_reset: signature acquired; "
5506 			    "time after reset: %ldms",
5507 			    TICK_TO_MSEC(ddi_get_lbolt() -
5508 			    nvp->nvp_reset_time));
5509 
5510 			/* Clear internal reset state */
5511 			nvp->nvp_state &=
5512 			    ~(NV_PORT_RESET | NV_PORT_RESET_RETRY);
5513 
5514 			if (dev_type != SATA_DTYPE_NONE) {
5515 				/*
5516 				 * We acquired the signature for a
5517 				 * pre-existing device that was not identified
5518 				 * before and was reset.
5519 				 * Need to enter the device reset recovery
5520 				 * state and to send the reset notification.
5521 				 */
5522 				nvp->nvp_state |= NV_PORT_RESTORE;
5523 				send_notification = B_TRUE;
5524 			} else {
5525 				/*
5526 				 * Otherwise, we acquired the signature because a new
5527 				 * device was attached (the driver attach or
5528 				 * a hot-plugged device). There is no need to
5529 				 * enter the device reset recovery state or to
5530 				 * send the reset notification, but we may need
5531 				 * to send a device attached notification.
5532 				 */
5533 				if (nvp->nvp_state & NV_PORT_PROBE) {
5534 					nv_port_state_change(nvp,
5535 					    SATA_EVNT_DEVICE_ATTACHED,
5536 					    SATA_ADDR_CPORT, 0);
5537 					nvp->nvp_state &= ~NV_PORT_PROBE;
5538 				}
5539 			}
5540 		} else {
5541 			if (TICK_TO_MSEC(ddi_get_lbolt() -
5542 			    nvp->nvp_reset_time) < nv_sig_acquisition_time) {
5543 				drv_usecwait(1000);
5544 				goto acquire_signature;
5545 			} else if (!(nvp->nvp_state & NV_PORT_RESET_RETRY)) {
5546 				/*
5547 				 * Some drives may require additional
5548 				 * reset(s) to get a valid signature
5549 				 * (indicating that the drive is ready).
5550 				 * If a drive was not just powered
5551 				 * up, the signature should be available
5552 				 * within few hundred milliseconds
5553 				 * after reset.  Therefore, if more than
5554 				 * NV_SIG_ACQUISITION_TIME has elapsed
5555 				 * while waiting for a signature, reset
5556 				 * device again.
5557 				 */
5558 				NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
5559 				    "nv_monitor_reset: retrying reset "
5560 				    "time after first reset: %ldms",
5561 				    TICK_TO_MSEC(ddi_get_lbolt() -
5562 				    nvp->nvp_reset_time));
5563 				nvp->nvp_state |= NV_PORT_RESET_RETRY;
5564 				nv_reset(nvp, "monitor_reset 2");
5565 				drv_usecwait(1000);
5566 				goto acquire_signature;
5567 			}
5568 			/*
5569 			 * Terminating signature acquisition.
5570 			 * Hopefully, the drive is ready.
5571 			 * The SATA module can deal with this as long as it
5572 			 * knows that some device is attached and a device
5573 			 * responds to commands.
5574 			 */
5575 			if (!(nvp->nvp_state & NV_PORT_PROBE)) {
5576 				send_notification = B_TRUE;
5577 			}
5578 			nvp->nvp_state &= ~(NV_PORT_RESET |
5579 			    NV_PORT_RESET_RETRY);
5580 			nvp->nvp_type = SATA_DTYPE_UNKNOWN;
5581 			if (nvp->nvp_state & NV_PORT_PROBE) {
5582 				nv_port_state_change(nvp,
5583 				    SATA_EVNT_DEVICE_ATTACHED,
5584 				    SATA_ADDR_CPORT, 0);
5585 				nvp->nvp_state &= ~NV_PORT_PROBE;
5586 			}
5587 			nvp->nvp_type = dev_type;
5588 			NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
5589 			    "nv_monitor_reset: terminating signature "
5590 			    "acquisition (2); time after reset: %ldms",
5591 			    TICK_TO_MSEC(ddi_get_lbolt() -
5592 			    nvp->nvp_reset_time));
5593 		}
5594 	}
5595 
5596 	if (send_notification) {
5597 		nv_port_state_change(nvp, SATA_EVNT_DEVICE_RESET,
5598 		    SATA_ADDR_DCPORT,
5599 		    SATA_DSTATE_RESET | SATA_DSTATE_PWR_ACTIVE);
5600 	}
5601 
5602 #ifdef SGPIO_SUPPORT
5603 	if (nvp->nvp_type == SATA_DTYPE_ATADISK) {
5604 		nv_sgp_drive_connect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
5605 		    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
5606 	} else {
5607 		nv_sgp_drive_disconnect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
5608 		    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
5609 	}
5610 #endif
5611 }
5612 
5613 
5614 /*
5615  * Send a hotplug (add device) notification at the appropriate time after
5616  * hotplug detection.
5617  * Relies on nvp_reset_time being set at hotplug detection time.
5618  * Called only from nv_timeout when the NV_PORT_HOTPLUG_DELAY flag is set
5619  * in nvp_state.
5620  */
5621 static void
5622 nv_delay_hotplug_notification(nv_port_t *nvp)
5623 {
5624 
5625 	if (TICK_TO_MSEC(ddi_get_lbolt() - nvp->nvp_reset_time) >=
5626 	    nv_hotplug_delay) {
5627 		NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
5628 		    "nv_delay_hotplug_notification: notifying framework after "
5629 		    "%dms delay", TICK_TO_MSEC(ddi_get_lbolt() -
5630 		    nvp->nvp_reset_time));
5631 		nvp->nvp_state &= ~NV_PORT_HOTPLUG_DELAY;
5632 		nv_port_state_change(nvp, SATA_EVNT_DEVICE_ATTACHED,
5633 		    SATA_ADDR_CPORT, 0);
5634 	}
5635 }
5636 
5637 /*
5638  * timeout processing:
5639  *
5640  * Check if any packets have crossed a timeout threshold.  If so,
5641  * abort the packet.  This function is not NCQ-aware.
5642  *
5643  * If a reset was invoked, call the reset monitoring function.
5644  *
5645  * The timeout frequency is lower for packet timeout checking (1s)
5646  * and higher for reset monitoring (1ms).
5647  *
5648  */
5649 static void
5650 nv_timeout(void *arg)
5651 {
5652 	nv_port_t *nvp = arg;
5653 	nv_slot_t *nv_slotp;
5654 	int next_timeout = NV_ONE_SEC;	/* Default */
5655 	uint16_t int_status;
5656 	uint8_t status, bmstatus;
5657 	static int intr_warn_once = 0;
5658 
5659 	ASSERT(nvp != NULL);
5660 
5661 	mutex_enter(&nvp->nvp_mutex);
5662 	nvp->nvp_timeout_id = 0;
5663 
5664 	/*
5665 	 * If the port is not in the init state, ignore it.
5666 	 */
5667 	if ((nvp->nvp_state & NV_PORT_INIT) == 0) {
5668 		NVLOG(NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5669 		    "nv_timeout: port uninitialized", NULL);
5670 		next_timeout = 0;
5671 
5672 		goto finished;
5673 	}
5674 
5675 	if (nvp->nvp_state & (NV_PORT_RESET | NV_PORT_RESET_RETRY)) {
5676 		nv_monitor_reset(nvp);
5677 		next_timeout = NV_ONE_MSEC;	/* at least 1ms */
5678 
5679 		goto finished;
5680 	}
5681 
5682 	if ((nvp->nvp_state & NV_PORT_HOTPLUG_DELAY) != 0) {
5683 		nv_delay_hotplug_notification(nvp);
5684 		next_timeout = NV_ONE_MSEC;	/* at least 1ms */
5685 
5686 		goto finished;
5687 	}
5688 
5689 	/*
5690 	 * Not yet NCQ-aware - there is only one command active.
5691 	 */
5692 	nv_slotp = &(nvp->nvp_slot[0]);
5693 
5694 	/*
5695 	 * perform timeout checking and processing only if there is an
5696 	 * active packet on the port
5697 	 */
5698 	if (nv_slotp != NULL && nv_slotp->nvslot_spkt != NULL)  {
5699 		sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
5700 		sata_cmd_t *satacmd = &spkt->satapkt_cmd;
5701 		uint8_t cmd = satacmd->satacmd_cmd_reg;
5702 		uint64_t lba;
5703 
5704 #if ! defined(__lock_lint) && defined(DEBUG)
5705 
5706 		lba = (uint64_t)satacmd->satacmd_lba_low_lsb |
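		/*
		 * Reassemble the 48-bit LBA from the six taskfile shadow
		 * register bytes (low/mid/high LSB, then low/mid/high MSB)
		 * for the timeout log message below; note the value is
		 * only computed in DEBUG builds.
		 */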
5707 		    ((uint64_t)satacmd->satacmd_lba_mid_lsb << 8) |
5708 		    ((uint64_t)satacmd->satacmd_lba_high_lsb << 16) |
5709 		    ((uint64_t)satacmd->satacmd_lba_low_msb << 24) |
5710 		    ((uint64_t)satacmd->satacmd_lba_mid_msb << 32) |
5711 		    ((uint64_t)satacmd->satacmd_lba_high_msb << 40);
5712 #endif
5713 
5714 		/*
5715 		 * timeout not needed if there is a polling thread
5716 		 */
5717 		if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
5718 			next_timeout = 0;
5719 
5720 			goto finished;
5721 		}
5722 
5723 		if (TICK_TO_SEC(ddi_get_lbolt() - nv_slotp->nvslot_stime) >
5724 		    spkt->satapkt_time) {
5725 
5726 			uint32_t serr = nv_get32(nvp->nvp_ctlp->nvc_bar_hdl[5],
5727 			    nvp->nvp_serror);
5728 
5729 			nv_cmn_err(CE_NOTE, nvp->nvp_ctlp, nvp,
5730 			    "nv_timeout: aborting: "
5731 			    "nvslot_stime: %ld max ticks till timeout: "
5732 			    "%ld cur_time: %ld cmd=%x lba=%d seq=%d",
5733 			    nv_slotp->nvslot_stime,
5734 			    drv_usectohz(MICROSEC *
5735 			    spkt->satapkt_time), ddi_get_lbolt(),
5736 			    cmd, lba, nvp->nvp_seq);
5737 
5738 			NVLOG(NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5739 			    "nv_timeout: SError at timeout: 0x%x", serr);
5740 
5741 			NVLOG(NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5742 			    "nv_timeout: previous cmd=%x",
5743 			    nvp->nvp_previous_cmd);
5744 
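			/*
			 * nvp_mcp5x_int_status is only mapped on mcp5x
			 * parts.  When it is available, check whether a
			 * completion interrupt was simply missed before
			 * treating this as a true timeout.
			 */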
5745 			if (nvp->nvp_mcp5x_int_status != NULL) {
5746 				status = nv_get8(nvp->nvp_ctl_hdl,
5747 				    nvp->nvp_altstatus);
5748 				bmstatus = nv_get8(nvp->nvp_bm_hdl,
5749 				    nvp->nvp_bmisx);
5750 				int_status = nv_get16(
5751 				    nvp->nvp_ctlp->nvc_bar_hdl[5],
5752 				    nvp->nvp_mcp5x_int_status);
5753 				NVLOG(NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5754 				    "nv_timeout: altstatus %x, bmicx %x, "
5755 				    "int_status %X", status, bmstatus,
5756 				    int_status);
5757 
5758 				if (int_status & MCP5X_INT_COMPLETE) {
5759 					/*
5760 					 * Completion interrupt was missed!
5761 					 * Issue warning message once
5762 					 */
5763 					if (!intr_warn_once) {
5764 						nv_cmn_err(CE_WARN,
5765 						    nvp->nvp_ctlp,
5766 						    nvp,
5767 						    "nv_sata: missing command "
5768 						    "completion interrupt(s)!");
5769 						intr_warn_once = 1;
5770 					}
5771 					NVLOG(NVDBG_TIMEOUT, nvp->nvp_ctlp,
5772 					    nvp, "timeout detected with "
5773 					    "interrupt ready - calling "
5774 					    "int directly", NULL);
5775 					mutex_exit(&nvp->nvp_mutex);
5776 					(void) mcp5x_intr_port(nvp);
5777 					mutex_enter(&nvp->nvp_mutex);
5778 				} else {
5779 					/*
5780 					 * True timeout and not a missing
5781 					 * interrupt.
5782 					 */
5783 					(void) nv_abort_active(nvp, spkt,
5784 					    SATA_PKT_TIMEOUT, B_TRUE);
5785 				}
5786 			} else {
5787 				(void) nv_abort_active(nvp, spkt,
5788 				    SATA_PKT_TIMEOUT, B_TRUE);
5789 			}
5790 
5791 		} else {
5792 			NVLOG(NVDBG_VERBOSE, nvp->nvp_ctlp, nvp,
5793 			    "nv_timeout:"
5794 			    " still in use so restarting timeout",
5795 			    NULL);
5796 
5797 			next_timeout = NV_ONE_SEC;
5798 		}
5799 	} else {
5800 		/*
5801 		 * there was no active packet, so do not re-enable timeout
5802 		 */
5803 		next_timeout = 0;
5804 		NVLOG(NVDBG_VERBOSE, nvp->nvp_ctlp, nvp,
5805 		    "nv_timeout: no active packet so not re-arming "
5806 		    "timeout", NULL);
5807 	}
5808 
5809 finished:
5810 	if (next_timeout != 0) {
5811 		nv_setup_timeout(nvp, next_timeout);
5812 	}
5813 	mutex_exit(&nvp->nvp_mutex);
5814 }
5815 
5816 
5817 /*
5818  * enable or disable the 3 interrupt types the driver is
5819  * interested in: completion, add and remove.
5820  */
5821 static void
5822 ck804_set_intr(nv_port_t *nvp, int flag)
5823 {
5824 	nv_ctl_t *nvc = nvp->nvp_ctlp;
5825 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
5826 	uchar_t *bar5  = nvc->nvc_bar_addr[5];
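	/*
	 * These tables are indexed by port number: entry 0 holds the
	 * primary-device (PDEV) bits, entry 1 the secondary-device (SDEV)
	 * bits of the shared 8-bit ck804 interrupt registers.
	 */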
5827 	uint8_t intr_bits[] = { CK804_INT_PDEV_HOT|CK804_INT_PDEV_INT,
5828 	    CK804_INT_SDEV_HOT|CK804_INT_SDEV_INT };
5829 	uint8_t clear_all_bits[] = { CK804_INT_PDEV_ALL, CK804_INT_SDEV_ALL };
5830 	uint8_t int_en, port = nvp->nvp_port_num, intr_status;
5831 
5832 	if (flag & NV_INTR_DISABLE_NON_BLOCKING) {
5833 		int_en = nv_get8(bar5_hdl,
5834 		    (uint8_t *)(bar5 + CK804_SATA_INT_EN));
5835 		int_en &= ~intr_bits[port];
5836 		nv_put8(bar5_hdl, (uint8_t *)(bar5 + CK804_SATA_INT_EN),
5837 		    int_en);
5838 		return;
5839 	}
5840 
5841 	ASSERT(mutex_owned(&nvp->nvp_mutex));
5842 
5843 	/*
5844 	 * controller level lock also required since access to an 8-bit
5845 	 * interrupt register is shared between both channels.
5846 	 */
5847 	mutex_enter(&nvc->nvc_mutex);
5848 
5849 	if (flag & NV_INTR_CLEAR_ALL) {
5850 		NVLOG(NVDBG_INTR, nvc, nvp,
5851 		    "ck804_set_intr: NV_INTR_CLEAR_ALL", NULL);
5852 
5853 		intr_status = nv_get8(nvc->nvc_bar_hdl[5],
5854 		    (uint8_t *)(nvc->nvc_ck804_int_status));
5855 
5856 		if (intr_status & clear_all_bits[port]) {
5857 
5858 			nv_put8(nvc->nvc_bar_hdl[5],
5859 			    (uint8_t *)(nvc->nvc_ck804_int_status),
5860 			    clear_all_bits[port]);
5861 
5862 			NVLOG(NVDBG_INTR, nvc, nvp,
5863 			    "interrupt bits cleared %x",
5864 			    intr_status & clear_all_bits[port]);
5865 		}
5866 	}
5867 
5868 	if (flag & NV_INTR_DISABLE) {
5869 		NVLOG(NVDBG_INTR, nvc, nvp,
5870 		    "ck804_set_intr: NV_INTR_DISABLE", NULL);
5871 		int_en = nv_get8(bar5_hdl,
5872 		    (uint8_t *)(bar5 + CK804_SATA_INT_EN));
5873 		int_en &= ~intr_bits[port];
5874 		nv_put8(bar5_hdl, (uint8_t *)(bar5 + CK804_SATA_INT_EN),
5875 		    int_en);
5876 	}
5877 
5878 	if (flag & NV_INTR_ENABLE) {
5879 		NVLOG(NVDBG_INTR, nvc, nvp, "ck804_set_intr: NV_INTR_ENABLE",
5880 		    NULL);
5881 		int_en = nv_get8(bar5_hdl,
5882 		    (uint8_t *)(bar5 + CK804_SATA_INT_EN));
5883 		int_en |= intr_bits[port];
5884 		nv_put8(bar5_hdl, (uint8_t *)(bar5 + CK804_SATA_INT_EN),
5885 		    int_en);
5886 	}
5887 
5888 	mutex_exit(&nvc->nvc_mutex);
5889 }
5890 
5891 
5892 /*
5893  * enable or disable the 3 interrupts the driver is interested in:
5894  * completion interrupt, hot add, and hot remove interrupt.
5895  */
5896 static void
5897 mcp5x_set_intr(nv_port_t *nvp, int flag)
5898 {
5899 	nv_ctl_t *nvc = nvp->nvp_ctlp;
5900 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
5901 	uint16_t intr_bits =
5902 	    MCP5X_INT_ADD|MCP5X_INT_REM|MCP5X_INT_COMPLETE;
5903 	uint16_t int_en;
5904 
5905 	if (flag & NV_INTR_DISABLE_NON_BLOCKING) {
5906 		int_en = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_ctl);
5907 		int_en &= ~intr_bits;
5908 		nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_ctl, int_en);
5909 		return;
5910 	}
5911 
5912 	ASSERT(mutex_owned(&nvp->nvp_mutex));
5913 
5914 	NVLOG(NVDBG_INTR, nvc, nvp, "mcp5x_set_intr: enter flag: %d", flag);
5915 
5916 	if (flag & NV_INTR_CLEAR_ALL) {
5917 		NVLOG(NVDBG_INTR, nvc, nvp,
5918 		    "mcp5x_set_intr: NV_INTR_CLEAR_ALL", NULL);
5919 		nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status, MCP5X_INT_CLEAR);
5920 	}
5921 
5922 	if (flag & NV_INTR_ENABLE) {
5923 		NVLOG(NVDBG_INTR, nvc, nvp, "mcp5x_set_intr: NV_INTR_ENABLE",
5924 		    NULL);
5925 		int_en = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_ctl);
5926 		int_en |= intr_bits;
5927 		nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_ctl, int_en);
5928 	}
5929 
5930 	if (flag & NV_INTR_DISABLE) {
5931 		NVLOG(NVDBG_INTR, nvc, nvp,
5932 		    "mcp5x_set_intr: NV_INTR_DISABLE", NULL);
5933 		int_en = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_ctl);
5934 		int_en &= ~intr_bits;
5935 		nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_ctl, int_en);
5936 	}
5937 }
5938 
5939 
5940 static void
5941 nv_resume(nv_port_t *nvp)
5942 {
5943 	NVLOG(NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_resume()", NULL);
5944 
5945 	mutex_enter(&nvp->nvp_mutex);
5946 
5947 	if (nvp->nvp_state & NV_PORT_INACTIVE) {
5948 		mutex_exit(&nvp->nvp_mutex);
5949 
5950 		return;
5951 	}
5952 
5953 	/* Enable interrupt */
5954 	(*(nvp->nvp_ctlp->nvc_set_intr))(nvp, NV_INTR_CLEAR_ALL|NV_INTR_ENABLE);
5955 
5956 	/*
5957 	 * Power may have been removed to the port and the
5958 	 * drive, and/or a drive may have been added or removed.
5959 	 * Force a reset which will cause a probe and re-establish
5960 	 * any state needed on the drive.
5961 	 */
5962 	nvp->nvp_state |= NV_PORT_RESET;
5963 	nvp->nvp_state &= ~(NV_PORT_RESTORE | NV_PORT_RESET_RETRY);
5964 	nv_reset(nvp, "resume");
5965 
5966 	mutex_exit(&nvp->nvp_mutex);
5967 }
5968 
5969 
5970 static void
5971 nv_suspend(nv_port_t *nvp)
5972 {
5973 	NVLOG(NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_suspend()", NULL);
5974 
5975 	mutex_enter(&nvp->nvp_mutex);
5976 
5977 #ifdef SGPIO_SUPPORT
5978 	if (nvp->nvp_type == SATA_DTYPE_ATADISK) {
5979 		nv_sgp_drive_disconnect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
5980 		    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
5981 	}
5982 #endif
5983 
5984 	if (nvp->nvp_state & NV_PORT_INACTIVE) {
5985 		mutex_exit(&nvp->nvp_mutex);
5986 
5987 		return;
5988 	}
5989 
5990 	/*
5991 	 * Stop the timeout handler.
5992 	 * (It will be restarted in nv_reset() during nv_resume().)
5993 	 */
5994 	if (nvp->nvp_timeout_id) {
5995 		(void) untimeout(nvp->nvp_timeout_id);
5996 		nvp->nvp_timeout_id = 0;
5997 	}
5998 
5999 	/* Disable interrupt */
6000 	(*(nvp->nvp_ctlp->nvc_set_intr))(nvp,
6001 	    NV_INTR_CLEAR_ALL|NV_INTR_DISABLE);
6002 
6003 	mutex_exit(&nvp->nvp_mutex);
6004 }
6005 
6006 
6007 static void
6008 nv_copy_registers(nv_port_t *nvp, sata_device_t *sd, sata_pkt_t *spkt)
6009 {
6010 	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
6011 	sata_cmd_t *scmd = &spkt->satapkt_cmd;
6012 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
6013 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
6014 	uchar_t status;
6015 	struct sata_cmd_flags flags;
6016 
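	/*
	 * Always copy the current SStatus, SError and SControl values so
	 * the sata framework sees the port state regardless of whether a
	 * packet is associated with this call.
	 */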
6017 	sd->satadev_scr.sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
6018 	sd->satadev_scr.serror = nv_get32(bar5_hdl, nvp->nvp_serror);
6019 	sd->satadev_scr.scontrol = nv_get32(bar5_hdl, nvp->nvp_sctrl);
6020 
6021 	if (spkt == NULL) {
6022 
6023 		return;
6024 	}
6025 
6026 	/*
6027 	 * In the error case, implicitly request return of the registers
6028 	 * needed for error handling.
6029 	 */
6030 	status = scmd->satacmd_status_reg = nv_get8(ctlhdl,
6031 	    nvp->nvp_altstatus);
6032 
6033 	flags = scmd->satacmd_flags;
6034 
6035 	if (status & SATA_STATUS_ERR) {
6036 		flags.sata_copy_out_lba_low_msb = B_TRUE;
6037 		flags.sata_copy_out_lba_mid_msb = B_TRUE;
6038 		flags.sata_copy_out_lba_high_msb = B_TRUE;
6039 		flags.sata_copy_out_lba_low_lsb = B_TRUE;
6040 		flags.sata_copy_out_lba_mid_lsb = B_TRUE;
6041 		flags.sata_copy_out_lba_high_lsb = B_TRUE;
6042 		flags.sata_copy_out_error_reg = B_TRUE;
6043 		flags.sata_copy_out_sec_count_msb = B_TRUE;
6044 		flags.sata_copy_out_sec_count_lsb = B_TRUE;
6045 		scmd->satacmd_status_reg = status;
6046 	}
6047 
6048 	if (scmd->satacmd_addr_type & ATA_ADDR_LBA48) {
6049 
6050 		/*
6051 		 * set HOB so that high byte will be read
6052 		 */
6053 		nv_put8(ctlhdl, nvp->nvp_devctl, ATDC_HOB|ATDC_D3);
6054 
6055 		/*
6056 		 * get the requested high bytes
6057 		 */
6058 		if (flags.sata_copy_out_sec_count_msb) {
6059 			scmd->satacmd_sec_count_msb =
6060 			    nv_get8(cmdhdl, nvp->nvp_count);
6061 		}
6062 
6063 		if (flags.sata_copy_out_lba_low_msb) {
6064 			scmd->satacmd_lba_low_msb =
6065 			    nv_get8(cmdhdl, nvp->nvp_sect);
6066 		}
6067 
6068 		if (flags.sata_copy_out_lba_mid_msb) {
6069 			scmd->satacmd_lba_mid_msb =
6070 			    nv_get8(cmdhdl, nvp->nvp_lcyl);
6071 		}
6072 
6073 		if (flags.sata_copy_out_lba_high_msb) {
6074 			scmd->satacmd_lba_high_msb =
6075 			    nv_get8(cmdhdl, nvp->nvp_hcyl);
6076 		}
6077 	}
6078 
6079 	/*
6080 	 * disable HOB so that low byte is read
6081 	 */
6082 	nv_put8(ctlhdl, nvp->nvp_devctl, ATDC_D3);
6083 
6084 	/*
6085 	 * get the requested low bytes
6086 	 */
6087 	if (flags.sata_copy_out_sec_count_lsb) {
6088 		scmd->satacmd_sec_count_lsb = nv_get8(cmdhdl, nvp->nvp_count);
6089 	}
6090 
6091 	if (flags.sata_copy_out_lba_low_lsb) {
6092 		scmd->satacmd_lba_low_lsb = nv_get8(cmdhdl, nvp->nvp_sect);
6093 	}
6094 
6095 	if (flags.sata_copy_out_lba_mid_lsb) {
6096 		scmd->satacmd_lba_mid_lsb = nv_get8(cmdhdl, nvp->nvp_lcyl);
6097 	}
6098 
6099 	if (flags.sata_copy_out_lba_high_lsb) {
6100 		scmd->satacmd_lba_high_lsb = nv_get8(cmdhdl, nvp->nvp_hcyl);
6101 	}
6102 
6103 	/*
6104 	 * get the device register if requested
6105 	 */
6106 	if (flags.sata_copy_out_device_reg) {
6107 		scmd->satacmd_device_reg =  nv_get8(cmdhdl, nvp->nvp_drvhd);
6108 	}
6109 
6110 	/*
6111 	 * get the error register if requested
6112 	 */
6113 	if (flags.sata_copy_out_error_reg) {
6114 		scmd->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
6115 	}
6116 }
6117 
6118 
6119 /*
6120  * Hot plug and remove interrupts can occur when the device is reset.  Just
6121  * masking the interrupt doesn't always work well because if a
6122  * different interrupt arrives on the other port, the driver can still
6123  * end up checking the state of the other port and discover the hot
6124  * interrupt flag is set even though it was masked.  Checking for recent
6125  * reset activity and then ignoring the interrupt is the easiest approach.
6126  *
6127  * Entered with nvp mutex held.
6128  */
6129 static void
6130 nv_report_add_remove(nv_port_t *nvp, int flags)
6131 {
6132 	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
6133 	uint32_t sstatus;
6134 	int i;
6135 	clock_t nv_lbolt = ddi_get_lbolt();
6136 
6137 
6138 	NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp, "nv_report_add_remove() - "
6139 	    "time (ticks) %d flags %x", nv_lbolt, flags);
6140 
6141 	/*
6142 	 * wait up to 1ms for sstatus to settle and reflect the true
6143 	 * status of the port.  Failure to do so can create confusion
6144 	 * in probe, where the incorrect sstatus value can still
6145 	 * persist.
6146 	 */
6147 	for (i = 0; i < 1000; i++) {
6148 		sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
6149 
6150 		if ((flags == NV_PORT_HOTREMOVED) &&
6151 		    ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) !=
6152 		    SSTATUS_DET_DEVPRE_PHYCOM)) {
6153 			break;
6154 		}
6155 
6156 		if ((flags != NV_PORT_HOTREMOVED) &&
6157 		    ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) ==
6158 		    SSTATUS_DET_DEVPRE_PHYCOM)) {
6159 			break;
6160 		}
6161 		drv_usecwait(1);
6162 	}
6163 
6164 	NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp,
6165 	    "sstatus took %d us for DEVPRE_PHYCOM to settle", i);
6166 
6167 	if (flags == NV_PORT_HOTREMOVED) {
6168 
6169 		(void) nv_abort_active(nvp, NULL, SATA_PKT_PORT_ERROR,
6170 		    B_FALSE);
6171 
6172 		/*
6173 		 * No device, so no point in bothering with a device reset
6174 		 */
6175 		nvp->nvp_type = SATA_DTYPE_NONE;
6176 		nvp->nvp_signature = 0;
6177 		nvp->nvp_state &= ~(NV_PORT_RESET | NV_PORT_RESET_RETRY |
6178 		    NV_PORT_RESTORE);
6179 		NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp,
6180 		    "nv_report_add_remove() hot removed", NULL);
6181 		nv_port_state_change(nvp,
6182 		    SATA_EVNT_DEVICE_DETACHED,
6183 		    SATA_ADDR_CPORT, 0);
6184 
6185 #ifdef SGPIO_SUPPORT
6186 		nv_sgp_drive_disconnect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
6187 		    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
6188 #endif
6189 	} else {
6190 		/*
6191 		 * This is a hot plug or link up indication
6192 		 * Now, re-check the link state - no link, no device
6193 		 */
6194 		if ((SSTATUS_GET_IPM(sstatus) == SSTATUS_IPM_ACTIVE) &&
6195 		    (SSTATUS_GET_DET(sstatus) == SSTATUS_DET_DEVPRE_PHYCOM)) {
6196 
6197 			if (nvp->nvp_type == SATA_DTYPE_NONE) {
6198 				/*
6199 				 * Real device attach - there was no device
6200 				 * attached to this port before this report
6201 				 */
6202 				NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp,
6203 				    "nv_report_add_remove() new device hot"
6204 				    "plugged", NULL);
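				/*
				 * Remember when the device was first seen;
				 * nv_delay_hotplug_notification() and the
				 * reset retry logic measure their delays
				 * from this timestamp.
				 */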
6205 				nvp->nvp_reset_time = ddi_get_lbolt();
6206 				if (!(nvp->nvp_state &
6207 				    (NV_PORT_RESET_RETRY | NV_PORT_RESET))) {
6208 
6209 					nvp->nvp_signature = 0;
6210 					if (nv_reset_after_hotplug != 0) {
6211 
6212 						/*
6213 						 * Send reset to obtain a device
6214 						 * signature
6215 						 */
6216 						nvp->nvp_state |=
6217 						    NV_PORT_RESET |
6218 						    NV_PORT_PROBE;
6219 						nv_reset(nvp,
6220 						    "report_add_remove");
6221 					} else {
6222 						nvp->nvp_type =
6223 						    SATA_DTYPE_UNKNOWN;
6224 					}
6225 				}
6226 
6227 				if (!(nvp->nvp_state & NV_PORT_PROBE)) {
6228 					if (nv_reset_after_hotplug == 0) {
6229 						/*
6230 						 * In case a hotplug interrupt
6231 						 * is generated right after a
6232 						 * link is up, delay reporting
6233 						 * a hotplug event to let the
6234 						 * drive initialize and send
6235 						 * a D2H FIS with a
6236 						 * signature.
6237 						 * The timeout will issue an
6238 						 * event notification after
6239 						 * the NV_HOTPLUG_DELAY
6240 						 * milliseconds delay.
6241 						 */
6242 						nvp->nvp_state |=
6243 						    NV_PORT_HOTPLUG_DELAY;
6244 						nvp->nvp_type =
6245 						    SATA_DTYPE_UNKNOWN;
6246 						/*
6247 						 * Make sure timer is running.
6248 						 */
6249 						nv_setup_timeout(nvp,
6250 						    NV_ONE_MSEC);
6251 					} else {
6252 						nv_port_state_change(nvp,
6253 						    SATA_EVNT_DEVICE_ATTACHED,
6254 						    SATA_ADDR_CPORT, 0);
6255 					}
6256 				}
6257 				return;
6258 			}
6259 			/*
6260 			 * Otherwise it is a bogus attach, indicating recovered
6261 			 * link loss. No real need to report it after-the-fact.
6262 			 * But we may keep some statistics, or notify the
6263 			 * sata module by reporting LINK_LOST/LINK_ESTABLISHED
6264 			 * events to keep track of such occurrences.
6265 			 * Anyhow, we may want to terminate signature
6266 			 * acquisition.
6267 			 */
6268 			NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp,
6269 			    "nv_report_add_remove() ignoring plug interrupt "
6270 			    "- recovered link?", NULL);
6271 
6272 			if (nvp->nvp_state &
6273 			    (NV_PORT_RESET_RETRY | NV_PORT_RESET)) {
6274 				NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp,
6275 				    "nv_report_add_remove() - "
6276 				    "time since last reset %dms",
6277 				    TICK_TO_MSEC(ddi_get_lbolt() -
6278 				    nvp->nvp_reset_time));
6279 				/*
6280 				 * If the driver does not have to wait for
6281 				 * a signature, then terminate reset processing
6282 				 * now.
6283 				 */
6284 				if (nv_wait_for_signature == 0) {
6285 					NVLOG(NVDBG_RESET, nvp->nvp_ctlp,
6286 					    nvp, "nv_report_add_remove() - "
6287 					    "terminating signature acquisition"
6288 					    ", time after reset: %dms",
6289 					    TICK_TO_MSEC(ddi_get_lbolt() -
6290 					    nvp->nvp_reset_time));
6291 
6292 					nvp->nvp_state &= ~(NV_PORT_RESET |
6293 					    NV_PORT_RESET_RETRY);
6294 
6295 					if (!(nvp->nvp_state & NV_PORT_PROBE)) {
6296 						nvp->nvp_state |=
6297 						    NV_PORT_RESTORE;
6298 						nvp->nvp_state &=
6299 						    ~NV_PORT_PROBE;
6300 
6301 						/*
6302 						 * It is not the initial device
6303 						 * probing, so notify sata
6304 						 * module that device was
6305 						 * reset
6306 						 */
6307 						nv_port_state_change(nvp,
6308 						    SATA_EVNT_DEVICE_RESET,
6309 						    SATA_ADDR_DCPORT,
6310 						    SATA_DSTATE_RESET |
6311 						    SATA_DSTATE_PWR_ACTIVE);
6312 					}
6313 
6314 				}
6315 			}
6316 			return;
6317 		}
6318 		NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp, "nv_report_add_remove() "
6319 		    "ignoring add dev interrupt - "
6320 		    "link is down or no device!", NULL);
6321 	}
6322 
6323 }
6324 
6325 /*
6326  * Get request sense data and stuff it into the command's sense buffer.
6327  * Start a request sense command in order to get sense data to insert
6328  * in the sata packet's rqsense buffer.  The command completion
6329  * processing is in nv_intr_pkt_pio.
6330  *
6331  * The sata framework provides a function to allocate and set up a
6332  * request sense packet command. The reasons it is not used here are:
6333  * a) it cannot be called in an interrupt context and this function is
6334  *    called in an interrupt context.
6335  * b) it allocates DMA resources that are not used here because this is
6336  *    implemented using PIO.
6337  *
6338  * If, in the future, this is changed to use DMA, the sata framework should
6339  * be used to allocate and set-up the error retrieval (request sense)
6340  * command.
6341  */
6342 static int
6343 nv_start_rqsense_pio(nv_port_t *nvp, nv_slot_t *nv_slotp)
6344 {
6345 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
6346 	sata_cmd_t *satacmd = &spkt->satapkt_cmd;
6347 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
6348 	int cdb_len = spkt->satapkt_cmd.satacmd_acdb_len;
6349 
6350 	NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
6351 	    "nv_start_rqsense_pio: start", NULL);
6352 
6353 	/* clear the local request sense buffer before starting the command */
6354 	bzero(nv_slotp->nvslot_rqsense_buff, SATA_ATAPI_RQSENSE_LEN);
6355 
6356 	/* Write the request sense PACKET command */
6357 
6358 	/* select the drive */
6359 	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);
6360 
6361 	/* make certain the drive is selected */
6362 	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
6363 	    NV_SEC2USEC(5), 0) == B_FALSE) {
6364 		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
6365 		    "nv_start_rqsense_pio: drive select failed", NULL);
6366 		return (NV_FAILURE);
6367 	}
6368 
6369 	/* set up the command */
6370 	nv_put8(cmdhdl, nvp->nvp_feature, 0);	/* deassert DMA and OVL */
6371 	nv_put8(cmdhdl, nvp->nvp_hcyl, SATA_ATAPI_MAX_BYTES_PER_DRQ >> 8);
6372 	nv_put8(cmdhdl, nvp->nvp_lcyl, SATA_ATAPI_MAX_BYTES_PER_DRQ & 0xff);
6373 	nv_put8(cmdhdl, nvp->nvp_sect, 0);
6374 	nv_put8(cmdhdl, nvp->nvp_count, 0);	/* no tag */
6375 
6376 	/* initiate the command by writing the command register last */
6377 	nv_put8(cmdhdl, nvp->nvp_cmd, SATAC_PACKET);
6378 
6379 	/* Give the host ctlr time to do its thing, according to ATA/ATAPI */
6380 	NV_DELAY_NSEC(400);
6381 
6382 	/*
6383 	 * Wait for the device to indicate that it is ready for the command
6384 	 * ATAPI protocol state - HP0: Check_Status_A
6385 	 */
6386 
6387 	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
6388 	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
6389 	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
6390 	    4000000, 0) == B_FALSE) {
6391 		if (nv_get8(cmdhdl, nvp->nvp_status) &
6392 		    (SATA_STATUS_ERR | SATA_STATUS_DF)) {
6393 			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
6394 			    "nv_start_rqsense_pio: rqsense dev error (HP0)",
6395 			    NULL);
6396 		} else {
6397 			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
6398 			    "nv_start_rqsense_pio: rqsense timeout (HP0)",
6399 			    NULL);
6400 		}
6401 
6402 		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
6403 		nv_complete_io(nvp, spkt, 0);
6404 		nvp->nvp_state |= NV_PORT_RESET;
6405 		nvp->nvp_state &= ~(NV_PORT_RESTORE | NV_PORT_RESET_RETRY);
6406 		nv_reset(nvp, "rqsense_pio");
6407 
6408 		return (NV_FAILURE);
6409 	}
6410 
6411 	/*
6412 	 * Put the ATAPI command in the data register
6413 	 * ATAPI protocol state - HP1: Send_Packet
6414 	 */
6415 
6416 	ddi_rep_put16(cmdhdl, (ushort_t *)nv_rqsense_cdb,
6417 	    (ushort_t *)nvp->nvp_data,
6418 	    (cdb_len >> 1), DDI_DEV_NO_AUTOINCR);
6419 
6420 	NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
6421 	    "nv_start_rqsense_pio: exiting into HP3", NULL);
6422 
6423 	return (NV_SUCCESS);
6424 }
6425 
6426 /*
6427  * quiesce(9E) entry point.
6428  *
6429  * This function is called when the system is single-threaded at high
6430  * PIL with preemption disabled. Therefore, this function must not
6431  * block.
6432  *
6433  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
6434  * DDI_FAILURE indicates an error condition and should almost never happen.
6435  */
6436 static int
6437 nv_quiesce(dev_info_t *dip)
6438 {
6439 	int port, instance = ddi_get_instance(dip);
6440 	nv_ctl_t *nvc;
6441 
6442 	if ((nvc = (nv_ctl_t *)ddi_get_soft_state(nv_statep, instance)) == NULL)
6443 		return (DDI_FAILURE);
6444 
6445 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
6446 		nv_port_t *nvp = &(nvc->nvc_port[port]);
6447 		ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
6448 		ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
6449 		uint32_t sctrl;
6450 
6451 		/*
6452 		 * Stop the controller from generating interrupts on this port.
6453 		 */
6454 		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE_NON_BLOCKING);
6455 
6456 		/*
6457 		 * clear signature registers
6458 		 */
6459 		nv_put8(cmdhdl, nvp->nvp_sect, 0);
6460 		nv_put8(cmdhdl, nvp->nvp_lcyl, 0);
6461 		nv_put8(cmdhdl, nvp->nvp_hcyl, 0);
6462 		nv_put8(cmdhdl, nvp->nvp_count, 0);
6463 
6464 		nvp->nvp_signature = 0;
6465 		nvp->nvp_type = 0;
6466 		nvp->nvp_state |= NV_PORT_RESET;
6467 		nvp->nvp_reset_time = ddi_get_lbolt();
6468 
6469 		/*
6470 		 * assert reset in the PHY by writing a 1 to bit 0 of SControl
6471 		 */
6472 		sctrl = nv_get32(bar5_hdl, nvp->nvp_sctrl);
6473 
6474 		nv_put32(bar5_hdl, nvp->nvp_sctrl,
6475 		    sctrl | SCONTROL_DET_COMRESET);
6476 
6477 		/*
6478 		 * wait 1ms
6479 		 */
6480 		drv_usecwait(1000);
6481 
6482 		/*
6483 		 * de-assert reset in PHY
6484 		 */
6485 		nv_put32(bar5_hdl, nvp->nvp_sctrl, sctrl);
6486 	}
6487 
6488 	return (DDI_SUCCESS);
6489 }
6490 
6491 
6492 #ifdef SGPIO_SUPPORT
6493 /*
6494  * NVIDIA specific SGPIO LED support
6495  * Please refer to the NVIDIA documentation for additional details
6496  */
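/*
 * Note on drive numbering: the LED routines below derive a drive index
 * from the controller and port number via SGP_CTLR_PORT_TO_DRV().  With
 * two ports per controller and only the first two controllers
 * participating (see nv_sgp_led_init below), this covers the four drives
 * the current implementation supports.
 */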
6497 
6498 /*
6499  * nv_sgp_led_init
6500  * Detect SGPIO support.  If present, initialize.
6501  */
6502 static void
6503 nv_sgp_led_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
6504 {
6505 	uint16_t csrp;		/* SGPIO_CSRP from PCI config space */
6506 	uint32_t cbp;		/* SGPIO_CBP from PCI config space */
6507 	nv_sgp_cmn_t *cmn;	/* shared data structure */
6508 	int i;
6509 	char tqname[SGPIO_TQ_NAME_LEN];
6510 	extern caddr_t psm_map_phys_new(paddr_t, size_t, int);
6511 
6512 	/*
6513 	 * Initialize with appropriately invalid values in case this function
6514 	 * exits without initializing SGPIO (for example, there is no SGPIO
6515 	 * support).
6516 	 */
6517 	nvc->nvc_sgp_csr = 0;
6518 	nvc->nvc_sgp_cbp = NULL;
6519 	nvc->nvc_sgp_cmn = NULL;
6520 
6521 	/*
6522 	 * Only try to initialize SGPIO LED support if this property
6523 	 * indicates it should be.
6524 	 */
6525 	if (ddi_getprop(DDI_DEV_T_ANY, nvc->nvc_dip, DDI_PROP_DONTPASS,
6526 	    "enable-sgpio-leds", 0) != 1)
6527 		return;
6528 
6529 	/*
6530 	 * CK804 can pass the sgpio_detect test even though it does not support
6531 	 * SGPIO, so don't even look at a CK804.
6532 	 */
6533 	if (nvc->nvc_mcp5x_flag != B_TRUE)
6534 		return;
6535 
6536 	/*
6537 	 * The NVIDIA SGPIO support can nominally handle 6 drives.
6538 	 * However, the current implementation only supports 4 drives.
6539 	 * With two drives per controller, that means only the first two
6540 	 * controllers are considered.
6541 	 */
6542 	if ((nvc->nvc_ctlr_num != 0) && (nvc->nvc_ctlr_num != 1))
6543 		return;
6544 
6545 	/* confirm that the SGPIO registers are there */
6546 	if (nv_sgp_detect(pci_conf_handle, &csrp, &cbp) != NV_SUCCESS) {
6547 		NVLOG(NVDBG_INIT, nvc, NULL,
6548 		    "SGPIO registers not detected", NULL);
6549 		return;
6550 	}
6551 
6552 	/* save off the SGPIO_CSR I/O address */
6553 	nvc->nvc_sgp_csr = csrp;
6554 
6555 	/* map in Control Block */
6556 	nvc->nvc_sgp_cbp = (nv_sgp_cb_t *)psm_map_phys_new(cbp,
6557 	    sizeof (nv_sgp_cb_t), PROT_READ | PROT_WRITE);
6558 
6559 	/* initialize the SGPIO h/w */
6560 	if (nv_sgp_init(nvc) == NV_FAILURE) {
6561 		nv_cmn_err(CE_WARN, nvc, NULL,
6562 		    "Unable to initialize SGPIO");
6563 	}
6564 
6565 	/*
6566 	 * Initialize the shared space for this instance.  This could
6567 	 * involve allocating the space, saving a pointer to the space
6568 	 * and starting the taskq that actually turns the LEDs on and off.
6569 	 * Or, it could involve just getting the pointer to the already
6570 	 * allocated space.
6571 	 */
6572 
6573 	mutex_enter(&nv_sgp_c2c_mutex);
6574 
6575 	/* try and find our CBP in the mapping table */
6576 	cmn = NULL;
6577 	for (i = 0; i < NV_MAX_CBPS; i++) {
6578 		if (nv_sgp_cbp2cmn[i].c2cm_cbp == cbp) {
6579 			cmn = nv_sgp_cbp2cmn[i].c2cm_cmn;
6580 			break;
6581 		}
6582 
6583 		if (nv_sgp_cbp2cmn[i].c2cm_cbp == 0)
6584 			break;
6585 	}
6586 
6587 	if (i >= NV_MAX_CBPS) {
6588 		/*
6589 		 * CBP to shared space mapping table is full
6590 		 */
6591 		nvc->nvc_sgp_cmn = NULL;
6592 		nv_cmn_err(CE_WARN, nvc, NULL,
6593 		    "LED handling not initialized - too many controllers");
6594 	} else if (cmn == NULL) {
6595 		/*
6596 		 * Allocate the shared space, point the SGPIO scratch register
6597 		 * at it and start the led update taskq.
6598 		 */
6599 
6600 		/* allocate shared space */
6601 		cmn = (nv_sgp_cmn_t *)kmem_zalloc(sizeof (nv_sgp_cmn_t),
6602 		    KM_SLEEP);
6603 		if (cmn == NULL) {
6604 			nv_cmn_err(CE_WARN, nvc, NULL,
6605 			    "Failed to allocate shared data");
6606 			return;
6607 		}
6608 
6609 		nvc->nvc_sgp_cmn = cmn;
6610 
6611 		/* initialize the shared data structure */
6612 		cmn->nvs_in_use = (1 << nvc->nvc_ctlr_num);
6613 		cmn->nvs_connected = 0;
6614 		cmn->nvs_activity = 0;
6615 		cmn->nvs_cbp = cbp;
6616 
6617 		mutex_init(&cmn->nvs_slock, NULL, MUTEX_DRIVER, NULL);
6618 		mutex_init(&cmn->nvs_tlock, NULL, MUTEX_DRIVER, NULL);
6619 		cv_init(&cmn->nvs_cv, NULL, CV_DRIVER, NULL);
6620 
6621 		/* put the address in the SGPIO scratch register */
6622 #if defined(__amd64)
6623 		nvc->nvc_sgp_cbp->sgpio_sr = (uint64_t)cmn;
6624 #else
6625 		nvc->nvc_sgp_cbp->sgpio_sr = (uint32_t)cmn;
6626 #endif
6627 
6628 		/* add an entry to the cbp to cmn mapping table */
6629 
6630 		/* i should be the next available table position */
6631 		nv_sgp_cbp2cmn[i].c2cm_cbp = cbp;
6632 		nv_sgp_cbp2cmn[i].c2cm_cmn = cmn;
6633 
6634 		/* start the activity LED taskq */
6635 
6636 		/*
6637 		 * The taskq name should be unique, so use the low bits of lbolt.
6638 		 */
6639 		(void) snprintf(tqname, SGPIO_TQ_NAME_LEN,
6640 		    "nvSataLed%x", (short)(ddi_get_lbolt() & 0xffff));
6641 		cmn->nvs_taskq = ddi_taskq_create(nvc->nvc_dip, tqname, 1,
6642 		    TASKQ_DEFAULTPRI, 0);
6643 		if (cmn->nvs_taskq == NULL) {
6644 			cmn->nvs_taskq_delay = 0;
6645 			nv_cmn_err(CE_WARN, nvc, NULL,
6646 			    "Failed to start activity LED taskq");
6647 		} else {
6648 			cmn->nvs_taskq_delay = SGPIO_LOOP_WAIT_USECS;
6649 			(void) ddi_taskq_dispatch(cmn->nvs_taskq,
6650 			    nv_sgp_activity_led_ctl, nvc, DDI_SLEEP);
6651 		}
6652 	} else {
6653 		nvc->nvc_sgp_cmn = cmn;
6654 		cmn->nvs_in_use |= (1 << nvc->nvc_ctlr_num);
6655 	}
6656 
6657 	mutex_exit(&nv_sgp_c2c_mutex);
6658 }
6659 
6660 /*
6661  * nv_sgp_detect
6662  * Read the SGPIO_CSR and SGPIO_CBP values from PCI config space and
6663  * report back whether both were readable.
6664  */
6665 static int
6666 nv_sgp_detect(ddi_acc_handle_t pci_conf_handle, uint16_t *csrpp,
6667     uint32_t *cbpp)
6668 {
6669 	/* get the SGPIO_CSRP */
6670 	*csrpp = pci_config_get16(pci_conf_handle, SGPIO_CSRP);
6671 	if (*csrpp == 0) {
6672 		return (NV_FAILURE);
6673 	}
6674 
6675 	/* SGPIO_CSRP is good, get the SGPIO_CBP */
6676 	*cbpp = pci_config_get32(pci_conf_handle, SGPIO_CBP);
6677 	if (*cbpp == 0) {
6678 		return (NV_FAILURE);
6679 	}
6680 
6681 	/* SGPIO_CBP is good, so we must support SGPIO */
6682 	return (NV_SUCCESS);
6683 }
6684 
6685 /*
6686  * nv_sgp_init
6687  * Initialize SGPIO.
6688  * The initialization process is described by NVIDIA, but the hardware does
6689  * not always behave as documented, so several steps have been changed and/or
6690  * omitted.
6691  */
6692 static int
6693 nv_sgp_init(nv_ctl_t *nvc)
6694 {
6695 	int seq;
6696 	int rval = NV_SUCCESS;
6697 	hrtime_t start, end;
6698 	uint32_t cmd;
6699 	uint32_t status;
6700 	int drive_count;
6701 
6702 	status = nv_sgp_csr_read(nvc);
6703 	if (SGPIO_CSR_SSTAT(status) == SGPIO_STATE_RESET) {
6704 		/* SGPIO logic is in reset state and requires initialization */
6705 
6706 		/* noting the Sequence field value */
6707 		seq = SGPIO_CSR_SEQ(status);
6708 
6709 		/* issue SGPIO_CMD_READ_PARAMS command */
6710 		cmd = SGPIO_CSR_CMD_SET(SGPIO_CMD_READ_PARAMS);
6711 		nv_sgp_csr_write(nvc, cmd);
6712 
6713 		DTRACE_PROBE2(sgpio__cmd, int, cmd, int, status);
6714 
6715 		/* poll for command completion */
6716 		start = gethrtime();
6717 		end = start + NV_SGP_CMD_TIMEOUT;
6718 		for (;;) {
6719 			status = nv_sgp_csr_read(nvc);
6720 
6721 			/* break on error */
6722 			if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_ERROR) {
6723 				NVLOG(NVDBG_VERBOSE, nvc, NULL,
6724 				    "Command error during initialization",
6725 				    NULL);
6726 				rval = NV_FAILURE;
6727 				break;
6728 			}
6729 
6730 			/* command processing is taking place */
6731 			if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK) {
6732 				if (SGPIO_CSR_SEQ(status) != seq) {
6733 					NVLOG(NVDBG_VERBOSE, nvc, NULL,
6734 					    "Sequence number change error",
6735 					    NULL);
6736 				}
6737 
6738 				break;
6739 			}
6740 
6741 			/* if completion not detected in 2000ms ... */
6742 
6743 			if (gethrtime() > end)
6744 				break;
6745 
6746 			/* wait 400 ns before checking again */
6747 			NV_DELAY_NSEC(400);
6748 		}
6749 	}
6750 
6751 	if (rval == NV_FAILURE)
6752 		return (rval);
6753 
6754 	if (SGPIO_CSR_SSTAT(status) != SGPIO_STATE_OPERATIONAL) {
6755 		NVLOG(NVDBG_VERBOSE, nvc, NULL,
6756 		    "SGPIO logic not operational after init - state %d",
6757 		    SGPIO_CSR_SSTAT(status));
6758 		/*
6759 		 * Should return (NV_FAILURE) but the hardware can be
6760 		 * operational even if the SGPIO Status does not indicate
6761 		 * this.
6762 		 */
6763 	}
6764 
6765 	/*
6766 	 * NVIDIA recommends reading the supported drive count even
6767 	 * though they also indicate that it is always 4 at this time.
6768 	 */
6769 	drive_count = SGP_CR0_DRV_CNT(nvc->nvc_sgp_cbp->sgpio_cr0);
6770 	if (drive_count != SGPIO_DRV_CNT_VALUE) {
6771 		NVLOG(NVDBG_INIT, nvc, NULL,
6772 		    "SGPIO reported undocumented drive count - %d",
6773 		    drive_count);
6774 	}
6775 
6776 	NVLOG(NVDBG_INIT, nvc, NULL,
6777 	    "initialized ctlr: %d csr: 0x%08x",
6778 	    nvc->nvc_ctlr_num, nvc->nvc_sgp_csr);
6779 
6780 	return (rval);
6781 }
6782 
6783 static int
6784 nv_sgp_check_set_cmn(nv_ctl_t *nvc)
6785 {
6786 	nv_sgp_cmn_t *cmn = nvc->nvc_sgp_cmn;
6787 
6788 	if (cmn == NULL)
6789 		return (NV_FAILURE);
6790 
6791 	mutex_enter(&cmn->nvs_slock);
6792 	cmn->nvs_in_use |= (1 << nvc->nvc_ctlr_num);
6793 	mutex_exit(&cmn->nvs_slock);
6794 
6795 	return (NV_SUCCESS);
6796 }
6797 
6798 /*
6799  * nv_sgp_csr_read
6800  * This is just a 32-bit port read from the value that was obtained from the
6801  * PCI config space.
6802  *
6803  * XXX It was advised to use the in[bwl] functions for this, even though they
6804  * are obsolete interfaces.
6805  */
6806 static int
6807 nv_sgp_csr_read(nv_ctl_t *nvc)
6808 {
6809 	return (inl(nvc->nvc_sgp_csr));
6810 }
6811 
6812 /*
6813  * nv_sgp_csr_write
6814  * This is just a 32-bit I/O port write.  The port number was obtained from
6815  * the PCI config space.
6816  *
6817  * XXX It was advised to use the out[bwl] functions for this, even though they
6818  * are obsolete interfaces.
6819  */
6820 static void
6821 nv_sgp_csr_write(nv_ctl_t *nvc, uint32_t val)
6822 {
6823 	outl(nvc->nvc_sgp_csr, val);
6824 }
6825 
6826 /*
6827  * nv_sgp_write_data
6828  * Cause SGPIO to send Control Block data
6829  */
6830 static int
6831 nv_sgp_write_data(nv_ctl_t *nvc)
6832 {
6833 	hrtime_t start, end;
6834 	uint32_t status;
6835 	uint32_t cmd;
6836 
6837 	/* issue command */
6838 	cmd = SGPIO_CSR_CMD_SET(SGPIO_CMD_WRITE_DATA);
6839 	nv_sgp_csr_write(nvc, cmd);
6840 
6841 	/* poll for completion */
6842 	start = gethrtime();
6843 	end = start + NV_SGP_CMD_TIMEOUT;
6844 	for (;;) {
6845 		status = nv_sgp_csr_read(nvc);
6846 
6847 		/* break on error completion */
6848 		if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_ERROR)
6849 			break;
6850 
6851 		/* break on successful completion */
6852 		if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK)
6853 			break;
6854 
6855 		/* Wait 400 ns and try again */
6856 		NV_DELAY_NSEC(400);
6857 
6858 		if (gethrtime() > end)
6859 			break;
6860 	}
6861 
6862 	if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK)
6863 		return (NV_SUCCESS);
6864 
6865 	return (NV_FAILURE);
6866 }
6867 
6868 /*
6869  * nv_sgp_activity_led_ctl
6870  * This is run as a taskq.  It wakes up at a fixed interval and checks to
6871  * see if any of the activity LEDs need to be changed.
6872  */
6873 static void
6874 nv_sgp_activity_led_ctl(void *arg)
6875 {
6876 	nv_ctl_t *nvc = (nv_ctl_t *)arg;
6877 	nv_sgp_cmn_t *cmn;
6878 	volatile nv_sgp_cb_t *cbp;
6879 	clock_t ticks;
6880 	uint8_t drv_leds;
6881 	uint32_t old_leds;
6882 	uint32_t new_led_state;
6883 	int i;
6884 
6885 	cmn = nvc->nvc_sgp_cmn;
6886 	cbp = nvc->nvc_sgp_cbp;
6887 
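	/*
	 * Loop until nvs_taskq_delay is cleared (done in nv_sgp_cleanup),
	 * at which point the timed wait below is skipped and the taskq
	 * thread exits.
	 */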
6888 	do {
6889 		/* save off the old state of all of the LEDs */
6890 		old_leds = cbp->sgpio0_tr;
6891 
6892 		DTRACE_PROBE3(sgpio__activity__state,
6893 		    int, cmn->nvs_connected, int, cmn->nvs_activity,
6894 		    int, old_leds);
6895 
6896 		new_led_state = 0;
6897 
6898 		/* for each drive */
6899 		for (i = 0; i < SGPIO_DRV_CNT_VALUE; i++) {
6900 
6901 			/* get the current state of the LEDs for the drive */
6902 			drv_leds = SGPIO0_TR_DRV(old_leds, i);
6903 
6904 			if ((cmn->nvs_connected & (1 << i)) == 0) {
6905 				/* if not connected, turn off activity */
6906 				drv_leds &= ~TR_ACTIVE_MASK;
6907 				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_DISABLE);
6908 
6909 				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6910 				new_led_state |=
6911 				    SGPIO0_TR_DRV_SET(drv_leds, i);
6912 
6913 				continue;
6914 			}
6915 
6916 			if ((cmn->nvs_activity & (1 << i)) == 0) {
6917 				/* connected, but not active */
6918 				drv_leds &= ~TR_ACTIVE_MASK;
6919 				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_ENABLE);
6920 
6921 				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6922 				new_led_state |=
6923 				    SGPIO0_TR_DRV_SET(drv_leds, i);
6924 
6925 				continue;
6926 			}
6927 
6928 			/* connected and active */
6929 			if (TR_ACTIVE(drv_leds) == TR_ACTIVE_ENABLE) {
6930 				/* was enabled, so disable */
6931 				drv_leds &= ~TR_ACTIVE_MASK;
6932 				drv_leds |=
6933 				    TR_ACTIVE_SET(TR_ACTIVE_DISABLE);
6934 
6935 				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6936 				new_led_state |=
6937 				    SGPIO0_TR_DRV_SET(drv_leds, i);
6938 			} else {
6939 				/* was disabled, so enable */
6940 				drv_leds &= ~TR_ACTIVE_MASK;
6941 				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_ENABLE);
6942 
6943 				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6944 				new_led_state |=
6945 				    SGPIO0_TR_DRV_SET(drv_leds, i);
6946 			}
6947 
6948 			/*
6949 			 * Clear the activity bit.
6950 			 * If there is drive activity again within the
6951 			 * loop interval (currently 1/16 second), nvs_activity
6952 			 * will be set again and the "connected and active"
6953 			 * condition above will cause the LED to blink
6954 			 * off and on at the loop interval rate.  The
6955 			 * rate may be increased (interval shortened) as
6956 			 * long as it is not more than 1/30 second.
6957 			 */
6958 			mutex_enter(&cmn->nvs_slock);
6959 			cmn->nvs_activity &= ~(1 << i);
6960 			mutex_exit(&cmn->nvs_slock);
6961 		}
6962 
6963 		DTRACE_PROBE1(sgpio__new__led__state, int, new_led_state);
6964 
6965 		/* write out LED values */
6966 
6967 		mutex_enter(&cmn->nvs_slock);
6968 		cbp->sgpio0_tr &= ~TR_ACTIVE_MASK_ALL;
6969 		cbp->sgpio0_tr |= new_led_state;
6970 		cbp->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
6971 		mutex_exit(&cmn->nvs_slock);
6972 
6973 		if (nv_sgp_write_data(nvc) == NV_FAILURE) {
6974 			NVLOG(NVDBG_VERBOSE, nvc, NULL,
6975 			    "nv_sgp_write_data failure updating active LED",
6976 			    NULL);
6977 		}
6978 
6979 		/* now rest for the interval */
6980 		mutex_enter(&cmn->nvs_tlock);
6981 		ticks = drv_usectohz(cmn->nvs_taskq_delay);
6982 		if (ticks > 0)
6983 			(void) cv_reltimedwait(&cmn->nvs_cv, &cmn->nvs_tlock,
6984 			    ticks, TR_CLOCK_TICK);
6985 		mutex_exit(&cmn->nvs_tlock);
6986 	} while (ticks > 0);
6987 }
6988 
6989 /*
6990  * nv_sgp_drive_connect
6991  * Set the flag used to indicate that the drive is attached to the HBA.
6992  * Used to let the taskq know that it should turn the Activity LED on.
6993  */
6994 static void
6995 nv_sgp_drive_connect(nv_ctl_t *nvc, int drive)
6996 {
6997 	nv_sgp_cmn_t *cmn;
6998 
6999 	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
7000 		return;
7001 	cmn = nvc->nvc_sgp_cmn;
7002 
7003 	mutex_enter(&cmn->nvs_slock);
7004 	cmn->nvs_connected |= (1 << drive);
7005 	mutex_exit(&cmn->nvs_slock);
7006 }
7007 
7008 /*
7009  * nv_sgp_drive_disconnect
7010  * Clears the flag that indicates the drive is attached to the HBA.
7011  * Used to let the taskq know that it should turn the
7012  * Activity LED off.  The flag that indicates that the drive is in use is
7013  * also cleared.
7014  */
7015 static void
7016 nv_sgp_drive_disconnect(nv_ctl_t *nvc, int drive)
7017 {
7018 	nv_sgp_cmn_t *cmn;
7019 
7020 	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
7021 		return;
7022 	cmn = nvc->nvc_sgp_cmn;
7023 
7024 	mutex_enter(&cmn->nvs_slock);
7025 	cmn->nvs_connected &= ~(1 << drive);
7026 	cmn->nvs_activity &= ~(1 << drive);
7027 	mutex_exit(&cmn->nvs_slock);
7028 }
7029 
7030 /*
7031  * nv_sgp_drive_active
7032  * Sets the flag used to indicate that the drive has been accessed and the
7033  * LED should be flicked off, then on.  It is cleared at a fixed time
7034  * interval by the LED taskq and set by the sata command start.
7035  */
7036 static void
7037 nv_sgp_drive_active(nv_ctl_t *nvc, int drive)
7038 {
7039 	nv_sgp_cmn_t *cmn;
7040 
7041 	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
7042 		return;
7043 	cmn = nvc->nvc_sgp_cmn;
7044 
7045 	DTRACE_PROBE1(sgpio__active, int, drive);
7046 
7047 	mutex_enter(&cmn->nvs_slock);
7048 	cmn->nvs_activity |= (1 << drive);
7049 	mutex_exit(&cmn->nvs_slock);
7050 }
7051 
7052 
7053 /*
7054  * nv_sgp_locate
7055  * Turns the Locate/OK2RM LED off or on for a particular drive.  State is
7056  * maintained in the SGPIO Control Block.
7057  */
7058 static void
7059 nv_sgp_locate(nv_ctl_t *nvc, int drive, int value)
7060 {
7061 	uint8_t leds;
7062 	volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
7063 	nv_sgp_cmn_t *cmn;
7064 
7065 	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
7066 		return;
7067 	cmn = nvc->nvc_sgp_cmn;
7068 
7069 	if ((drive < 0) || (drive >= SGPIO_DRV_CNT_VALUE))
7070 		return;
7071 
7072 	DTRACE_PROBE2(sgpio__locate, int, drive, int, value);
7073 
7074 	mutex_enter(&cmn->nvs_slock);
7075 
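	/*
	 * Read-modify-write this drive's LED field, changing only the
	 * Locate bits.
	 */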
7076 	leds = SGPIO0_TR_DRV(cb->sgpio0_tr, drive);
7077 
7078 	leds &= ~TR_LOCATE_MASK;
7079 	leds |= TR_LOCATE_SET(value);
7080 
7081 	cb->sgpio0_tr &= SGPIO0_TR_DRV_CLR(drive);
7082 	cb->sgpio0_tr |= SGPIO0_TR_DRV_SET(leds, drive);
7083 
7084 	cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
7085 
7086 	mutex_exit(&cmn->nvs_slock);
7087 
7088 	if (nv_sgp_write_data(nvc) == NV_FAILURE) {
7089 		nv_cmn_err(CE_WARN, nvc, NULL,
7090 		    "nv_sgp_write_data failure updating OK2RM/Locate LED");
7091 	}
7092 }
7093 
7094 /*
7095  * nv_sgp_error
7096  * Turns the Error/Failure LED off or on for a particular drive.  State is
7097  * maintained in the SGPIO Control Block.
7098  */
7099 static void
7100 nv_sgp_error(nv_ctl_t *nvc, int drive, int value)
7101 {
7102 	uint8_t leds;
7103 	volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
7104 	nv_sgp_cmn_t *cmn;
7105 
7106 	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
7107 		return;
7108 	cmn = nvc->nvc_sgp_cmn;
7109 
7110 	if ((drive < 0) || (drive >= SGPIO_DRV_CNT_VALUE))
7111 		return;
7112 
7113 	DTRACE_PROBE2(sgpio__error, int, drive, int, value);
7114 
7115 	mutex_enter(&cmn->nvs_slock);
7116 
7117 	leds = SGPIO0_TR_DRV(cb->sgpio0_tr, drive);
7118 
7119 	leds &= ~TR_ERROR_MASK;
7120 	leds |= TR_ERROR_SET(value);
7121 
7122 	cb->sgpio0_tr &= SGPIO0_TR_DRV_CLR(drive);
7123 	cb->sgpio0_tr |= SGPIO0_TR_DRV_SET(leds, drive);
7124 
7125 	cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
7126 
7127 	mutex_exit(&cmn->nvs_slock);
7128 
7129 	if (nv_sgp_write_data(nvc) == NV_FAILURE) {
7130 		nv_cmn_err(CE_WARN, nvc, NULL,
7131 		    "nv_sgp_write_data failure updating Fail/Error LED");
7132 	}
7133 }
7134 
7135 static void
7136 nv_sgp_cleanup(nv_ctl_t *nvc)
7137 {
7138 	int drive, i;
7139 	uint8_t drv_leds;
7140 	uint32_t led_state;
7141 	volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
7142 	nv_sgp_cmn_t *cmn = nvc->nvc_sgp_cmn;
7143 	extern void psm_unmap_phys(caddr_t, size_t);
7144 
7145 	/*
7146 	 * If the SGPIO Control Block isn't mapped or the shared data
7147 	 * structure isn't present in this instance, there isn't much that
7148 	 * can be cleaned up.
7149 	 */
7150 	if ((cb == NULL) || (cmn == NULL))
7151 		return;
7152 
7153 	/* turn off activity LEDs for this controller */
7154 	drv_leds = TR_ACTIVE_SET(TR_ACTIVE_DISABLE);
7155 
7156 	/* get the existing LED state */
7157 	led_state = cb->sgpio0_tr;
7158 
7159 	/* turn off port 0 */
7160 	drive = SGP_CTLR_PORT_TO_DRV(nvc->nvc_ctlr_num, 0);
7161 	led_state &= SGPIO0_TR_DRV_CLR(drive);
7162 	led_state |= SGPIO0_TR_DRV_SET(drv_leds, drive);
7163 
7164 	/* turn off port 1 */
7165 	drive = SGP_CTLR_PORT_TO_DRV(nvc->nvc_ctlr_num, 1);
7166 	led_state &= SGPIO0_TR_DRV_CLR(drive);
7167 	led_state |= SGPIO0_TR_DRV_SET(drv_leds, drive);
7168 
7169 	/* set the new led state, which should turn off this ctrl's LEDs */
7170 	cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
7171 	(void) nv_sgp_write_data(nvc);
7172 
7173 	/* clear the controller's in use bit */
7174 	mutex_enter(&cmn->nvs_slock);
7175 	cmn->nvs_in_use &= ~(1 << nvc->nvc_ctlr_num);
7176 	mutex_exit(&cmn->nvs_slock);
7177 
7178 	if (cmn->nvs_in_use == 0) {
7179 		/* if all "in use" bits cleared, take everything down */
7180 
7181 		if (cmn->nvs_taskq != NULL) {
7182 			/* allow activity taskq to exit */
7183 			cmn->nvs_taskq_delay = 0;
7184 			cv_broadcast(&cmn->nvs_cv);
7185 
7186 			/* then destroy it */
7187 			ddi_taskq_destroy(cmn->nvs_taskq);
7188 		}
7189 
7190 		/* turn off all of the LEDs */
7191 		cb->sgpio0_tr = 0;
7192 		cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
7193 		(void) nv_sgp_write_data(nvc);
7194 
7195 		cb->sgpio_sr = NULL;
7196 
7197 		/* zero out the CBP to cmn mapping */
7198 		for (i = 0; i < NV_MAX_CBPS; i++) {
7199 			if (nv_sgp_cbp2cmn[i].c2cm_cbp == cmn->nvs_cbp) {
7200 				nv_sgp_cbp2cmn[i].c2cm_cmn = NULL;
7201 				break;
7202 			}
7203 
7204 			if (nv_sgp_cbp2cmn[i].c2cm_cbp == 0)
7205 				break;
7206 		}
7207 
7208 		/* free resources */
7209 		cv_destroy(&cmn->nvs_cv);
7210 		mutex_destroy(&cmn->nvs_tlock);
7211 		mutex_destroy(&cmn->nvs_slock);
7212 
7213 		kmem_free(nvc->nvc_sgp_cmn, sizeof (nv_sgp_cmn_t));
7214 	}
7215 
7216 	nvc->nvc_sgp_cmn = NULL;
7217 
7218 	/* unmap the SGPIO Control Block */
7219 	psm_unmap_phys((caddr_t)nvc->nvc_sgp_cbp, sizeof (nv_sgp_cb_t));
7220 }
7221 #endif	/* SGPIO_SUPPORT */
7222