/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2018 Nexenta Systems, Inc.
 * Copyright 2021 RackTop Systems, Inc.
 */

/*
 * This file contains all routines necessary to interface with the SCSA
 * transport (tran) layer.
 */
#include <smartpqi.h>

/*
 * []------------------------------------------------------------------[]
 * | Forward declarations for SCSA trans routines.			|
 * []------------------------------------------------------------------[]
 */
static int pqi_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
static void pqi_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
static int pqi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
static int pqi_scsi_reset(struct scsi_address *ap, int level);
static int pqi_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
static int pqi_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly);
static int pqi_scsi_setcap(struct scsi_address *ap, char *cap, int value,
    int tgtonly);
static struct scsi_pkt *pqi_init_pkt(struct scsi_address *ap,
    struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen, int tgtlen,
    int flags,  int (*callback)(), caddr_t arg);
static void pqi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);
static void pqi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
static void pqi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);
static int pqi_reset_notify(struct scsi_address *ap, int flag,
    void (*callback)(caddr_t), caddr_t arg);
static int pqi_quiesce(dev_info_t *dip);
static int pqi_unquiesce(dev_info_t *dip);
static int pqi_bus_config(dev_info_t *pdip, uint_t flag,
    ddi_bus_config_op_t op, void *arg, dev_info_t **childp);

/* ---- Support method declarations ---- */
static int config_one(dev_info_t *pdip, pqi_state_t s, pqi_device_t d,
    dev_info_t **childp);
static void abort_all(struct scsi_address *ap, pqi_state_t s);
static int cmd_ext_alloc(pqi_cmd_t cmd, int kf);
static void cmd_ext_free(pqi_cmd_t cmd);
static boolean_t is_physical_dev(pqi_device_t d);
static void cmd_timeout_scan(void *);

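/*
 * Allocate a scsi_hba_tran_t for this instance, wire up the tran entry
 * points, publish the "initiator-port" property, attach to the SCSA
 * framework, register as an MPxIO pHCI (unless disabled), and kick off
 * the periodic command timeout scan. Returns TRUE on success.
 */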
int
smartpqi_register_hba(pqi_state_t s)
{
	scsi_hba_tran_t		*tran;
	int			flags;
	char			iport_str[16];
	int			instance = ddi_get_instance(s->s_dip);

	tran = s->s_tran = scsi_hba_tran_alloc(s->s_dip, SCSI_HBA_CANSLEEP);
	if (tran == NULL)
		return (FALSE);

	tran->tran_hba_private		= s;
	tran->tran_tgt_private		= NULL;

	tran->tran_tgt_init		= pqi_scsi_tgt_init;
	tran->tran_tgt_free		= pqi_scsi_tgt_free;
	tran->tran_tgt_probe		= scsi_hba_probe;

	tran->tran_start		= pqi_start;
	tran->tran_reset		= pqi_scsi_reset;
	tran->tran_abort		= pqi_scsi_abort;
	tran->tran_getcap		= pqi_scsi_getcap;
	tran->tran_setcap		= pqi_scsi_setcap;
	tran->tran_bus_config		= pqi_bus_config;

	tran->tran_init_pkt		= pqi_init_pkt;
	tran->tran_destroy_pkt		= pqi_destroy_pkt;
	tran->tran_dmafree		= pqi_dmafree;
	tran->tran_sync_pkt		= pqi_sync_pkt;

	tran->tran_reset_notify		= pqi_reset_notify;
	tran->tran_quiesce		= pqi_quiesce;
	tran->tran_unquiesce		= pqi_unquiesce;
	tran->tran_bus_reset		= NULL;

	tran->tran_add_eventcall	= NULL;
	tran->tran_get_eventcookie	= NULL;
	tran->tran_post_event		= NULL;
	tran->tran_remove_eventcall	= NULL;
	tran->tran_interconnect_type	= INTERCONNECT_SAS;

	/*
	 * scsi_vhci needs to have "initiator-port" set, but doesn't
	 * seem to care what it's set to. iSCSI uses the InitiatorName
	 * whereas mpt_sas uses the WWN port id, but this HBA doesn't
	 * have such a value. So, for now the instance number will be used.
	 */
	(void) snprintf(iport_str, sizeof (iport_str), "0x%x", instance);
	if (ddi_prop_update_string(DDI_DEV_T_NONE, s->s_dip,
	    SCSI_ADDR_PROP_INITIATOR_PORT, iport_str) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "%s: Failed to create prop (%s) on %d\n",
		    __func__, SCSI_ADDR_PROP_INITIATOR_PORT, instance);
	}

	flags = SCSI_HBA_ADDR_COMPLEX | SCSI_HBA_TRAN_SCB;
	if (scsi_hba_attach_setup(s->s_dip, &s->s_msg_dma_attr, tran,
	    flags) != DDI_SUCCESS) {
		dev_err(s->s_dip, CE_NOTE, "scsi_hba_attach_setup failed");
		scsi_hba_tran_free(s->s_tran);
		s->s_tran = NULL;
		return (FALSE);
	}

	if (!s->s_disable_mpxio) {
		if (mdi_phci_register(MDI_HCI_CLASS_SCSI, s->s_dip, 0) !=
		    MDI_SUCCESS) {
			s->s_disable_mpxio = 1;
		}
	}

	s->s_cmd_timeout = timeout(cmd_timeout_scan, s,
	    CMD_TIMEOUT_SCAN_SECS * drv_usectohz(MICROSEC));

	return (TRUE);
}

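/*
 * Tear down what smartpqi_register_hba() set up: unregister the pHCI,
 * cancel the timeout scan, and free the tran handle.
 */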
void
smartpqi_unregister_hba(pqi_state_t s)
{
	if (!s->s_disable_mpxio)
		(void) mdi_phci_unregister(s->s_dip, 0);

	if (s->s_cmd_timeout != NULL) {
		(void) untimeout(s->s_cmd_timeout);
		s->s_cmd_timeout = NULL;
	}

	if (s->s_tran == NULL)
		return;
	scsi_hba_tran_free(s->s_tran);
	s->s_tran = NULL;
}

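/*
 * tran_tgt_init(9E) -- look up the device by unit address and hang it
 * off the scsi_device as HBA private data. For MPxIO client nodes the
 * "target-port" path property is also set to the device's WWID.
 */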
/*ARGSUSED*/
static int
pqi_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	pqi_device_t	d;
	pqi_state_t	s	= hba_tran->tran_hba_private;
	mdi_pathinfo_t	*pip;
	int		type;
	char		*ua;

	if ((ua = scsi_device_unit_address(sd)) == NULL) {
		return (DDI_FAILURE);
	}

	if ((d = pqi_find_target_ua(s, ua)) == NULL) {
		return (DDI_FAILURE);
	}

	scsi_device_hba_private_set(sd, d);

	type = mdi_get_component_type(tgt_dip);
	if (type == MDI_COMPONENT_CLIENT) {
		char	wwid_str[64];

		if ((pip = (mdi_pathinfo_t *)sd->sd_private) == NULL)
			return (DDI_NOT_WELL_FORMED);

		(void) snprintf(wwid_str, sizeof (wwid_str), "%" PRIx64,
		    d->pd_wwid);
		(void) mdi_prop_update_string(pip, SCSI_ADDR_PROP_TARGET_PORT,
		    wwid_str);
	}

	return (DDI_SUCCESS);
}

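/*
 * tran_tgt_free(9E) -- nothing to release here; device nodes are owned
 * by the soft state and persist across target init/free.
 */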
/*ARGSUSED*/
static void
pqi_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
}

/*
 * Notes:
 *      - transport the command to the addressed SCSI target/lun device
 *      - normal operation is to schedule the command to be transported,
 *        and return TRAN_ACCEPT if this is successful.
 *      - if NO_INTR, tran_start must poll device for command completion
 */
/*ARGSUSED*/
static int
pqi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	boolean_t	poll	= ((pkt->pkt_flags & FLAG_NOINTR) != 0);
	int		rc;
	pqi_cmd_t	cmd	= PKT2CMD(pkt);
	pqi_state_t	s	= ap->a_hba_tran->tran_hba_private;

	ASSERT3P(cmd->pc_pkt, ==, pkt);
	ASSERT3P(cmd->pc_softc, ==, s);

	if (pqi_is_offline(s) || !cmd->pc_device->pd_online)
		return (TRAN_FATAL_ERROR);

	/*
	 * Reinitialize some fields because the packet may have been
	 * resubmitted.
	 */
	pkt->pkt_reason = CMD_CMPLT;
	pkt->pkt_state = 0;
	pkt->pkt_statistics = 0;

	/* ---- Zero status byte ---- */
	*(pkt->pkt_scbp) = 0;

	if ((cmd->pc_flags & PQI_FLAG_DMA_VALID) != 0) {
		ASSERT(cmd->pc_dma_count);
		pkt->pkt_resid = cmd->pc_dma_count;

		/* ---- Sync consistent packets first (only write data) ---- */
		if (((cmd->pc_flags & PQI_FLAG_IO_IOPB) != 0) ||
		    ((cmd->pc_flags & PQI_FLAG_IO_READ) == 0)) {
			(void) ddi_dma_sync(cmd->pc_dmahdl, 0, 0,
			    DDI_DMA_SYNC_FORDEV);
		}
	}

	mutex_enter(&s->s_mutex);
	if (HBA_IS_QUIESCED(s) && !poll) {
		mutex_exit(&s->s_mutex);
		return (TRAN_BUSY);
	}
	mutex_exit(&s->s_mutex);

	pqi_cmd_sm(cmd, PQI_CMD_QUEUED, B_TRUE);

	rc = pqi_transport_command(s, cmd);

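	/*
	 * Polled (FLAG_NOINTR) commands: run with interrupts disabled,
	 * processing the first queue group by hand every half millisecond
	 * until the packet completes or the polling budget is exhausted.
	 */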
	if (poll) {
		boolean_t	qnotify;

		if (rc == TRAN_ACCEPT) {
			uint32_t	old_state;
			int		timeo;

			timeo = pkt->pkt_time ? pkt->pkt_time :
			    SCSI_POLL_TIMEOUT;
			timeo *= MILLISEC / 2;
			old_state = pqi_disable_intr(s);
			do {
				drv_usecwait(MILLISEC / 2);
				pqi_process_io_intr(s, &s->s_queue_groups[0]);
				if (--timeo == 0) {
					pkt->pkt_state |= STAT_TIMEOUT;
					pkt->pkt_reason = CMD_TIMEOUT;
					break;
				}
			} while (pkt->pkt_state == 0);
			pqi_enable_intr(s, old_state);
		}

		scsi_hba_pkt_comp(pkt);

		mutex_enter(&s->s_mutex);
		qnotify = HBA_QUIESCED_PENDING(s);
		mutex_exit(&s->s_mutex);

		if (qnotify)
			pqi_quiesced_notify(s);
	}

	return (rc);
}

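/*
 * tran_reset(9E) -- RESET_TARGET and RESET_LUN reset only the addressed
 * device; RESET_BUS and RESET_ALL issue a LUN reset to every device
 * known to the HBA.
 */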
static int
pqi_scsi_reset(struct scsi_address *ap, int level)
{
	pqi_device_t	d;
	pqi_state_t	s;
	int		rval = FALSE;

	s = ap->a_hba_tran->tran_hba_private;
	switch (level) {
	case RESET_TARGET:
	case RESET_LUN:
		if ((d = scsi_device_hba_private_get(ap->a.a_sd)) == NULL)
			break;

		if (pqi_lun_reset(s, d) == B_TRUE)
			rval = TRUE;
		break;

	case RESET_BUS:
	case RESET_ALL:
		for (d = list_head(&s->s_devnodes); d != NULL;
		    d = list_next(&s->s_devnodes, d)) {
			(void) pqi_lun_reset(s, d);
		}
		rval = TRUE;
		break;
	}
	return (rval);
}

/*
 * abort handling:
 *
 * Notes:
 *      - if pkt is not NULL, abort just that command
 *      - if pkt is NULL, abort all outstanding commands for target
 */
static int
pqi_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	boolean_t	qnotify	= B_FALSE;
	pqi_state_t	s	= ADDR2PQI(ap);

	if (pkt != NULL) {
		/* ---- Abort single command ---- */
		pqi_cmd_t	cmd = PKT2CMD(pkt);

		mutex_enter(&cmd->pc_device->pd_mutex);
		pqi_fail_cmd(cmd, CMD_ABORTED, STAT_ABORTED);
		mutex_exit(&cmd->pc_device->pd_mutex);
	} else {
		abort_all(ap, s);
	}
	qnotify = HBA_QUIESCED_PENDING(s);

	if (qnotify)
		pqi_quiesced_notify(s);
	return (1);
}

/*
 * capability handling:
 * (*tran_getcap).  Get the capability named, and return its value.
 */
/*ARGSUSED*/
static int
pqi_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly)
{
	pqi_state_t s = ap->a_hba_tran->tran_hba_private;

	if (cap == NULL)
		return (-1);
	switch (scsi_hba_lookup_capstr(cap)) {
	case SCSI_CAP_LUN_RESET:
		return ((s->s_flags & PQI_HBA_LUN_RESET_CAP) != 0);
	case SCSI_CAP_ARQ:
		return ((s->s_flags & PQI_HBA_AUTO_REQUEST_SENSE) != 0);
	case SCSI_CAP_UNTAGGED_QING:
		return (1);
	default:
		return (-1);
	}
}

/*
 * (*tran_setcap).  Set the capability named to the value given.
 */
/*ARGSUSED*/
static int
pqi_scsi_setcap(struct scsi_address *ap, char *cap, int value, int tgtonly)
{
	pqi_state_t	s	= ADDR2PQI(ap);
	int		rval	= FALSE;

	if (cap == NULL)
		return (-1);

	switch (scsi_hba_lookup_capstr(cap)) {
	case SCSI_CAP_ARQ:
		if (value)
			s->s_flags |= PQI_HBA_AUTO_REQUEST_SENSE;
		else
			s->s_flags &= ~PQI_HBA_AUTO_REQUEST_SENSE;
		rval = 1;
		break;

	case SCSI_CAP_LUN_RESET:
		if (value)
			s->s_flags |= PQI_HBA_LUN_RESET_CAP;
		else
			s->s_flags &= ~PQI_HBA_LUN_RESET_CAP;
		rval = 1;
		break;

	default:
		break;
	}

	return (rval);
}

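/*
 * kmem cache constructor/destructor for per-command state. The
 * constructor preallocates the command's DMA handle so that
 * pqi_init_pkt() doesn't have to; the destructor releases it.
 */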
/*ARGSUSED*/
int
pqi_cache_constructor(void *buf, void *un, int flags)
{
	pqi_cmd_t		c	= (pqi_cmd_t)buf;
	pqi_state_t		s	= un;
	int			(*callback)(caddr_t);

	c->pc_softc = s;
	callback = (flags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;

	/* ---- Allocate a DMA handle for data transfers ---- */
	if (ddi_dma_alloc_handle(s->s_dip, &s->s_msg_dma_attr, callback,
	    NULL, &c->pc_dmahdl) != DDI_SUCCESS) {
		dev_err(s->s_dip, CE_WARN, "Failed to alloc dma handle");
		return (-1);
	}
	pqi_cmd_sm(c, PQI_CMD_CONSTRUCT, B_TRUE);

	return (0);
}

/*ARGSUSED*/
void
pqi_cache_destructor(void *buf, void *un)
{
	pqi_cmd_t	cmd = buf;
	if (cmd->pc_dmahdl != NULL) {
		(void) ddi_dma_unbind_handle(cmd->pc_dmahdl);
		ddi_dma_free_handle(&cmd->pc_dmahdl);
		cmd->pc_dmahdl = NULL;
	}
}

/*
 * tran_init_pkt(9E) - allocate scsi_pkt(9S) for command
 *
 * One of three possibilities:
 *      - allocate scsi_pkt
 *      - allocate scsi_pkt and DMA resources
 *      - allocate DMA resources to an already-allocated pkt
 */
/*ARGSUSED*/
static struct scsi_pkt *
pqi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
    struct buf *bp, int cmdlen, int statuslen, int tgtlen, int flags,
    int (*callback)(), caddr_t arg)
{
	pqi_cmd_t	cmd;
	pqi_state_t	s;
	int		kf = (callback == SLEEP_FUNC) ? KM_SLEEP : KM_NOSLEEP;
	boolean_t	is_new = B_FALSE;
	int		rc;
	int		i;
	pqi_device_t	devp;

	s = ap->a_hba_tran->tran_hba_private;

	if (pkt == NULL) {
		ddi_dma_handle_t	saved_dmahdl;
		pqi_cmd_state_t		saved_state;

		if ((devp = scsi_device_hba_private_get(ap->a.a_sd)) == NULL)
			return (NULL);
		if ((cmd = kmem_cache_alloc(s->s_cmd_cache, kf)) == NULL)
			return (NULL);

		is_new = B_TRUE;
		saved_dmahdl = cmd->pc_dmahdl;
		saved_state = cmd->pc_cmd_state;

		(void) memset(cmd, 0, sizeof (*cmd));

		cmd->pc_dmahdl = saved_dmahdl;
		cmd->pc_cmd_state = saved_state;

		cmd->pc_device = devp;
		cmd->pc_pkt = &cmd->pc_cached_pkt;
		cmd->pc_softc = s;
		cmd->pc_tgtlen = tgtlen;
		cmd->pc_statuslen = statuslen;
		cmd->pc_cmdlen = cmdlen;
		cmd->pc_dma_count = 0;

		pkt = cmd->pc_pkt;
		pkt->pkt_ha_private = cmd;
		pkt->pkt_address = *ap;
		pkt->pkt_scbp = (uint8_t *)&cmd->pc_cmd_scb;
		pkt->pkt_cdbp = cmd->pc_cdb;
		pkt->pkt_private = (opaque_t)cmd->pc_tgt_priv;
		if (pkt->pkt_time == 0)
			pkt->pkt_time = SCSI_POLL_TIMEOUT;

		if (cmdlen > sizeof (cmd->pc_cdb) ||
		    statuslen > sizeof (cmd->pc_cmd_scb) ||
		    tgtlen > sizeof (cmd->pc_tgt_priv)) {
			if (cmd_ext_alloc(cmd, kf) != DDI_SUCCESS) {
				dev_err(s->s_dip, CE_WARN,
				    "extent allocation failed");
				goto out;
			}
		}
	} else {
		cmd = PKT2CMD(pkt);
		cmd->pc_flags &= PQI_FLAGS_PERSISTENT;
	}
	pqi_cmd_sm(cmd, PQI_CMD_INIT, B_TRUE);

	/* ---- Handle partial DMA transfer ---- */
	if (cmd->pc_nwin > 0) {
		if (++cmd->pc_winidx >= cmd->pc_nwin)
			return (NULL);
		if (ddi_dma_getwin(cmd->pc_dmahdl, cmd->pc_winidx,
		    &cmd->pc_dma_offset, &cmd->pc_dma_len, &cmd->pc_dmac,
		    &cmd->pc_dmaccount) == DDI_FAILURE)
			return (NULL);
		goto handle_dma_cookies;
	}

	/* ---- Setup data buffer ---- */
	if (bp != NULL && bp->b_bcount > 0 &&
	    (cmd->pc_flags & PQI_FLAG_DMA_VALID) == 0) {
		int	dma_flags;

		ASSERT(cmd->pc_dmahdl != NULL);

		if ((bp->b_flags & B_READ) != 0) {
			cmd->pc_flags |= PQI_FLAG_IO_READ;
			dma_flags = DDI_DMA_READ;
		} else {
			cmd->pc_flags &= ~PQI_FLAG_IO_READ;
			dma_flags = DDI_DMA_WRITE;
		}
		if ((flags & PKT_CONSISTENT) != 0) {
			cmd->pc_flags |= PQI_FLAG_IO_IOPB;
			dma_flags |= DDI_DMA_CONSISTENT;
		}
		if ((flags & PKT_DMA_PARTIAL) != 0) {
			dma_flags |= DDI_DMA_PARTIAL;
		}
		rc = ddi_dma_buf_bind_handle(cmd->pc_dmahdl, bp,
		    dma_flags, callback, arg, &cmd->pc_dmac,
		    &cmd->pc_dmaccount);

		if (rc == DDI_DMA_PARTIAL_MAP) {
			(void) ddi_dma_numwin(cmd->pc_dmahdl, &cmd->pc_nwin);
			cmd->pc_winidx = 0;
			(void) ddi_dma_getwin(cmd->pc_dmahdl, cmd->pc_winidx,
			    &cmd->pc_dma_offset, &cmd->pc_dma_len,
			    &cmd->pc_dmac, &cmd->pc_dmaccount);
		} else if (rc != 0 && rc != DDI_DMA_MAPPED) {
			switch (rc) {
			case DDI_DMA_NORESOURCES:
				bioerror(bp, 0);
				break;
			case DDI_DMA_BADATTR:
			case DDI_DMA_NOMAPPING:
				bioerror(bp, EFAULT);
				break;
			case DDI_DMA_TOOBIG:
			default:
				bioerror(bp, EINVAL);
				break;
			}
			goto out;
		}

handle_dma_cookies:
		ASSERT(cmd->pc_dmaccount > 0);
		if (cmd->pc_dmaccount >
		    (sizeof (cmd->pc_cached_cookies) /
		    sizeof (ddi_dma_cookie_t))) {
			dev_err(s->s_dip, CE_WARN,
			    "invalid cookie count: %d", cmd->pc_dmaccount);
			goto out;
		}
		if (cmd->pc_dmaccount >
		    (s->s_sg_chain_buf_length / sizeof (pqi_sg_entry_t))) {
			dev_err(s->s_dip, CE_WARN,
			    "Cookie(0x%x) versus SG(0x%" PRIx64 ") mismatch",
			    cmd->pc_dmaccount,
			    s->s_sg_chain_buf_length / sizeof (pqi_sg_entry_t));
			goto out;
		}

		cmd->pc_flags |= PQI_FLAG_DMA_VALID;
		cmd->pc_dma_count = cmd->pc_dmac.dmac_size;
		cmd->pc_cached_cookies[0] = cmd->pc_dmac;

		for (i = 1; i < cmd->pc_dmaccount; i++) {
			ddi_dma_nextcookie(cmd->pc_dmahdl, &cmd->pc_dmac);
			cmd->pc_cached_cookies[i] = cmd->pc_dmac;
			cmd->pc_dma_count += cmd->pc_dmac.dmac_size;
		}

		pkt->pkt_resid = bp->b_bcount - cmd->pc_dma_count;
	}

	return (pkt);
out:
	pqi_cmd_sm(cmd, PQI_CMD_FATAL, B_TRUE);
	if (is_new == B_TRUE)
		pqi_destroy_pkt(ap, pkt);
	return (NULL);
}

/*
 * tran_destroy_pkt(9E) - scsi_pkt(9S) deallocation
 *
 * Notes:
 *      - also frees DMA resources if allocated
 *      - implicit DMA synchronization
 */
/*ARGSUSED*/
static void
pqi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	pqi_cmd_t	c = PKT2CMD(pkt);
	pqi_state_t	s = ADDR2PQI(ap);

	if ((c->pc_flags & PQI_FLAG_DMA_VALID) != 0) {
		c->pc_flags &= ~PQI_FLAG_DMA_VALID;
		(void) ddi_dma_unbind_handle(c->pc_dmahdl);
	}
	cmd_ext_free(c);
	pqi_cmd_sm(c, PQI_CMD_DESTRUCT, B_TRUE);

	kmem_cache_free(s->s_cmd_cache, c);
}

/*
 * tran_dmafree(9E) - deallocate DMA resources allocated for command
 */
/*ARGSUSED*/
static void
pqi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	pqi_cmd_t	cmd = PKT2CMD(pkt);

	if (cmd->pc_flags & PQI_FLAG_DMA_VALID) {
		cmd->pc_flags &= ~PQI_FLAG_DMA_VALID;
		(void) ddi_dma_unbind_handle(cmd->pc_dmahdl);
	}
}

/*
 * tran_sync_pkt(9E) - explicit DMA synchronization
 */
/*ARGSUSED*/
static void
pqi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	pqi_cmd_t	cmd = PKT2CMD(pkt);

	if (cmd->pc_dmahdl != NULL) {
		(void) ddi_dma_sync(cmd->pc_dmahdl, 0, 0,
		    (cmd->pc_flags & PQI_FLAG_IO_READ) ? DDI_DMA_SYNC_FORCPU :
		    DDI_DMA_SYNC_FORDEV);
	}
}

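/*
 * tran_reset_notify(9E) -- registration is handled entirely by
 * scsi_hba_reset_notify_setup(9F) using the HBA's notify list.
 */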
static int
pqi_reset_notify(struct scsi_address *ap, int flag,
    void (*callback)(caddr_t), caddr_t arg)
{
	pqi_state_t	s = ADDR2PQI(ap);

	return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
	    &s->s_mutex, &s->s_reset_notify_listf));
}

/*
 * Device / Hotplug control
 */
/*ARGSUSED*/
static int
pqi_quiesce(dev_info_t *dip)
{
	pqi_state_t	s;
	scsi_hba_tran_t	*tran;

	if ((tran = ddi_get_driver_private(dip)) == NULL ||
	    (s = TRAN2PQI(tran)) == NULL)
		return (-1);

	mutex_enter(&s->s_mutex);
	if (!HBA_IS_QUIESCED(s))
		s->s_flags |= PQI_HBA_QUIESCED;

	if (s->s_cmd_queue_len != 0) {
		/* ---- Outstanding commands present, wait ---- */
		s->s_flags |= PQI_HBA_QUIESCED_PENDING;
		cv_wait(&s->s_quiescedvar, &s->s_mutex);
		ASSERT0(s->s_cmd_queue_len);
	}
	mutex_exit(&s->s_mutex);

	return (0);
}

/*ARGSUSED*/
static int
pqi_unquiesce(dev_info_t *dip)
{
	pqi_state_t	s;
	scsi_hba_tran_t	*tran;

	if ((tran = ddi_get_driver_private(dip)) == NULL ||
	    (s = TRAN2PQI(tran)) == NULL)
		return (-1);

	mutex_enter(&s->s_mutex);
	if (!HBA_IS_QUIESCED(s)) {
		mutex_exit(&s->s_mutex);
		return (0);
	}
	ASSERT0(s->s_cmd_queue_len);
	s->s_flags &= ~PQI_HBA_QUIESCED;
	mutex_exit(&s->s_mutex);

	return (0);
}

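/*
 * tran_bus_config(9E) -- BUS_CONFIG_ONE decodes the unit address from
 * arg and configures that one device; BUS_CONFIG_DRIVER and
 * BUS_CONFIG_ALL enumerate every device node known to the HBA.
 */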
/*ARGSUSED*/
static int
pqi_bus_config(dev_info_t *pdip, uint_t flag,
    ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
{
	scsi_hba_tran_t	*tran;
	pqi_state_t	s;
	int		circ	= 0;
	int		circ1	= 0;
	int		ret	= NDI_FAILURE;
	pqi_device_t	d;
	char		*ua;

	tran = ddi_get_driver_private(pdip);
	s = tran->tran_hba_private;
	if (pqi_is_offline(s))
		return (NDI_FAILURE);

	ndi_devi_enter(scsi_vhci_dip, &circ1);
	ndi_devi_enter(pdip, &circ);
	switch (op) {
	case BUS_CONFIG_ONE:
		if ((ua = strrchr((char *)arg, '@')) != NULL) {
			ua++;
			d = pqi_find_target_ua(s, ua);
			if (d != NULL)
				ret = config_one(pdip, s, d, childp);
		} else {
			dev_err(s->s_dip, CE_WARN, "Couldn't decode %s",
			    (char *)arg);
		}
		flag |= NDI_MDI_FALLBACK;
		break;

	case BUS_CONFIG_DRIVER:
	case BUS_CONFIG_ALL:
		ret = pqi_config_all(pdip, s);
		break;
	default:
		ret = NDI_FAILURE;
	}
	if (ret == NDI_SUCCESS)
		ret = ndi_busop_bus_config(pdip, flag, op, arg, childp, 0);
	ndi_devi_exit(pdip, circ);
	ndi_devi_exit(scsi_vhci_dip, circ1);

	return (ret);
}

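/*
 * Find an online device by its unit address, or return NULL.
 */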
pqi_device_t
pqi_find_target_ua(pqi_state_t s, char *ua)
{
	pqi_device_t d;

	for (d = list_head(&s->s_devnodes); d != NULL;
	    d = list_next(&s->s_devnodes, d)) {
		if (d->pd_online && strcmp(ua, d->pd_unit_address) == 0)
			break;
	}
	return (d);
}

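/*
 * Configure every known device, onlining available devices before
 * dealing with those which have gone away.
 */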
int
pqi_config_all(dev_info_t *pdip, pqi_state_t s)
{
	pqi_device_t d;

	/*
	 * Make sure we bring the available devices into play first. These
	 * might be brand new devices just hotplugged into the system or
	 * they could be devices previously offlined because either they
	 * were pulled from an enclosure or a cable to the enclosure was
	 * pulled.
	 */
	for (d = list_head(&s->s_devnodes); d != NULL;
	    d = list_next(&s->s_devnodes, d)) {
		if (d->pd_online)
			(void) config_one(pdip, s, d, NULL);
	}

	/*
	 * Now deal with devices that we had previously known about, but are
	 * no longer available.
	 */
	for (d = list_head(&s->s_devnodes); d != NULL;
	    d = list_next(&s->s_devnodes, d)) {
		if (!d->pd_online)
			(void) config_one(pdip, s, d, NULL);
	}

	return (NDI_SUCCESS);
}

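/*
 * Wake any thread blocked in pqi_quiesce() once the last outstanding
 * command has drained.
 */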
void
pqi_quiesced_notify(pqi_state_t s)
{
	mutex_enter(&s->s_mutex);
	if (s->s_cmd_queue_len == 0 &&
	    (s->s_flags & PQI_HBA_QUIESCED_PENDING) != 0) {
		s->s_flags &= ~PQI_HBA_QUIESCED_PENDING;
		cv_broadcast(&s->s_quiescedvar);
	}
	mutex_exit(&s->s_mutex);
}

/*
 * []------------------------------------------------------------------[]
 * | Support routines used only by the tran_xxx routines		|
 * []------------------------------------------------------------------[]
 */

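/*
 * Periodic scan, rescheduled every CMD_TIMEOUT_SCAN_SECS, which fails
 * any command (or serial sync I/O) whose expiration time has passed.
 */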
static void
cmd_timeout_scan(void *v)
{
	pqi_state_t	s = v;
	pqi_device_t	d;
	pqi_cmd_t	cmd;
	hrtime_t	now = gethrtime();
	list_t		to_scan;

	mutex_enter(&s->s_mutex);
	for (d = list_head(&s->s_devnodes); d != NULL;
	    d = list_next(&s->s_devnodes, d)) {

		list_create(&to_scan, sizeof (struct pqi_cmd),
		    offsetof(struct pqi_cmd, pc_list));

		mutex_enter(&d->pd_mutex);
		list_move_tail(&to_scan, &d->pd_cmd_list);

		while ((cmd = list_remove_head(&to_scan)) != NULL) {
			if (cmd->pc_expiration < now) {
				struct scsi_pkt	*pkt	= CMD2PKT(cmd);

				pkt->pkt_reason = CMD_TIMEOUT;
				pkt->pkt_statistics = STAT_TIMEOUT;

				/*
				 * Insert the command back onto the list, with
				 * the lock held, so that the state machine
				 * can do its processing which removes the
				 * command from the list and calls pkt_comp.
				 */
				list_insert_tail(&d->pd_cmd_list, cmd);
				pqi_cmd_sm(cmd, PQI_CMD_FATAL, B_FALSE);

			} else {
				/*
				 * Once a command's expiration date is in
				 * the future, this command and all remaining
				 * commands on the chain are in the future as
				 * well. So, add them back to the device
				 * command list, lock, stock, and barrel, and
				 * stop processing for this device.
				 */
				list_insert_tail(&d->pd_cmd_list, cmd);
				list_move_tail(&d->pd_cmd_list, &to_scan);
				break;
			}
		}
		mutex_exit(&d->pd_mutex);
	}

	/*
	 * Certain commands are issued and run serially through the driver.
	 * These should all complete no matter what since they are commands
	 * which are actually sent to the HBA. Yet, there have been cases
	 * where the HBA failed to respond. So, if the time is past the
	 * expiration mark the I/O as having a timeout error and call its
	 * completion callback.
	 */
	if (s->s_sync_io != NULL &&
	    s->s_sync_expire < now) {
		s->s_sync_io->io_status = PQI_DATA_IN_OUT_TIMEOUT;
		s->s_sync_io->io_cb(s->s_sync_io, s->s_sync_io->io_context);
	}

	mutex_exit(&s->s_mutex);
	s->s_cmd_timeout = timeout(cmd_timeout_scan, s,
	    CMD_TIMEOUT_SCAN_SECS * drv_usectohz(MICROSEC));
}

/*ARGSUSED*/
static void
abort_all(struct scsi_address *ap, pqi_state_t s)
{
	pqi_device_t	devp;

	if ((devp = scsi_device_hba_private_get(ap->a.a_sd)) == NULL)
		return;

	pqi_fail_drive_cmds(devp);
}

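/*
 * Create and online a child devinfo node for a device that isn't being
 * handled by MPxIO.
 */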
static boolean_t
create_phys_lun(pqi_state_t s, pqi_device_t d,
    struct scsi_inquiry *inq, dev_info_t **childp)
{
	char		**compatible	= NULL;
	char		*nodename	= NULL;
	int		ncompatible	= 0;
	dev_info_t	*dip;

	/* ---- At this point we have a new device not in our list ---- */
	scsi_hba_nodename_compatible_get(inq, NULL,
	    inq->inq_dtype, NULL, &nodename, &compatible, &ncompatible);
	if (nodename == NULL)
		return (B_FALSE);

	if (ndi_devi_alloc(s->s_dip, nodename, DEVI_SID_NODEID, &dip) !=
	    NDI_SUCCESS) {
		dev_err(s->s_dip, CE_WARN, "failed to alloc device instance");
		goto free_nodename;
	}

	d->pd_dip = dip;
	d->pd_pip = NULL;

	if (ndi_prop_update_int64(DDI_DEV_T_NONE, dip, LUN64_PROP,
	    d->pd_lun) != DDI_PROP_SUCCESS) {
		goto free_devi;
	}

	if (ndi_prop_update_string_array(DDI_DEV_T_NONE, dip, COMPAT_PROP,
	    compatible, ncompatible) != DDI_PROP_SUCCESS) {
		goto free_devi;
	}

	if (d->pd_wwid != 0) {
		char		wwn_str[20];
		(void) snprintf(wwn_str, 20, "w%016" PRIx64, d->pd_wwid);
		if (ndi_prop_update_string(DDI_DEV_T_NONE, dip,
		    SCSI_ADDR_PROP_TARGET_PORT, wwn_str) != DDI_PROP_SUCCESS) {
			goto free_devi;
		}
	} else {
		if (ndi_prop_update_int(DDI_DEV_T_NONE, dip, TARGET_PROP,
		    d->pd_target) != DDI_PROP_SUCCESS) {
			goto free_devi;
		}
	}

	if (d->pd_guid != NULL) {
		if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, NDI_GUID,
		    d->pd_guid) != DDI_PROP_SUCCESS) {
			goto free_devi;
		}
	}

	if (ndi_prop_update_int(DDI_DEV_T_NONE, dip, "pm-capable", 1) !=
	    DDI_PROP_SUCCESS) {
		goto free_devi;
	}

	if (ndi_devi_online(dip, NDI_ONLINE_ATTACH) != NDI_SUCCESS)
		goto free_devi;

	if (childp != NULL)
		*childp = dip;

	scsi_hba_nodename_compatible_free(nodename, compatible);
	return (B_TRUE);

free_devi:
	ndi_prop_remove_all(dip);
	(void) ndi_devi_free(dip);
	d->pd_dip = NULL;
free_nodename:
	scsi_hba_nodename_compatible_free(nodename, compatible);
	return (B_FALSE);
}

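/*
 * Create (or revive) an MPxIO path for a device. A previously offlined
 * path that returns with the same GUID is simply onlined again;
 * otherwise the stale path is freed and a new one is allocated.
 */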
static boolean_t
create_virt_lun(pqi_state_t s, pqi_device_t d, struct scsi_inquiry *inq,
    dev_info_t **childp)
{
	char		*nodename;
	char		**compatible;
	int		ncompatible;
	int		rval;
	mdi_pathinfo_t	*pip		= NULL;
	char		*guid_ptr;
	char		wwid_str[17];
	dev_info_t	*lun_dip;
	char		*old_guid;

	if (d->pd_pip_offlined != NULL) {
		lun_dip = mdi_pi_get_client(d->pd_pip_offlined);
		ASSERT(lun_dip != NULL);

		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, lun_dip,
		    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
		    MDI_CLIENT_GUID_PROP, &old_guid) == DDI_SUCCESS) {
			if (strncmp(d->pd_guid, old_guid,
			    strlen(d->pd_guid)) == 0) {
				/* ---- Same path came back online ---- */
				(void) ddi_prop_free(old_guid);
				if (mdi_pi_online(d->pd_pip_offlined, 0) ==
				    DDI_SUCCESS) {
					d->pd_pip = d->pd_pip_offlined;
					d->pd_pip_offlined = NULL;
					return (B_TRUE);
				} else {
					return (B_FALSE);
				}
			} else {
				/* ---- Different device in slot ---- */
				(void) ddi_prop_free(old_guid);
				if (mdi_pi_offline(d->pd_pip_offlined, 0) !=
				    DDI_SUCCESS) {
					return (B_FALSE);
				}
				if (mdi_pi_free(d->pd_pip_offlined, 0) !=
				    MDI_SUCCESS) {
					return (B_FALSE);
				}
				d->pd_pip_offlined = NULL;
			}
		} else {
			dev_err(s->s_dip, CE_WARN, "Can't get client-guid "
			    "property for lun %" PRIx64, d->pd_wwid);
			return (B_FALSE);
		}
	}

	scsi_hba_nodename_compatible_get(inq, NULL, inq->inq_dtype, NULL,
	    &nodename, &compatible, &ncompatible);
	if (nodename == NULL)
		return (B_FALSE);

	if (d->pd_guid != NULL) {
		guid_ptr = d->pd_guid;
	} else {
		(void) snprintf(wwid_str, sizeof (wwid_str), "%" PRIx64,
		    d->pd_wwid);
		guid_ptr = wwid_str;
	}
	rval = mdi_pi_alloc_compatible(s->s_dip, nodename, guid_ptr,
	    d->pd_unit_address, compatible, ncompatible, 0, &pip);
	if (rval == MDI_SUCCESS) {
		mdi_pi_set_phci_private(pip, (caddr_t)d);

		if (mdi_prop_update_string(pip, MDI_GUID, guid_ptr) !=
		    DDI_SUCCESS) {
			dev_err(s->s_dip, CE_WARN,
			    "unable to create property (MDI_GUID) for %s",
			    guid_ptr);
			goto cleanup;
		}

		/*
		 * For MPxIO, we actually don't really need to care
		 * about the LUN or target property, because nothing
		 * really uses them.
		 */
		if (mdi_prop_update_int64(pip, LUN64_PROP, d->pd_lun) !=
		    DDI_SUCCESS) {
			dev_err(s->s_dip, CE_WARN,
			    "unable to create property (%s) for %s",
			    LUN64_PROP, guid_ptr);
			goto cleanup;
		}

		if (mdi_prop_update_string_array(pip, COMPAT_PROP,
		    compatible, ncompatible) != DDI_SUCCESS) {
			dev_err(s->s_dip, CE_WARN,
			    "unable to create property (%s) for %s",
			    COMPAT_PROP, guid_ptr);
			goto cleanup;
		}

		if (mdi_pi_online(pip, 0) == MDI_NOT_SUPPORTED)
			goto cleanup;

		d->pd_dip = NULL;
		d->pd_pip = pip;
	}

	scsi_hba_nodename_compatible_free(nodename, compatible);
	if (childp != NULL)
		*childp = mdi_pi_get_client(pip);
	return (B_TRUE);
cleanup:
	scsi_hba_nodename_compatible_free(nodename, compatible);
	d->pd_pip = NULL;
	d->pd_dip = NULL;
	(void) mdi_prop_remove(pip, NULL);
	(void) mdi_pi_free(pip, 0);
	return (B_FALSE);
}

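/*
 * Configure a single device: verify it answers INQUIRY, then create
 * either an MPxIO path (physical devices with MPxIO enabled) or a child
 * devinfo node. Offline devices and INQUIRY failures tear the node down.
 */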
static int
config_one(dev_info_t *pdip, pqi_state_t s, pqi_device_t d,
    dev_info_t **childp)
{
	struct scsi_inquiry	inq;
	boolean_t		rval = B_FALSE;

	/* ---- Inquiry target ---- */
	if (!d->pd_online ||
	    pqi_scsi_inquiry(s, d, 0, &inq, sizeof (inq)) == B_FALSE) {
		pqi_fail_drive_cmds(d);

		if (d->pd_dip != NULL) {
			(void) ndi_devi_offline(d->pd_dip,
			    NDI_DEVFS_CLEAN | NDI_DEVI_REMOVE);
			d->pd_dip = NULL;
		} else if (d->pd_pip != NULL) {
			(void) mdi_pi_offline(d->pd_pip, 0);
			d->pd_pip_offlined = d->pd_pip;
			d->pd_pip = NULL;
		}
		return (NDI_FAILURE);
	} else if (d->pd_dip != NULL) {
		if (childp != NULL)
			*childp = d->pd_dip;
		return (NDI_SUCCESS);
	} else if (d->pd_pip != NULL) {
		if (childp != NULL)
			*childp = mdi_pi_get_client(d->pd_pip);
		return (NDI_SUCCESS);
	}

	d->pd_parent = pdip;
	if ((!s->s_disable_mpxio) && is_physical_dev(d))
		rval = create_virt_lun(s, d, &inq, childp);

	if (rval == B_FALSE)
		rval = create_phys_lun(s, d, &inq, childp);

	return ((rval == B_TRUE) ? NDI_SUCCESS : NDI_FAILURE);
}

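/*
 * Free any oversize CDB, SCB, or target-private buffers allocated by
 * cmd_ext_alloc().
 */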
static void
cmd_ext_free(pqi_cmd_t cmd)
{
	struct scsi_pkt *pkt = CMD2PKT(cmd);

	if ((cmd->pc_flags & PQI_FLAG_CDB_EXT) != 0) {
		kmem_free(pkt->pkt_cdbp, cmd->pc_cmdlen);
		cmd->pc_flags &= ~PQI_FLAG_CDB_EXT;
	}
	if ((cmd->pc_flags & PQI_FLAG_SCB_EXT) != 0) {
		kmem_free(pkt->pkt_scbp, cmd->pc_statuslen);
		cmd->pc_flags &= ~PQI_FLAG_SCB_EXT;
	}
	if ((cmd->pc_flags & PQI_FLAG_PRIV_EXT) != 0) {
		kmem_free(pkt->pkt_private, cmd->pc_tgtlen);
		cmd->pc_flags &= ~PQI_FLAG_PRIV_EXT;
	}
}

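/*
 * Allocate external buffers when the requested CDB, SCB, or
 * target-private lengths exceed the space embedded in the command.
 */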
static int
cmd_ext_alloc(pqi_cmd_t cmd, int kf)
{
	struct scsi_pkt		*pkt = CMD2PKT(cmd);
	void			*buf;

	if (cmd->pc_cmdlen > sizeof (cmd->pc_cdb)) {
		if ((buf = kmem_zalloc(cmd->pc_cmdlen, kf)) == NULL)
			return (DDI_FAILURE);
		pkt->pkt_cdbp = buf;
		cmd->pc_flags |= PQI_FLAG_CDB_EXT;
	}

	if (cmd->pc_statuslen > sizeof (cmd->pc_cmd_scb)) {
		if ((buf = kmem_zalloc(cmd->pc_statuslen, kf)) == NULL)
			goto out;
		pkt->pkt_scbp = buf;
		cmd->pc_flags |= PQI_FLAG_SCB_EXT;
	}

	if (cmd->pc_tgtlen > sizeof (cmd->pc_tgt_priv)) {
		if ((buf = kmem_zalloc(cmd->pc_tgtlen, kf)) == NULL)
			goto out;
		pkt->pkt_private = buf;
		cmd->pc_flags |= PQI_FLAG_PRIV_EXT;
	}

	return (DDI_SUCCESS);

out:
	cmd_ext_free(cmd);

	return (DDI_FAILURE);
}

static boolean_t
is_physical_dev(pqi_device_t d)
{
	return (d->pd_phys_dev ? B_TRUE : B_FALSE);
}