1 /*
2 * This file and its contents are supplied under the terms of the
3 * Common Development and Distribution License ("CDDL"), version 1.0.
4 * You may only use this file in accordance with the terms of version
5 * 1.0 of the CDDL.
6 *
7 * A full copy of the text of the CDDL should have accompanied this
8 * source. A copy of the CDDL is also available via the Internet at
9 * http://www.illumos.org/license/CDDL.
10 */
11
12 /*
13 * Copyright 2019 Joyent, Inc.
14 */
15
16 #include <sys/scsi/adapters/smrt/smrt.h>
17
18 /*
19 * Targets may not attach directly beneath the controller node itself; this entry point always fails.
20 */
21 static int
22 smrt_ctrl_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
23 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
24 {
25 return (DDI_FAILURE);
26 }
27
28 /*
29 * No packets may be issued through the controller node itself; this entry point rejects them all.
30 */
31 static int
32 smrt_ctrl_tran_start(struct scsi_address *sa, struct scsi_pkt *pkt)
33 {
34 return (TRAN_BADPKT);
35 }
36
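/*
 * Parse a logical volume unit address of the form "target,lun", where both
 * fields are hexadecimal.  For example, "a,0" yields a target of 10; "a,1"
 * (non-zero LUN), "a" (no comma), and out-of-range targets are all rejected.
 */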
37 static boolean_t
38 smrt_logvol_parse(const char *ua, uint_t *targp)
39 {
40 long targ, lun;
41 const char *comma;
42 char *eptr;
43
44 comma = strchr(ua, ',');
45 if (comma == NULL) {
46 return (B_FALSE);
47 }
48
49 /*
50 * We expect the logical unit number to be zero for a logical volume.
52 */
53 if (ddi_strtol(comma + 1, &eptr, 16, &lun) != 0 || *eptr != '\0' ||
54 lun != 0) {
55 return (B_FALSE);
56 }
57
58 if (ddi_strtol(ua, &eptr, 16, &targ) != 0 || eptr != comma ||
59 targ < 0 || targ >= SMRT_MAX_LOGDRV) {
60 return (B_FALSE);
61 }
62
63 *targp = (uint_t)targ;
64
65 return (B_TRUE);
66 }
67
68 static int
69 smrt_logvol_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
70 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
71 {
72 _NOTE(ARGUNUSED(hba_dip))
73
74 smrt_volume_t *smlv;
75 smrt_target_t *smtg;
76 const char *ua;
77 uint_t targ;
78
79 smrt_t *smrt = (smrt_t *)hba_tran->tran_hba_private;
80 dev_info_t *dip = smrt->smrt_dip;
81
82 /*
83 * The unit address comes in the form of 'target,lun'. We expect the
84 * lun to be zero. The target is what we set when we added it to the
85 * target map earlier.
86 */
87 ua = scsi_device_unit_address(sd);
88 if (ua == NULL) {
89 return (DDI_FAILURE);
90 }
91
92 if (!smrt_logvol_parse(ua, &targ)) {
93 return (DDI_FAILURE);
94 }
95
96 if ((smtg = kmem_zalloc(sizeof (*smtg), KM_NOSLEEP)) == NULL) {
97 dev_err(dip, CE_WARN, "could not allocate target object "
98 "due to memory exhaustion");
99 return (DDI_FAILURE);
100 }
101
102 mutex_enter(&smrt->smrt_mutex);
103
104 if (smrt->smrt_status & SMRT_CTLR_STATUS_DETACHING) {
105 /*
106 * We are detaching. Do not accept any more requests to
107 * attach targets from the framework.
108 */
109 mutex_exit(&smrt->smrt_mutex);
110 kmem_free(smtg, sizeof (*smtg));
111 return (DDI_FAILURE);
112 }
113
114 /*
115 * Look for a logical volume for the SCSI unit address of this target.
116 */
117 if ((smlv = smrt_logvol_lookup_by_id(smrt, targ)) == NULL) {
118 mutex_exit(&smrt->smrt_mutex);
119 kmem_free(smtg, sizeof (*smtg));
120 return (DDI_FAILURE);
121 }
122
123 smtg->smtg_lun.smtg_vol = smlv;
124 smtg->smtg_addr = &smlv->smlv_addr;
125 smtg->smtg_physical = B_FALSE;
126 list_insert_tail(&smlv->smlv_targets, smtg);
127
128 /*
129 * Link this target object to the controller:
130 */
131 smtg->smtg_ctlr = smrt;
132 list_insert_tail(&smrt->smrt_targets, smtg);
133
134 smtg->smtg_scsi_dev = sd;
135 VERIFY(sd->sd_dev == tgt_dip);
136
137 scsi_device_hba_private_set(sd, smtg);
138
139 mutex_exit(&smrt->smrt_mutex);
140 return (DDI_SUCCESS);
141 }
142
143 static void
144 smrt_logvol_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
145 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
146 {
147 _NOTE(ARGUNUSED(hba_dip, tgt_dip))
148
149 smrt_t *smrt = (smrt_t *)hba_tran->tran_hba_private;
150 smrt_target_t *smtg = scsi_device_hba_private_get(sd);
151 smrt_volume_t *smlv = smtg->smtg_lun.smtg_vol;
152
153 VERIFY(smtg->smtg_scsi_dev == sd);
154 VERIFY(smtg->smtg_physical == B_FALSE);
155
156 mutex_enter(&smrt->smrt_mutex);
157 list_remove(&smlv->smlv_targets, smtg);
158 list_remove(&smrt->smrt_targets, smtg);
159
160 scsi_device_hba_private_set(sd, NULL);
161
162 mutex_exit(&smrt->smrt_mutex);
163
164 kmem_free(smtg, sizeof (*smtg));
165 }
166
167 static int
168 smrt_phys_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
169 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
170 {
171 _NOTE(ARGUNUSED(hba_dip))
172
173 smrt_target_t *smtg;
174 smrt_physical_t *smpt;
175 const char *ua, *comma;
176 char *eptr;
177 long lun;
178
179 smrt_t *smrt = (smrt_t *)hba_tran->tran_hba_private;
180 dev_info_t *dip = smrt->smrt_dip;
181
182 /*
183 * The unit address comes in the form of 'target,lun'. We expect the
184 * lun to be zero. The target is what we set when we added it to the
185 * target map earlier.
186 */
187 ua = scsi_device_unit_address(sd);
188 if (ua == NULL)
189 return (DDI_FAILURE);
190
191 comma = strchr(ua, ',');
192 if (comma == NULL) {
193 return (DDI_FAILURE);
194 }
195
196 /*
197 * Confirm the LUN is zero. We may want to instead check the scsi
198 * 'lun'/'lun64' property or do so in addition to this logic.
199 */
200 if (ddi_strtol(comma + 1, &eptr, 16, &lun) != 0 || *eptr != '\0' ||
201 lun != 0) {
202 return (DDI_FAILURE);
203 }
204
205 if ((smtg = kmem_zalloc(sizeof (*smtg), KM_NOSLEEP)) == NULL) {
206 dev_err(dip, CE_WARN, "could not allocate target object "
207 "due to memory exhaustion");
208 return (DDI_FAILURE);
209 }
210
211 mutex_enter(&smrt->smrt_mutex);
212
213 if (smrt->smrt_status & SMRT_CTLR_STATUS_DETACHING) {
214 /*
215 * We are detaching. Do not accept any more requests to
216 * attach targets from the framework.
217 */
218 mutex_exit(&smrt->smrt_mutex);
219 kmem_free(smtg, sizeof (*smtg));
220 return (DDI_FAILURE);
221 }
222
223
224 /*
225 * Look for a physical target based on the unit address of the target
226 * (which will encode its WWN and LUN).
227 */
228 smpt = smrt_phys_lookup_by_ua(smrt, ua);
229 if (smpt == NULL) {
230 mutex_exit(&smrt->smrt_mutex);
231 kmem_free(smtg, sizeof (*smtg));
232 return (DDI_FAILURE);
233 }
234
235 smtg->smtg_scsi_dev = sd;
236 smtg->smtg_physical = B_TRUE;
237 smtg->smtg_lun.smtg_phys = smpt;
238 list_insert_tail(&smpt->smpt_targets, smtg);
239 smtg->smtg_addr = &smpt->smpt_addr;
240
241 /*
242 * Link this target object to the controller:
243 */
244 smtg->smtg_ctlr = smrt;
245 list_insert_tail(&smrt->smrt_targets, smtg);
246
247 VERIFY(sd->sd_dev == tgt_dip);
249
250 scsi_device_hba_private_set(sd, smtg);
251 mutex_exit(&smrt->smrt_mutex);
252
253 return (DDI_SUCCESS);
254 }
255
256 static void
257 smrt_phys_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
258 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
259 {
260 _NOTE(ARGUNUSED(hba_dip, tgt_dip))
261
262 smrt_t *smrt = (smrt_t *)hba_tran->tran_hba_private;
263 smrt_target_t *smtg = scsi_device_hba_private_get(sd);
264 smrt_physical_t *smpt = smtg->smtg_lun.smtg_phys;
265
266 VERIFY(smtg->smtg_scsi_dev == sd);
267 VERIFY(smtg->smtg_physical == B_TRUE);
268
269 mutex_enter(&smrt->smrt_mutex);
270 list_remove(&smpt->smpt_targets, smtg);
271 list_remove(&smrt->smrt_targets, smtg);
272
273 scsi_device_hba_private_set(sd, NULL);
274 mutex_exit(&smrt->smrt_mutex);
275 kmem_free(smtg, sizeof (*smtg));
276 }
277
278 /*
279 * This function is called when the SCSI framework has allocated a packet and
280 * our private per-packet object.
281 *
282 * We choose not to have the framework pre-allocate memory for the CDB.
283 * Instead, we will make available the CDB area in the controller command block
284 * itself.
285 *
286 * Status block memory is allocated by the framework because we passed
287 * SCSI_HBA_TRAN_SCB to scsi_hba_attach_setup(9F).
288 */
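/*
 * Note that the per-packet smrt_command_scsa_t reached via pkt_ha_private is
 * also allocated by the framework, because smrt_ctrl_hba_setup() sets
 * tran_hba_len to sizeof (smrt_command_scsa_t).
 */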
289 static int
290 smrt_tran_setup_pkt(struct scsi_pkt *pkt, int (*callback)(caddr_t),
291 caddr_t arg)
292 {
293 _NOTE(ARGUNUSED(arg))
294
295 struct scsi_device *sd;
296 smrt_target_t *smtg;
297 smrt_t *smrt;
298 smrt_command_t *smcm;
299 smrt_command_scsa_t *smcms;
300 int kmflags = callback == SLEEP_FUNC ? KM_SLEEP : KM_NOSLEEP;
301
302 sd = scsi_address_device(&pkt->pkt_address);
303 VERIFY(sd != NULL);
304 smtg = scsi_device_hba_private_get(sd);
305 VERIFY(smtg != NULL);
306 smrt = smtg->smtg_ctlr;
307 VERIFY(smrt != NULL);
308 smcms = (smrt_command_scsa_t *)pkt->pkt_ha_private;
309
310 /*
311 * Check that we have enough space in the command object for the
312 * request from the target driver:
313 */
314 if (pkt->pkt_cdblen > CISS_CDBLEN) {
315 /*
316 * The CDB member of the Request Block of a controller
317 * command is fixed at 16 bytes.
318 */
319 dev_err(smrt->smrt_dip, CE_WARN, "oversize CDB: had %u, "
320 "needed %u", CISS_CDBLEN, pkt->pkt_cdblen);
321 return (-1);
322 }
323
324 /*
325 * Allocate our command block:
326 */
327 if ((smcm = smrt_command_alloc(smrt, SMRT_CMDTYPE_SCSA,
328 kmflags)) == NULL) {
329 return (-1);
330 }
331 smcm->smcm_scsa = smcms;
332 smcms->smcms_command = smcm;
333 smcms->smcms_pkt = pkt;
334
335 pkt->pkt_cdbp = &smcm->smcm_va_cmd->Request.CDB[0];
336 smcm->smcm_va_cmd->Request.CDBLen = pkt->pkt_cdblen;
337
338 smcm->smcm_target = smtg;
339
340 return (0);
341 }
342
343 static void
344 smrt_tran_teardown_pkt(struct scsi_pkt *pkt)
345 {
346 smrt_command_scsa_t *smcms = (smrt_command_scsa_t *)
347 pkt->pkt_ha_private;
348 smrt_command_t *smcm = smcms->smcms_command;
349
350 smrt_command_free(smcm);
351
352 pkt->pkt_cdbp = NULL;
353 }
354
355 static void
356 smrt_set_arq_data(struct scsi_pkt *pkt, uchar_t key)
357 {
358 struct scsi_arq_status *sts;
359
360 VERIFY3U(pkt->pkt_scblen, >=, sizeof (struct scsi_arq_status));
361
362 /* LINTED: E_BAD_PTR_CAST_ALIGN */
363 sts = (struct scsi_arq_status *)(pkt->pkt_scbp);
364 bzero(sts, sizeof (*sts));
365
366 /*
367 * Mock up a CHECK CONDITION SCSI status for the original command:
368 */
369 sts->sts_status.sts_chk = 1;
370
371 /*
372 * Pretend that we successfully performed REQUEST SENSE:
373 */
374 sts->sts_rqpkt_reason = CMD_CMPLT;
375 sts->sts_rqpkt_resid = 0;
376 sts->sts_rqpkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
377 STATE_SENT_CMD | STATE_XFERRED_DATA;
378 sts->sts_rqpkt_statistics = 0;
379
380 /*
381 * Return the key value we were provided in the fake sense data:
382 */
383 sts->sts_sensedata.es_valid = 1;
384 sts->sts_sensedata.es_class = CLASS_EXTENDED_SENSE;
385 sts->sts_sensedata.es_key = key;
386
387 pkt->pkt_state |= STATE_ARQ_DONE;
388 }
389
390 /*
391 * When faking up a REPORT LUNS data structure, we simply report one LUN, LUN 0.
392 * We need 16 bytes for this: 4 for the LUN list length, 4 reserved bytes, and
393 * 8 for the single LUN.
394 */
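/*
 * For reference, the 16-byte response constructed below follows the standard
 * REPORT LUNS parameter data layout:
 *
 *	resp[0..3]	LUN list length (big-endian); 8 for our single LUN
 *	resp[4..7]	reserved
 *	resp[8..15]	the single LUN itself, all zeros (LUN 0)
 */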
395 static void
396 smrt_fake_report_lun(smrt_command_t *smcm, struct scsi_pkt *pkt)
397 {
398 size_t sz;
399 char resp[16];
400 struct buf *bp;
401
402 pkt->pkt_reason = CMD_CMPLT;
403 pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD |
404 STATE_GOT_STATUS;
405
406 /*
407 * Check that the request is valid. If any reserved bytes are non-zero, or
408 * the SELECT REPORT field (byte 2) holds a value other than 0x00, 0x01, or
409 * 0x02, then it's an illegal request.
410 */
411 if (pkt->pkt_cdbp[1] != 0 || pkt->pkt_cdbp[3] != 0 ||
412 pkt->pkt_cdbp[4] != 0 || pkt->pkt_cdbp[5] != 0 ||
413 pkt->pkt_cdbp[10] != 0 || pkt->pkt_cdbp[11] != 0 ||
414 pkt->pkt_cdbp[2] > 0x2) {
415 smrt_set_arq_data(pkt, KEY_ILLEGAL_REQUEST);
416 return;
417 }
418
419 /*
420 * Construct the actual REPORT LUNS reply. We need to indicate a single
421 * LUN of all zeros, so the LUN list length is 8 bytes (the size of one LUN
422 * entry) and the rest of the structure remains zero.
424 */
425 bzero(resp, sizeof (resp));
426 resp[3] = sizeof (scsi_lun_t);
427
428 bp = scsi_pkt2bp(pkt);
429 sz = MIN(sizeof (resp), bp->b_bcount);
430
431 bp_mapin(bp);
432 bcopy(resp, bp->b_un.b_addr, sz);
433 bp_mapout(bp);
434 pkt->pkt_state |= STATE_XFERRED_DATA;
435 pkt->pkt_resid = bp->b_bcount - sz;
436 if (pkt->pkt_scblen >= 1) {
437 pkt->pkt_scbp[0] = STATUS_GOOD;
438 }
439 }
440
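/*
 * tran_start(9E) entry point, shared by the logical volume and physical
 * target transports.  A few commands the controller mishandles for logical
 * volumes are intercepted here (REPORT LUNS is faked up; a handful of others
 * are failed with ILLEGAL REQUEST sense data); everything else is translated
 * into a CISS request and submitted to the controller, then either completed
 * asynchronously via smrt_hba_complete() or polled when FLAG_NOINTR is set.
 */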
441 static int
442 smrt_tran_start(struct scsi_address *sa, struct scsi_pkt *pkt)
443 {
444 _NOTE(ARGUNUSED(sa))
445
446 struct scsi_device *sd;
447 smrt_target_t *smtg;
448 smrt_t *smrt;
449 smrt_command_scsa_t *smcms;
450 smrt_command_t *smcm;
451 int r;
452
453 sd = scsi_address_device(&pkt->pkt_address);
454 VERIFY(sd != NULL);
455 smtg = scsi_device_hba_private_get(sd);
456 VERIFY(smtg != NULL);
457 smrt = smtg->smtg_ctlr;
458 VERIFY(smrt != NULL);
459 smcms = (smrt_command_scsa_t *)pkt->pkt_ha_private;
460 VERIFY(smcms != NULL);
461 smcm = smcms->smcms_command;
462 VERIFY(smcm != NULL);
463
464 if (smcm->smcm_status & SMRT_CMD_STATUS_TRAN_START) {
465 /*
466 * This is a retry of a command that has already been
467 * used once. Assign it a new tag number.
468 */
469 smrt_command_reuse(smcm);
470 }
471 smcm->smcm_status |= SMRT_CMD_STATUS_TRAN_START;
472
473 /*
474 * The sophisticated firmware in this controller cannot possibly bear
475 * the following SCSI commands. It appears to return a response with
476 * the status STATUS_ACA_ACTIVE (0x30), which is not something we
477 * expect. Instead, fake up a failure response.
478 */
479 switch (pkt->pkt_cdbp[0]) {
480 case SCMD_FORMAT:
481 case SCMD_LOG_SENSE_G1:
482 case SCMD_MODE_SELECT:
483 case SCMD_PERSISTENT_RESERVE_IN:
484 if (smtg->smtg_physical) {
485 break;
486 }
487
488 smrt->smrt_stats.smrts_ignored_scsi_cmds++;
489 smcm->smcm_status |= SMRT_CMD_STATUS_TRAN_IGNORED;
490
491 /*
492 * Mark the command as completed to the point where we
493 * received a SCSI status code:
494 */
495 pkt->pkt_reason = CMD_CMPLT;
496 pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET |
497 STATE_SENT_CMD | STATE_GOT_STATUS;
498
499 /*
500 * Mock up sense data for an illegal request:
501 */
502 smrt_set_arq_data(pkt, KEY_ILLEGAL_REQUEST);
503
504 scsi_hba_pkt_comp(pkt);
505 return (TRAN_ACCEPT);
506 case SCMD_REPORT_LUNS:
507 /*
508 * The SMRT controller does not accept a REPORT LUNS command for
509 * logical volumes. As such, we need to fake up a REPORT LUNS
510 * response that has a single LUN, LUN 0.
511 */
512 if (smtg->smtg_physical) {
513 break;
514 }
515
516 smrt_fake_report_lun(smcm, pkt);
517
518 scsi_hba_pkt_comp(pkt);
519 return (TRAN_ACCEPT);
520 default:
521 break;
522 }
523
524 if (pkt->pkt_flags & FLAG_NOINTR) {
525 /*
526 * We must sleep and wait for the completion of this command.
527 */
528 smcm->smcm_status |= SMRT_CMD_STATUS_POLLED;
529 }
530
531 /*
532 * Because we provide a tran_setup_pkt(9E) entrypoint, we must now
533 * set up the Scatter/Gather List in the Command to reflect any
534 * DMA resources passed to us by the framework.
535 */
536 if (pkt->pkt_numcookies > smrt->smrt_sg_cnt) {
537 /*
538 * More DMA cookies than we are prepared to handle.
539 */
540 dev_err(smrt->smrt_dip, CE_WARN, "too many DMA cookies (got %u;"
541 " expected %u)", pkt->pkt_numcookies, smrt->smrt_sg_cnt);
542 return (TRAN_BADPKT);
543 }
544 smcm->smcm_va_cmd->Header.SGList = pkt->pkt_numcookies;
545 smcm->smcm_va_cmd->Header.SGTotal = pkt->pkt_numcookies;
546 for (unsigned i = 0; i < pkt->pkt_numcookies; i++) {
547 smcm->smcm_va_cmd->SG[i].Addr =
548 LE_64(pkt->pkt_cookies[i].dmac_laddress);
549 smcm->smcm_va_cmd->SG[i].Len =
550 LE_32(pkt->pkt_cookies[i].dmac_size);
551 }
552
553 /*
554 * Copy logical volume address from the target object:
555 */
556 smcm->smcm_va_cmd->Header.LUN = *smcm->smcm_target->smtg_addr;
557
558 /*
559 * Initialise the command block.
560 */
561 smcm->smcm_va_cmd->Request.CDBLen = pkt->pkt_cdblen;
562 smcm->smcm_va_cmd->Request.Type.Type = CISS_TYPE_CMD;
563 smcm->smcm_va_cmd->Request.Type.Attribute = CISS_ATTR_SIMPLE;
564 smcm->smcm_va_cmd->Request.Timeout = LE_16(pkt->pkt_time);
565 if (pkt->pkt_numcookies > 0) {
566 /*
567 * There are DMA resources; set the transfer direction
568 * appropriately:
569 */
570 if (pkt->pkt_dma_flags & DDI_DMA_READ) {
571 smcm->smcm_va_cmd->Request.Type.Direction =
572 CISS_XFER_READ;
573 } else if (pkt->pkt_dma_flags & DDI_DMA_WRITE) {
574 smcm->smcm_va_cmd->Request.Type.Direction =
575 CISS_XFER_WRITE;
576 } else {
577 smcm->smcm_va_cmd->Request.Type.Direction =
578 CISS_XFER_NONE;
579 }
580 } else {
581 /*
582 * No DMA resources means no transfer.
583 */
584 smcm->smcm_va_cmd->Request.Type.Direction = CISS_XFER_NONE;
585 }
586
587 /*
588 * Initialise the SCSI packet as described in tran_start(9E). We will
589 * progressively update these fields as the command moves through the
590 * submission and completion states.
591 */
592 pkt->pkt_resid = 0;
593 pkt->pkt_reason = CMD_CMPLT;
594 pkt->pkt_statistics = 0;
595 pkt->pkt_state = 0;
596
597 /*
598 * If this SCSI packet has a timeout, configure an appropriate
599 * expiry time:
600 */
601 if (pkt->pkt_time != 0) {
602 smcm->smcm_expiry = gethrtime() + pkt->pkt_time * NANOSEC;
603 }
604
605 /*
606 * Submit the command to the controller.
607 */
608 mutex_enter(&smrt->smrt_mutex);
609
610 /*
611 * If we're dumping, there's a chance that the target we're talking to
612 * could have disappeared during post-panic device discovery. If this
613 * target is part of the dump device and is now gone, report a fatal
614 * error here.
615 */
616 if (ddi_in_panic() && smtg->smtg_gone) {
617 mutex_exit(&smrt->smrt_mutex);
618
619 dev_err(smrt->smrt_dip, CE_WARN, "smrt_submit failed: target "
620 "%s is gone, it did not come back after post-panic reset "
621 "device discovery", scsi_device_unit_address(sd));
622
623 return (TRAN_FATAL_ERROR);
624 }
625
626 smrt->smrt_stats.smrts_tran_starts++;
627 if ((r = smrt_submit(smrt, smcm)) != 0) {
628 mutex_exit(&smrt->smrt_mutex);
629
630 dev_err(smrt->smrt_dip, CE_WARN, "smrt_submit failed %d", r);
631
632 /*
633 * Inform the SCSI framework that we could not submit
634 * the command.
635 */
636 return (r == EAGAIN ? TRAN_BUSY : TRAN_FATAL_ERROR);
637 }
638
639 /*
640 * Update the SCSI packet to reflect submission of the command.
641 */
642 pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD;
643
644 if (pkt->pkt_flags & FLAG_NOINTR) {
645 /*
646 * Poll the controller for completion of the command we
647 * submitted. Once this routine has returned, the completion
648 * callback will have been fired with either an active response
649 * (success or error) or a timeout. The command is freed by
650 * the completion callback, so it may not be referenced again
651 * after this call returns.
652 */
653 (void) smrt_poll_for(smrt, smcm);
654 }
655
656 mutex_exit(&smrt->smrt_mutex);
657 return (TRAN_ACCEPT);
658 }
659
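/*
 * tran_reset(9E) entry point.  A full controller reset is extremely
 * disruptive, so we first check whether the controller still responds to a
 * NOP (ping) message and only fall back to smrt_ctlr_reset() if it does not.
 * We return 1 for success and 0 for failure, as the framework expects.
 */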
660 static int
661 smrt_tran_reset(struct scsi_address *sa, int level)
662 {
663 _NOTE(ARGUNUSED(level))
664
665 struct scsi_device *sd;
666 smrt_target_t *smtg;
667 smrt_t *smrt;
668 smrt_command_t *smcm;
669 int r;
670
671 sd = scsi_address_device(sa);
672 VERIFY(sd != NULL);
673 smtg = scsi_device_hba_private_get(sd);
674 VERIFY(smtg != NULL);
675 smrt = smtg->smtg_ctlr;
676
677 /*
678 * The framework has requested some kind of SCSI reset. A
679 * controller-level soft reset can take a very long time -- often on
680 * the order of 30-60 seconds -- but might well be our only option if
681 * the controller is non-responsive.
682 *
683 * First, check if the controller is responding to pings.
684 */
685 again:
686 if ((smcm = smrt_command_alloc(smrt, SMRT_CMDTYPE_INTERNAL,
687 KM_NOSLEEP)) == NULL) {
688 return (0);
689 }
690
691 smrt_write_message_nop(smcm, SMRT_PING_CHECK_TIMEOUT);
692
693 mutex_enter(&smrt->smrt_mutex);
694 smrt->smrt_stats.smrts_tran_resets++;
695 if (ddi_in_panic()) {
696 goto skip_check;
697 }
698
699 if (smrt->smrt_status & SMRT_CTLR_STATUS_RESETTING) {
700 /*
701 * The controller is already resetting. Wait for that
702 * to finish.
703 */
704 while (smrt->smrt_status & SMRT_CTLR_STATUS_RESETTING) {
705 cv_wait(&smrt->smrt_cv_finishq, &smrt->smrt_mutex);
706 }
707 }
708
709 skip_check:
710 /*
711 * Submit our ping to the controller.
712 */
713 smcm->smcm_status |= SMRT_CMD_STATUS_POLLED;
714 smcm->smcm_expiry = gethrtime() + SMRT_PING_CHECK_TIMEOUT * NANOSEC;
715 if (smrt_submit(smrt, smcm) != 0) {
716 mutex_exit(&smrt->smrt_mutex);
717 smrt_command_free(smcm);
718 return (0);
719 }
720
721 if ((r = smrt_poll_for(smrt, smcm)) != 0) {
722 VERIFY3S(r, ==, ETIMEDOUT);
723 VERIFY0(smcm->smcm_status & SMRT_CMD_STATUS_POLL_COMPLETE);
724
725 /*
726 * The ping command timed out. Abandon it now.
727 */
728 dev_err(smrt->smrt_dip, CE_WARN, "controller ping timed out");
729 smcm->smcm_status |= SMRT_CMD_STATUS_ABANDONED;
730 smcm->smcm_status &= ~SMRT_CMD_STATUS_POLLED;
731
732 } else if ((smcm->smcm_status & SMRT_CMD_STATUS_RESET_SENT) ||
733 (smcm->smcm_status & SMRT_CMD_STATUS_ERROR)) {
734 /*
735 * The command completed in error, or a controller reset
736 * was sent while we were trying to ping.
737 */
738 dev_err(smrt->smrt_dip, CE_WARN, "controller ping error");
739 mutex_exit(&smrt->smrt_mutex);
740 smrt_command_free(smcm);
741 mutex_enter(&smrt->smrt_mutex);
742
743 } else {
744 VERIFY(smcm->smcm_status & SMRT_CMD_STATUS_COMPLETE);
745
746 /*
747 * The controller is responsive, and a full soft reset would be
748 * extremely disruptive to the system. Given our spotty
749 * support for some SCSI commands (which can upset the target
750 * drivers) and the historically lax behaviour of the "smrt"
751 * driver, we grit our teeth and pretend we were able to
752 * perform a reset.
753 */
754 mutex_exit(&smrt->smrt_mutex);
755 smrt_command_free(smcm);
756 return (1);
757 }
758
759 /*
760 * If a reset has been initiated in the last 90 seconds, try
761 * another ping.
762 */
763 if (gethrtime() < smrt->smrt_last_reset_start + 90 * NANOSEC) {
764 dev_err(smrt->smrt_dip, CE_WARN, "controller ping failed, but "
765 "was recently reset; retrying ping");
766 mutex_exit(&smrt->smrt_mutex);
767
768 /*
769 * Sleep for a second first.
770 */
771 if (ddi_in_panic()) {
772 drv_usecwait(1 * MICROSEC);
773 } else {
774 delay(drv_usectohz(1 * MICROSEC));
775 }
776 goto again;
777 }
778
779 dev_err(smrt->smrt_dip, CE_WARN, "controller ping failed; resetting "
780 "controller");
781 if (smrt_ctlr_reset(smrt) != 0) {
782 dev_err(smrt->smrt_dip, CE_WARN, "controller reset failure");
783 mutex_exit(&smrt->smrt_mutex);
784 return (0);
785 }
786
787 mutex_exit(&smrt->smrt_mutex);
788 return (1);
789 }
790
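/*
 * tran_abort(9E) entry point.  If a specific packet was supplied, we send an
 * abort message for that command's tag; otherwise we ask the controller to
 * abort every command in flight for this target.  We return 1 if the abort
 * message completed successfully and 0 otherwise.
 */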
791 static int
792 smrt_tran_abort(struct scsi_address *sa, struct scsi_pkt *pkt)
793 {
794 struct scsi_device *sd;
795 smrt_target_t *smtg;
796 smrt_t *smrt;
797 smrt_command_t *smcm = NULL;
798 smrt_command_t *abort_smcm;
799
800 sd = scsi_address_device(sa);
801 VERIFY(sd != NULL);
802 smtg = scsi_device_hba_private_get(sd);
803 VERIFY(smtg != NULL);
804 smrt = smtg->smtg_ctlr;
805 VERIFY(smrt != NULL);
806
807
808 if ((abort_smcm = smrt_command_alloc(smrt, SMRT_CMDTYPE_INTERNAL,
809 KM_NOSLEEP)) == NULL) {
810 /*
811 * No resources available to send an abort message.
812 */
813 return (0);
814 }
815
816 mutex_enter(&smrt->smrt_mutex);
817 smrt->smrt_stats.smrts_tran_aborts++;
818 if (pkt != NULL) {
819 /*
820 * The framework wants us to abort a specific SCSI packet.
821 */
822 smrt_command_scsa_t *smcms = (smrt_command_scsa_t *)
823 pkt->pkt_ha_private;
824 smcm = smcms->smcms_command;
825
826 if (!(smcm->smcm_status & SMRT_CMD_STATUS_INFLIGHT)) {
827 /*
828 * This message is not currently in flight, so we
829 * cannot abort it.
830 */
831 goto fail;
832 }
833
834 if (smcm->smcm_status & SMRT_CMD_STATUS_ABORT_SENT) {
835 /*
836 * An abort message for this command has already been
837 * sent to the controller. Return failure.
838 */
839 goto fail;
840 }
841
842 smrt_write_message_abort_one(abort_smcm, smcm->smcm_tag);
843 } else {
844 /*
845 * The framework wants us to abort every in flight command
846 * for the target with this address.
847 */
848 smrt_write_message_abort_all(abort_smcm, smtg->smtg_addr);
849 }
850
851 /*
852 * Submit the abort message to the controller.
853 */
854 abort_smcm->smcm_status |= SMRT_CMD_STATUS_POLLED;
855 if (smrt_submit(smrt, abort_smcm) != 0) {
856 goto fail;
857 }
858
859 if (pkt != NULL) {
860 /*
861 * Record some debugging information about the abort we
862 * sent:
863 */
864 smcm->smcm_abort_time = gethrtime();
865 smcm->smcm_abort_tag = abort_smcm->smcm_tag;
866
867 /*
868 * Mark the command as aborted so that we do not send
869 * a second abort message:
870 */
871 smcm->smcm_status |= SMRT_CMD_STATUS_ABORT_SENT;
872 }
873
874 /*
875 * Poll for completion of the abort message. Note that this function
876 * only fails if we set a timeout on the command, which we have not
877 * done.
878 */
879 VERIFY0(smrt_poll_for(smrt, abort_smcm));
880
881 if ((abort_smcm->smcm_status & SMRT_CMD_STATUS_RESET_SENT) ||
882 (abort_smcm->smcm_status & SMRT_CMD_STATUS_ERROR)) {
883 /*
884 * Either the controller was reset or the abort command
885 * failed.
886 */
887 goto fail;
888 }
889
890 /*
891 * The command was successfully aborted.
892 */
893 mutex_exit(&smrt->smrt_mutex);
894 smrt_command_free(abort_smcm);
895 return (1);
896
897 fail:
898 mutex_exit(&smrt->smrt_mutex);
899 smrt_command_free(abort_smcm);
900 return (0);
901 }
902
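/*
 * Translate the CISS error information block of a completed command into the
 * SCSI status byte and, when the device returned CHECK CONDITION and the
 * packet has room for a struct scsi_arq_status, into mocked-up auto request
 * sense data.
 */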
903 static void
904 smrt_hba_complete_status(smrt_command_t *smcm)
905 {
906 ErrorInfo_t *ei = smcm->smcm_va_err;
907 struct scsi_pkt *pkt = smcm->smcm_scsa->smcms_pkt;
908
909 bzero(pkt->pkt_scbp, pkt->pkt_scblen);
910
911 if (ei->ScsiStatus != STATUS_CHECK) {
912 /*
913 * If the SCSI status is not CHECK CONDITION, we don't want
914 * to try and read the sense data buffer.
915 */
916 goto simple_status;
917 }
918
919 if (pkt->pkt_scblen < sizeof (struct scsi_arq_status)) {
920 /*
921 * There is not enough room for a request sense structure.
922 * Fall back to reporting just the SCSI status code.
923 */
924 goto simple_status;
925 }
926
927 /* LINTED: E_BAD_PTR_CAST_ALIGN */
928 struct scsi_arq_status *sts = (struct scsi_arq_status *)pkt->pkt_scbp;
929
930 /*
931 * Copy in the SCSI status from the original command.
932 */
933 bcopy(&ei->ScsiStatus, &sts->sts_status, sizeof (sts->sts_status));
934
935 /*
936 * Mock up a successful REQUEST SENSE:
937 */
938 sts->sts_rqpkt_reason = CMD_CMPLT;
939 sts->sts_rqpkt_resid = 0;
940 sts->sts_rqpkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
941 STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;
942 sts->sts_rqpkt_statistics = 0;
943
944 /*
945 * The sense data from the controller should be copied into place
946 * starting at the "sts_sensedata" member of the auto request
947 * sense object.
948 */
949 size_t sense_len = pkt->pkt_scblen - offsetof(struct scsi_arq_status,
950 sts_sensedata);
951 if (ei->SenseLen < sense_len) {
952 /*
953 * Only copy sense data bytes that are within the region
954 * the controller marked as valid.
955 */
956 sense_len = ei->SenseLen;
957 }
958 bcopy(ei->SenseInfo, &sts->sts_sensedata, sense_len);
959
960 pkt->pkt_state |= STATE_ARQ_DONE;
961 return;
962
963 simple_status:
964 if (pkt->pkt_scblen < sizeof (struct scsi_status)) {
965 /*
966 * There is not even enough room for the SCSI status byte.
967 */
968 return;
969 }
970
971 bcopy(&ei->ScsiStatus, pkt->pkt_scbp, sizeof (struct scsi_status));
972 }
973
974 static void
975 smrt_hba_complete_log_error(smrt_command_t *smcm, const char *name)
976 {
977 smrt_t *smrt = smcm->smcm_ctlr;
978 ErrorInfo_t *ei = smcm->smcm_va_err;
979
980 dev_err(smrt->smrt_dip, CE_WARN, "!SCSI command failed: %s: "
981 "SCSI op %x, CISS status %x, SCSI status %x", name,
982 (unsigned)smcm->smcm_va_cmd->Request.CDB[0],
983 (unsigned)ei->CommandStatus, (unsigned)ei->ScsiStatus);
984 }
985
986 /*
987 * Completion routine for commands submitted to the controller via the SCSI
988 * framework.
989 */
990 void
991 smrt_hba_complete(smrt_command_t *smcm)
992 {
993 smrt_t *smrt = smcm->smcm_ctlr;
994 ErrorInfo_t *ei = smcm->smcm_va_err;
995 struct scsi_pkt *pkt = smcm->smcm_scsa->smcms_pkt;
996
997 VERIFY(MUTEX_HELD(&smrt->smrt_mutex));
998
999 pkt->pkt_resid = ei->ResidualCnt;
1000
1001 /*
1002 * Check if the controller was reset while this packet was in flight.
1003 */
1004 if (smcm->smcm_status & SMRT_CMD_STATUS_RESET_SENT) {
1005 if (pkt->pkt_reason == CMD_CMPLT) {
1006 /*
1007 * If another error status has already been written,
1008 * do not overwrite it.
1009 */
1010 pkt->pkt_reason = CMD_RESET;
1011 }
1012 pkt->pkt_statistics |= STAT_BUS_RESET | STAT_DEV_RESET;
1013 goto finish;
1014 }
1015
1016 if (!(smcm->smcm_status & SMRT_CMD_STATUS_ERROR)) {
1017 /*
1018 * The command was completed without error by the controller.
1019 *
1020 * As per the specification, if an error was not signalled
1021 * by the controller through the CISS transport method,
1022 * the error information (including CommandStatus) has not
1023 * been written and should not be checked.
1024 */
1025 pkt->pkt_state |= STATE_XFERRED_DATA | STATE_GOT_STATUS;
1026 goto finish;
1027 }
1028
1029 /*
1030 * Check the completion status to determine what befell this request.
1031 */
1032 switch (ei->CommandStatus) {
1033 case CISS_CMD_SUCCESS:
1034 /*
1035 * In a certain sense, the specification contradicts itself.
1036 * On the one hand, it suggests that a successful command
1037 * will not result in a controller write to the error
1038 * information block; on the other hand, it makes room
1039 * for a status code (0) which denotes a successful
1040 * execution.
1041 *
1042 * To be on the safe side, we check for that condition here.
1043 */
1044 pkt->pkt_state |= STATE_XFERRED_DATA | STATE_GOT_STATUS;
1045 break;
1046
1047 case CISS_CMD_DATA_UNDERRUN:
1048 /*
1049 * A data underrun occurred. Ideally this will result in
1050 * an appropriate SCSI status and sense data.
1051 */
1052 pkt->pkt_state |= STATE_XFERRED_DATA | STATE_GOT_STATUS;
1053 break;
1054
1055 case CISS_CMD_TARGET_STATUS:
1056 /*
1057 * The command completed, but an error occurred. We need
1058 * to provide the sense data to the SCSI framework.
1059 */
1060 pkt->pkt_state |= STATE_XFERRED_DATA | STATE_GOT_STATUS;
1061 break;
1062
1063 case CISS_CMD_DATA_OVERRUN:
1064 /*
1065 * Data overrun has occurred.
1066 */
1067 smrt_hba_complete_log_error(smcm, "data overrun");
1068 pkt->pkt_reason = CMD_DATA_OVR;
1069 pkt->pkt_state |= STATE_XFERRED_DATA | STATE_GOT_STATUS;
1070 break;
1071
1072 case CISS_CMD_INVALID:
1073 /*
1074 * One or more fields in the command has invalid data.
1075 */
1076 smrt_hba_complete_log_error(smcm, "invalid command");
1077 pkt->pkt_reason = CMD_BADMSG;
1078 pkt->pkt_state |= STATE_GOT_STATUS;
1079 break;
1080
1081 case CISS_CMD_PROTOCOL_ERR:
1082 /*
1083 * An error occurred in communication with the end device.
1084 */
1085 smrt_hba_complete_log_error(smcm, "protocol error");
1086 pkt->pkt_reason = CMD_BADMSG;
1087 pkt->pkt_state |= STATE_GOT_STATUS;
1088 break;
1089
1090 case CISS_CMD_HARDWARE_ERR:
1091 /*
1092 * A hardware error occurred.
1093 */
1094 smrt_hba_complete_log_error(smcm, "hardware error");
1095 pkt->pkt_reason = CMD_INCOMPLETE;
1096 break;
1097
1098 case CISS_CMD_CONNECTION_LOST:
1099 /*
1100 * The connection with the end device cannot be
1101 * re-established.
1102 */
1103 smrt_hba_complete_log_error(smcm, "connection lost");
1104 pkt->pkt_reason = CMD_INCOMPLETE;
1105 break;
1106
1107 case CISS_CMD_ABORTED:
1108 case CISS_CMD_UNSOLICITED_ABORT:
1109 if (smcm->smcm_status & SMRT_CMD_STATUS_TIMEOUT) {
1110 /*
1111 * This abort was arranged by the periodic routine
1112 * in response to an elapsed timeout.
1113 */
1114 pkt->pkt_reason = CMD_TIMEOUT;
1115 pkt->pkt_statistics |= STAT_TIMEOUT;
1116 } else {
1117 pkt->pkt_reason = CMD_ABORTED;
1118 }
1119 pkt->pkt_state |= STATE_XFERRED_DATA | STATE_GOT_STATUS;
1120 pkt->pkt_statistics |= STAT_ABORTED;
1121 break;
1122
1123 case CISS_CMD_TIMEOUT:
1124 smrt_hba_complete_log_error(smcm, "timeout");
1125 pkt->pkt_reason = CMD_TIMEOUT;
1126 pkt->pkt_statistics |= STAT_TIMEOUT;
1127 break;
1128
1129 default:
1130 /*
1131 * This is an error that we were not prepared to handle.
1132 * Signal a generic transport-level error to the framework.
1133 */
1134 smrt_hba_complete_log_error(smcm, "unexpected error");
1135 pkt->pkt_reason = CMD_TRAN_ERR;
1136 }
1137
1138 /*
1139 * Attempt to read a SCSI status code and any automatic
1140 * request sense data that may exist:
1141 */
1142 smrt_hba_complete_status(smcm);
1143
1144 finish:
1145 mutex_exit(&smrt->smrt_mutex);
1146 scsi_hba_pkt_comp(pkt);
1147 mutex_enter(&smrt->smrt_mutex);
1148 }
1149
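/*
 * tran_getcap(9E) entry point.  Returns the value of the requested
 * capability, or -1 if the capability is unknown or cannot be determined.
 */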
1150 static int
1151 smrt_getcap(struct scsi_address *sa, char *cap, int whom)
1152 {
1153 _NOTE(ARGUNUSED(whom))
1154
1155 struct scsi_device *sd;
1156 smrt_target_t *smtg;
1157 smrt_t *smrt;
1158 int index;
1159
1160 sd = scsi_address_device(sa);
1161 VERIFY(sd != NULL);
1162 smtg = scsi_device_hba_private_get(sd);
1163 VERIFY(smtg != NULL);
1164 smrt = smtg->smtg_ctlr;
1165 VERIFY(smrt != NULL);
1166
1167 if ((index = scsi_hba_lookup_capstr(cap)) == DDI_FAILURE) {
1168 /*
1169 * This capability string could not be translated to an
1170 * ID number, so it must not exist.
1171 */
1172 return (-1);
1173 }
1174
1175 switch (index) {
1176 case SCSI_CAP_CDB_LEN:
1177 /*
1178 * The CDB field in the CISS request block is fixed at 16
1179 * bytes.
1180 */
1181 return (CISS_CDBLEN);
1182
1183 case SCSI_CAP_DMA_MAX:
1184 if (smrt->smrt_dma_attr.dma_attr_maxxfer > INT_MAX) {
1185 return (INT_MAX);
1186 }
1187 return ((int)smrt->smrt_dma_attr.dma_attr_maxxfer);
1188
1189 case SCSI_CAP_SECTOR_SIZE:
1190 if (smrt->smrt_dma_attr.dma_attr_granular > INT_MAX) {
1191 return (-1);
1192 }
1193 return ((int)smrt->smrt_dma_attr.dma_attr_granular);
1194
1195 /*
1196 * If this target corresponds to a physical device, then we always
1197 * indicate that we're on a SAS interconnect. Otherwise, we default to
1198 * saying that we're on a parallel bus. We can't use SAS for
1199 * everything, unfortunately. When you declare yourself to be a SAS
1200 * interconnect, it's expected that you have a full 16-hex-digit WWN as the
1201 * target address. If not, devfsadm will not be able to enumerate the device
1202 * and create /dev/[r]dsk entries.
1203 */
1204 case SCSI_CAP_INTERCONNECT_TYPE:
1205 if (smtg->smtg_physical) {
1206 return (INTERCONNECT_SAS);
1207 } else {
1208 return (INTERCONNECT_PARALLEL);
1209 }
1210
1211 case SCSI_CAP_DISCONNECT:
1212 case SCSI_CAP_SYNCHRONOUS:
1213 case SCSI_CAP_WIDE_XFER:
1214 case SCSI_CAP_ARQ:
1215 case SCSI_CAP_UNTAGGED_QING:
1216 case SCSI_CAP_TAGGED_QING:
1217 /*
1218 * These capabilities are supported by the driver and the
1219 * controller. See scsi_ifgetcap(9F) for more information.
1220 */
1221 return (1);
1222
1223 case SCSI_CAP_INITIATOR_ID:
1224 case SCSI_CAP_RESET_NOTIFICATION:
1225 /*
1226 * These capabilities are not supported.
1227 */
1228 return (0);
1229
1230 default:
1231 /*
1232 * The property in question is not known to this driver.
1233 */
1234 return (-1);
1235 }
1236 }
1237
1238 /* ARGSUSED */
1239 static int
1240 smrt_setcap(struct scsi_address *sa, char *cap, int value, int whom)
1241 {
1242 int index;
1243
1244 if ((index = scsi_hba_lookup_capstr(cap)) == DDI_FAILURE) {
1245 /*
1246 * This capability string could not be translated to an
1247 * ID number, so it must not exist.
1248 */
1249 return (-1);
1250 }
1251
1252 if (whom == 0) {
1253 /*
1254 * When whom is 0, this is a request to set a capability for
1255 * all targets. As per the recommendation in tran_setcap(9E),
1256 * we do not support this mode of operation.
1257 */
1258 return (-1);
1259 }
1260
1261 switch (index) {
1262 case SCSI_CAP_CDB_LEN:
1263 case SCSI_CAP_DMA_MAX:
1264 case SCSI_CAP_SECTOR_SIZE:
1265 case SCSI_CAP_INITIATOR_ID:
1266 case SCSI_CAP_DISCONNECT:
1267 case SCSI_CAP_SYNCHRONOUS:
1268 case SCSI_CAP_WIDE_XFER:
1269 case SCSI_CAP_ARQ:
1270 case SCSI_CAP_UNTAGGED_QING:
1271 case SCSI_CAP_TAGGED_QING:
1272 case SCSI_CAP_RESET_NOTIFICATION:
1273 case SCSI_CAP_INTERCONNECT_TYPE:
1274 /*
1275 * We do not support changing any capabilities at this time.
1276 */
1277 return (0);
1278
1279 default:
1280 /*
1281 * The capability in question is not known to this driver.
1282 */
1283 return (-1);
1284 }
1285 }
1286
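/*
 * Attach the controller node itself to the SCSA framework.  The entry points
 * installed here deliberately refuse target attachment and packet submission;
 * real I/O flows through the per-iport transports configured in
 * smrt_logvol_hba_setup() and smrt_phys_hba_setup().
 */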
1287 int
1288 smrt_ctrl_hba_setup(smrt_t *smrt)
1289 {
1290 int flags;
1291 dev_info_t *dip = smrt->smrt_dip;
1292 scsi_hba_tran_t *tran;
1293
1294 if ((tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP)) == NULL) {
1295 dev_err(dip, CE_WARN, "could not allocate SCSA resources");
1296 return (DDI_FAILURE);
1297 }
1298
1299 smrt->smrt_hba_tran = tran;
1300 tran->tran_hba_private = smrt;
1301
1302 tran->tran_tgt_init = smrt_ctrl_tran_tgt_init;
1303 tran->tran_tgt_probe = scsi_hba_probe;
1304
1305 tran->tran_start = smrt_ctrl_tran_start;
1306
1307 tran->tran_getcap = smrt_getcap;
1308 tran->tran_setcap = smrt_setcap;
1309
1310 tran->tran_setup_pkt = smrt_tran_setup_pkt;
1311 tran->tran_teardown_pkt = smrt_tran_teardown_pkt;
1312 tran->tran_hba_len = sizeof (smrt_command_scsa_t);
1313 tran->tran_interconnect_type = INTERCONNECT_SAS;
1314
1315 flags = SCSI_HBA_HBA | SCSI_HBA_TRAN_SCB | SCSI_HBA_ADDR_COMPLEX;
1316 if (scsi_hba_attach_setup(dip, &smrt->smrt_dma_attr, tran, flags) !=
1317 DDI_SUCCESS) {
1318 dev_err(dip, CE_WARN, "could not attach to SCSA framework");
1319 scsi_hba_tran_free(tran);
1320 return (DDI_FAILURE);
1321 }
1322
1323 smrt->smrt_init_level |= SMRT_INITLEVEL_SCSA;
1324 return (DDI_SUCCESS);
1325 }
1326
1327 void
1328 smrt_ctrl_hba_teardown(smrt_t *smrt)
1329 {
1330 if (smrt->smrt_init_level & SMRT_INITLEVEL_SCSA) {
1331 VERIFY(scsi_hba_detach(smrt->smrt_dip) != DDI_FAILURE);
1332 scsi_hba_tran_free(smrt->smrt_hba_tran);
1333 smrt->smrt_init_level &= ~SMRT_INITLEVEL_SCSA;
1334 }
1335 }
1336
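/*
 * Set up the iport transport for logical volumes: install the per-target and
 * I/O entry points, create the target map that drives enumeration, and kick
 * off an initial discovery pass.
 */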
1337 int
1338 smrt_logvol_hba_setup(smrt_t *smrt, dev_info_t *iport)
1339 {
1340 scsi_hba_tran_t *tran;
1341
1342 tran = ddi_get_driver_private(iport);
1343 if (tran == NULL)
1344 return (DDI_FAILURE);
1345
1346 tran->tran_tgt_init = smrt_logvol_tran_tgt_init;
1347 tran->tran_tgt_free = smrt_logvol_tran_tgt_free;
1348
1349 tran->tran_start = smrt_tran_start;
1350 tran->tran_reset = smrt_tran_reset;
1351 tran->tran_abort = smrt_tran_abort;
1352
1353 tran->tran_hba_private = smrt;
1354
1355 mutex_enter(&smrt->smrt_mutex);
1356 if (scsi_hba_tgtmap_create(iport, SCSI_TM_FULLSET, MICROSEC,
1357 2 * MICROSEC, smrt, smrt_logvol_tgtmap_activate,
1358 smrt_logvol_tgtmap_deactivate, &smrt->smrt_virt_tgtmap) !=
1359 DDI_SUCCESS) {
1360 mutex_exit(&smrt->smrt_mutex);
return (DDI_FAILURE);
1361 }
1362
1363 smrt_discover_request(smrt);
1364 mutex_exit(&smrt->smrt_mutex);
1365
1366 return (DDI_SUCCESS);
1367 }
1368
1369 void
1370 smrt_logvol_hba_teardown(smrt_t *smrt, dev_info_t *iport)
1371 {
1372 ASSERT(smrt->smrt_virt_iport == iport);
1373
1374 mutex_enter(&smrt->smrt_mutex);
1375
1376 if (smrt->smrt_virt_tgtmap != NULL) {
1377 scsi_hba_tgtmap_t *t;
1378
1379 /*
1380 * Ensure that we can't be racing with discovery.
1381 */
1382 while (smrt->smrt_status & SMRT_CTLR_DISCOVERY_RUNNING) {
1383 mutex_exit(&smrt->smrt_mutex);
1384 ddi_taskq_wait(smrt->smrt_discover_taskq);
1385 mutex_enter(&smrt->smrt_mutex);
1386 }
1387
1388 t = smrt->smrt_virt_tgtmap;
1389 smrt->smrt_virt_tgtmap = NULL;
1390 mutex_exit(&smrt->smrt_mutex);
1391 scsi_hba_tgtmap_destroy(t);
1392 mutex_enter(&smrt->smrt_mutex);
1393 }
1394
1395 mutex_exit(&smrt->smrt_mutex);
1396 }
1397
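/*
 * Set up the iport transport for physical devices, mirroring the logical
 * volume setup above but with the physical target map and its callbacks.
 */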
1398 int
1399 smrt_phys_hba_setup(smrt_t *smrt, dev_info_t *iport)
1400 {
1401 scsi_hba_tran_t *tran;
1402
1403 tran = ddi_get_driver_private(iport);
1404 if (tran == NULL)
1405 return (DDI_FAILURE);
1406
1407 tran->tran_tgt_init = smrt_phys_tran_tgt_init;
1408 tran->tran_tgt_free = smrt_phys_tran_tgt_free;
1409
1410 tran->tran_start = smrt_tran_start;
1411 tran->tran_reset = smrt_tran_reset;
1412 tran->tran_abort = smrt_tran_abort;
1413
1414 tran->tran_hba_private = smrt;
1415
1416 mutex_enter(&smrt->smrt_mutex);
1417 if (scsi_hba_tgtmap_create(iport, SCSI_TM_FULLSET, MICROSEC,
1418 2 * MICROSEC, smrt, smrt_phys_tgtmap_activate,
1419 smrt_phys_tgtmap_deactivate, &smrt->smrt_phys_tgtmap) !=
1420 DDI_SUCCESS) {
1421 mutex_exit(&smrt->smrt_mutex);
return (DDI_FAILURE);
1422 }
1423
1424 smrt_discover_request(smrt);
1425 mutex_exit(&smrt->smrt_mutex);
1426
1427 return (DDI_SUCCESS);
1428 }
1429
1430 void
1431 smrt_phys_hba_teardown(smrt_t *smrt, dev_info_t *iport)
1432 {
1433 ASSERT(smrt->smrt_phys_iport == iport);
1434
1435 mutex_enter(&smrt->smrt_mutex);
1436
1437 if (smrt->smrt_phys_tgtmap != NULL) {
1438 scsi_hba_tgtmap_t *t;
1439
1440 /*
1441 * Ensure that we can't be racing with discovery.
1442 */
1443 while (smrt->smrt_status & SMRT_CTLR_DISCOVERY_RUNNING) {
1444 mutex_exit(&smrt->smrt_mutex);
1445 ddi_taskq_wait(smrt->smrt_discover_taskq);
1446 mutex_enter(&smrt->smrt_mutex);
1447 }
1448
1449 t = smrt->smrt_phys_tgtmap;
1450 smrt->smrt_phys_tgtmap = NULL;
1451 mutex_exit(&smrt->smrt_mutex);
1452 scsi_hba_tgtmap_destroy(t);
1453 mutex_enter(&smrt->smrt_mutex);
1454 }
1455
1456 mutex_exit(&smrt->smrt_mutex);
1457 }
1458