/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2024 Racktop Systems, Inc.
 */

/*
 * This file implements the RAID iport and tgtmap of lmrc.
 *
 * When the RAID iport is attached, a FULLSET tgtmap is created for RAID
 * devices (LDs). This does not only include RAID volumes, as one would expect,
 * but also physical disks on some controllers in JBOD mode.
 *
 * During attach, or as a result of an async event received from the hardware,
 * we'll get the LD list from the HBA and populate the tgtmap with what we have
 * found. For each LD we'll try to get the SAS WWN by sending an INQUIRY for
 * VPD 0x83, setting up a temporary struct scsi_device to be able to use the
 * normal SCSI I/O code path despite the device not being known to the system
 * at this point.
 *
 * If the device has a SAS WWN, it will be used as the device address.
 * Otherwise we'll use the internal target ID the HBA uses.
 *
 * The target activate and deactivate callbacks for RAID devices are kept
 * really simple, just calling the common lmrc_tgt init/clear functions.
 */

#include <sys/ddi.h>
#include <sys/sunddi.h>

#include <sys/scsi/adapters/mfi/mfi.h>
#include <sys/scsi/adapters/mfi/mfi_evt.h>
#include <sys/scsi/adapters/mfi/mfi_ld.h>

#include "lmrc.h"
#include "lmrc_reg.h"
#include "lmrc_raid.h"

static int lmrc_get_raidmap(lmrc_t *, lmrc_fw_raid_map_t **);
static int lmrc_sync_raidmap(lmrc_t *);
static void lmrc_sync_raidmap_again(lmrc_t *, lmrc_mfi_cmd_t *);
static void lmrc_complete_sync_raidmap(lmrc_t *, lmrc_mfi_cmd_t *);
static int lmrc_validate_raidmap(lmrc_t *, lmrc_fw_raid_map_t *);

static void lmrc_raid_tgt_activate_cb(void *, char *, scsi_tgtmap_tgt_type_t,
    void **);
static boolean_t lmrc_raid_tgt_deactivate_cb(void *, char *,
    scsi_tgtmap_tgt_type_t, void *, scsi_tgtmap_deact_rsn_t);
static struct buf *lmrc_raid_send_inquiry(lmrc_t *, lmrc_tgt_t *, uint8_t,
    uint8_t);
static uint64_t lmrc_raid_get_wwn(lmrc_t *, uint8_t);
static int lmrc_raid_update_tgtmap(lmrc_t *, mfi_ld_tgtid_list_t *);


/*
 * lmrc_get_raidmap
 *
 * Get the RAID map from firmware. Return a minimally sized copy.
 */
static int
lmrc_get_raidmap(lmrc_t *lmrc, lmrc_fw_raid_map_t **raidmap)
{
	lmrc_mfi_cmd_t *mfi;
	lmrc_fw_raid_map_t *rm;
	int ret;

	mfi = lmrc_get_dcmd(lmrc, MFI_FRAME_DIR_READ, MFI_DCMD_LD_MAP_GET_INFO,
	    lmrc->l_max_map_sz, 4);

	if (mfi == NULL)
		return (DDI_FAILURE);

	ret = lmrc_issue_blocked_mfi(lmrc, mfi);

	if (ret != DDI_SUCCESS)
		goto out;

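	/* Sync the DMA memory so we see the RAID map the firmware wrote. */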
	(void) ddi_dma_sync(mfi->mfi_data_dma.ld_hdl, 0,
	    mfi->mfi_data_dma.ld_len, DDI_DMA_SYNC_FORKERNEL);

	rm = mfi->mfi_data_dma.ld_buf;
	if (rm->rm_raidmap_sz > lmrc->l_max_map_sz) {
		dev_err(lmrc->l_dip, CE_WARN,
		    "!FW reports too large a RAID map size: %d",
		    rm->rm_raidmap_sz);
		ret = DDI_FAILURE;
		goto out;
	}

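	/* Return a copy sized to the actual RAID map, not l_max_map_sz. */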
	*raidmap = kmem_zalloc(rm->rm_raidmap_sz, KM_SLEEP);
	bcopy(rm, *raidmap, rm->rm_raidmap_sz);

out:
	lmrc_put_dcmd(lmrc, mfi);

	return (ret);
}

/*
 * lmrc_sync_raidmap
 *
 * Generate a LD target map from the RAID map and send that to the firmware.
 * The command will complete when firmware detects a change, returning a new
 * RAID map in the DMA memory. The size of the RAID map isn't expected to
 * change, so that's what's used as the size for the DMA memory.
 *
 * mbox byte values:
 * [0]: number of LDs
 * [1]: PEND_FLAG, delay completion until a config change is pending
 */
static int
lmrc_sync_raidmap(lmrc_t *lmrc)
{
	lmrc_fw_raid_map_t *rm;
	lmrc_mfi_cmd_t *mfi;
	mfi_dcmd_payload_t *dcmd;

	rw_enter(&lmrc->l_raidmap_lock, RW_READER);
	rm = lmrc->l_raidmap;
	mfi = lmrc_get_dcmd(lmrc, MFI_FRAME_DIR_WRITE, MFI_DCMD_LD_MAP_GET_INFO,
	    rm->rm_raidmap_sz, 4);

	if (mfi == NULL) {
		rw_exit(&lmrc->l_raidmap_lock);
		return (DDI_FAILURE);
	}

	dcmd = &mfi->mfi_frame->mf_dcmd;
	dcmd->md_mbox_8[0] = rm->rm_ld_count;
	dcmd->md_mbox_8[1] = MFI_DCMD_MBOX_PEND_FLAG;
	rw_exit(&lmrc->l_raidmap_lock);

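	/* lmrc_sync_raidmap_again() expects mfi_lock to be held. */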
	mutex_enter(&mfi->mfi_lock);
	lmrc_sync_raidmap_again(lmrc, mfi);
	mutex_exit(&mfi->mfi_lock);

	return (DDI_SUCCESS);
}

/*
 * lmrc_sync_raidmap_again
 *
 * Called by lmrc_sync_raidmap() and lmrc_complete_sync_raidmap() to avoid
 * deallocating and reallocating DMA memory and the MFI command in the latter,
 * while executing in interrupt context.
 *
 * This does the actual work of building the LD target map for the FW and
 * issuing the command; it makes no sleeping allocations and it cannot fail.
 */
static void
lmrc_sync_raidmap_again(lmrc_t *lmrc, lmrc_mfi_cmd_t *mfi)
{
	lmrc_fw_raid_map_t *rm;
	lmrc_dma_t *dma = &mfi->mfi_data_dma;
	mfi_ld_ref_t *ld_sync = dma->ld_buf;
	mfi_dcmd_payload_t *dcmd = &mfi->mfi_frame->mf_dcmd;
	uint32_t ld;

	bzero(dma->ld_buf, dma->ld_len);

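	/* Build the LD target map: one target ID/seqnum pair per LD. */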
	rw_enter(&lmrc->l_raidmap_lock, RW_READER);
	rm = lmrc->l_raidmap;
	for (ld = 0; ld < rm->rm_ld_count; ld++) {
		lmrc_ld_raid_t *lr = lmrc_ld_raid_get(ld, rm);

		ASSERT(lr != NULL);

		ld_sync[ld].lr_tgtid = lr->lr_target_id;
		ld_sync[ld].lr_seqnum = lr->lr_seq_num;
	}
	dcmd->md_mbox_8[0] = rm->rm_ld_count;
	rw_exit(&lmrc->l_raidmap_lock);

	ASSERT(mutex_owned(&mfi->mfi_lock));
	lmrc_issue_mfi(lmrc, mfi, lmrc_complete_sync_raidmap);
}

/*
 * lmrc_complete_sync_raidmap
 *
 * The firmware completed our request to sync the LD target map, indicating
 * that the configuration has changed. There's a new RAID map in the DMA
 * memory.
 */
static void
lmrc_complete_sync_raidmap(lmrc_t *lmrc, lmrc_mfi_cmd_t *mfi)
{
	mfi_header_t *hdr = &mfi->mfi_frame->mf_hdr;
	lmrc_dma_t *dma = &mfi->mfi_data_dma;
	lmrc_fw_raid_map_t *rm = dma->ld_buf;

	ASSERT(mutex_owned(&mfi->mfi_lock));

	if (hdr->mh_cmd_status != MFI_STAT_OK) {
		/* Was the command aborted? */
		if (hdr->mh_cmd_status == MFI_STAT_NOT_FOUND)
			return;

		dev_err(lmrc->l_dip, CE_WARN,
		    "!LD target map sync failed, status = %d",
		    hdr->mh_cmd_status);
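		/*
		 * We may be running in interrupt context, so defer
		 * returning the command to the taskq.
		 */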
		taskq_dispatch_ent(lmrc->l_taskq, (task_func_t *)lmrc_put_mfi,
		    mfi, TQ_NOSLEEP, &mfi->mfi_tqent);
		return;
	}

	if (lmrc_validate_raidmap(lmrc, rm) != DDI_SUCCESS)
		return;

	rw_enter(&lmrc->l_raidmap_lock, RW_WRITER);
	VERIFY3U(lmrc->l_raidmap->rm_raidmap_sz, ==, dma->ld_len);
	bcopy(rm, lmrc->l_raidmap, lmrc->l_raidmap->rm_raidmap_sz);
	rw_exit(&lmrc->l_raidmap_lock);
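	/* Re-issue the command so FW notifies us of the next change. */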
	lmrc_sync_raidmap_again(lmrc, mfi);
}

/*
 * lmrc_validate_raidmap
 *
 * Basic sanity checks of a RAID map as returned by the firmware.
 */
static int
lmrc_validate_raidmap(lmrc_t *lmrc, lmrc_fw_raid_map_t *raidmap)
{
	lmrc_raid_map_desc_t *desc;
	int i;

	/* Do a basic sanity check of the descriptor table offset and sizes. */
	if (raidmap->rm_desc_table_off > raidmap->rm_raidmap_sz)
		return (DDI_FAILURE);
	if (raidmap->rm_desc_table_off + raidmap->rm_desc_table_sz >
	    raidmap->rm_raidmap_sz)
		return (DDI_FAILURE);
	if (raidmap->rm_desc_table_nelem != LMRC_RAID_MAP_DESC_TYPES_COUNT)
		return (DDI_FAILURE);
	if (raidmap->rm_desc_table_sz !=
	    raidmap->rm_desc_table_nelem * sizeof (lmrc_raid_map_desc_t))
		return (DDI_FAILURE);

	desc = (lmrc_raid_map_desc_t *)
	    ((uint8_t *)raidmap + raidmap->rm_desc_table_off);

	/* Fill in descriptor pointers */
	for (i = 0; i < raidmap->rm_desc_table_nelem; i++) {
		/* Do a basic sanity check of the descriptor itself. */
		if (desc[i].rmd_type >= LMRC_RAID_MAP_DESC_TYPES_COUNT)
			return (DDI_FAILURE);
		if (desc[i].rmd_off + raidmap->rm_desc_table_off +
		    raidmap->rm_desc_table_sz >
		    raidmap->rm_raidmap_sz)
			return (DDI_FAILURE);
		if (desc[i].rmd_off + desc[i].rmd_bufsz +
		    raidmap->rm_desc_table_off + raidmap->rm_desc_table_sz >
		    raidmap->rm_raidmap_sz)
			return (DDI_FAILURE);

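		/*
		 * Descriptor offsets are relative to the end of the
		 * descriptor table.
		 */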
		raidmap->rm_desc_ptrs[desc[i].rmd_type] = (void *)
		    ((uint8_t *)desc + raidmap->rm_desc_table_sz +
		    desc[i].rmd_off);
	}

	return (DDI_SUCCESS);
}

/*
 * lmrc_setup_raidmap
 *
 * Get the current RAID map from the firmware. If it validates, replace the
 * copy in the soft state and send a LD target map to the firmware.
 */
int
lmrc_setup_raidmap(lmrc_t *lmrc)
{
	lmrc_fw_raid_map_t *raidmap;
	int ret;

	ret = lmrc_get_raidmap(lmrc, &raidmap);
	if (ret != DDI_SUCCESS)
		return (ret);

	ret = lmrc_validate_raidmap(lmrc, raidmap);
	if (ret != DDI_SUCCESS) {
		kmem_free(raidmap, raidmap->rm_raidmap_sz);
		return (ret);
	}

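	/* Swap in the new RAID map while holding the lock as writer. */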
	rw_enter(&lmrc->l_raidmap_lock, RW_WRITER);
	lmrc_free_raidmap(lmrc);
	lmrc->l_raidmap = raidmap;
	rw_exit(&lmrc->l_raidmap_lock);

	ret = lmrc_sync_raidmap(lmrc);

	return (ret);
}

/*
 * lmrc_free_raidmap
 *
 * Free the buffer used to hold the RAID map.
 */
void
lmrc_free_raidmap(lmrc_t *lmrc)
{
	if (lmrc->l_raidmap != NULL) {
		kmem_free(lmrc->l_raidmap, lmrc->l_raidmap->rm_raidmap_sz);
		lmrc->l_raidmap = NULL;
	}
}

/*
 * lmrc_ld_tm_capable
 *
 * Check the RAID map to determine whether a LD supports task management.
 */
boolean_t
lmrc_ld_tm_capable(lmrc_t *lmrc, uint16_t tgtid)
{
	boolean_t tm_capable = B_FALSE;

	rw_enter(&lmrc->l_raidmap_lock, RW_READER);
	if (lmrc->l_raidmap != NULL) {
		uint16_t ld_id = lmrc_ld_id_get(tgtid, lmrc->l_raidmap);
		lmrc_ld_raid_t *lr = lmrc_ld_raid_get(ld_id, lmrc->l_raidmap);

		if (lr->lr_cap.lc_tm_cap != 0)
			tm_capable = B_TRUE;
	}
	rw_exit(&lmrc->l_raidmap_lock);

	return (tm_capable);
}


/*
 * lmrc_raid_tgt_activate_cb
 *
 * Set up a tgt structure for a newly discovered LD.
 */
static void
lmrc_raid_tgt_activate_cb(void *tgtmap_priv, char *tgt_addr,
    scsi_tgtmap_tgt_type_t type, void **tgt_privp)
{
	lmrc_t *lmrc = tgtmap_priv;
	lmrc_tgt_t *tgt = *tgt_privp;
	uint16_t tgtid = tgt - lmrc->l_targets;

	VERIFY(lmrc == tgt->tgt_lmrc);

	VERIFY3U(tgtid, <, LMRC_MAX_LD);

	lmrc_tgt_init(tgt, tgtid, tgt_addr, NULL);
}

/*
 * lmrc_raid_tgt_deactivate_cb
 *
 * Tear down the tgt structure of a LD that is no longer present.
 */
static boolean_t
lmrc_raid_tgt_deactivate_cb(void *tgtmap_priv, char *tgtaddr,
    scsi_tgtmap_tgt_type_t type, void *tgt_priv, scsi_tgtmap_deact_rsn_t deact)
{
	lmrc_t *lmrc = tgtmap_priv;
	lmrc_tgt_t *tgt = tgt_priv;

	VERIFY(lmrc == tgt->tgt_lmrc);

	lmrc_tgt_clear(tgt);

	return (B_FALSE);
}

/*
 * lmrc_raid_send_inquiry
 *
 * Fake a scsi_device and scsi_address, use the SCSA functions to allocate
 * a buf and a scsi_pkt, and issue an INQUIRY command to the target. Return
 * the buf on success, NULL otherwise.
 */
static struct buf *
lmrc_raid_send_inquiry(lmrc_t *lmrc, lmrc_tgt_t *tgt, uint8_t evpd,
    uint8_t page_code)
{
	struct buf *inq_bp = NULL;
	struct scsi_pkt *inq_pkt = NULL;
	const size_t len = 0xf0; /* max INQUIRY length */
	struct scsi_device sd;
	int ret;

	/*
	 * Fake a scsi_device and scsi_address so we can use the scsi
	 * functions, which in turn call our tran_setup_pkt and tran_start
	 * functions.
	 */
	bzero(&sd, sizeof (sd));
	sd.sd_address.a_hba_tran = ddi_get_driver_private(lmrc->l_raid_dip);
	sd.sd_address.a.a_sd = &sd;
	scsi_device_hba_private_set(&sd, tgt);

	/*
	 * Get a buffer for INQUIRY.
	 */
	inq_bp = scsi_alloc_consistent_buf(&sd.sd_address, NULL,
	    len, B_READ, SLEEP_FUNC, NULL);

	if (inq_bp == NULL)
		goto out;

	inq_pkt = scsi_init_pkt(&sd.sd_address, NULL, inq_bp, CDB_GROUP0,
	    sizeof (struct scsi_arq_status), 0, PKT_CONSISTENT, SLEEP_FUNC,
	    NULL);

	if (inq_pkt == NULL)
		goto fail;

	(void) scsi_setup_cdb((union scsi_cdb *)inq_pkt->pkt_cdbp,
	    SCMD_INQUIRY, 0, len, 0);
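	/* scsi_setup_cdb() doesn't set EVPD or the page code; do it here. */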
	inq_pkt->pkt_cdbp[1] = evpd;
	inq_pkt->pkt_cdbp[2] = page_code;

	ret = scsi_poll(inq_pkt);

	scsi_destroy_pkt(inq_pkt);

	if (ret != 0) {
fail:
		scsi_free_consistent_buf(inq_bp);
		inq_bp = NULL;
	}

out:
	return (inq_bp);
}

/*
 * lmrc_raid_get_wwn
 *
 * LDs may have a WWN, but the hardware doesn't just tell us about it.
 * Send an INQUIRY to the target and get VPD page 0x83. If the target
 * does have a WWN, return it.
 */
static uint64_t
lmrc_raid_get_wwn(lmrc_t *lmrc, uint8_t tgtid)
{
	lmrc_tgt_t *tgt = &lmrc->l_targets[tgtid];
	char *guid = NULL;
	struct buf *inq_bp = NULL, *inq83_bp = NULL;
	uint64_t wwn = 0;
	ddi_devid_t devid;
	int ret;

	/*
	 * Make sure we have the target ID set in the target structure.
	 */
	rw_enter(&tgt->tgt_lock, RW_WRITER);
	VERIFY3U(tgt->tgt_lmrc, ==, lmrc);
	if (tgt->tgt_dev_id == LMRC_DEVHDL_INVALID)
		tgt->tgt_dev_id = tgtid;
	else
		VERIFY3U(tgt->tgt_dev_id, ==, tgtid);
	rw_exit(&tgt->tgt_lock);

	/* Get basic INQUIRY data from device. */
	inq_bp = lmrc_raid_send_inquiry(lmrc, tgt, 0, 0);
	if (inq_bp == NULL)
		goto fail;

	/* Get VPD 83 from INQUIRY. */
	inq83_bp = lmrc_raid_send_inquiry(lmrc, tgt, 1, 0x83);
	if (inq83_bp == NULL)
		goto fail;

	/* Try to turn the VPD83 data into a devid. */
	ret = ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION1,
	    NULL, (uchar_t *)inq_bp->b_un.b_addr, sizeof (struct scsi_inquiry),
	    NULL, 0, (uchar_t *)inq83_bp->b_un.b_addr, inq83_bp->b_bcount,
	    &devid);
	if (ret != DDI_SUCCESS)
		goto fail;

	/* Extract the GUID from the devid. */
	guid = ddi_devid_to_guid(devid);
	if (guid == NULL)
		goto fail;

	/* Convert the GUID to a WWN. */
	(void) scsi_wwnstr_to_wwn(guid, &wwn);

	ddi_devid_free_guid(guid);

fail:
	if (inq_bp != NULL)
		scsi_free_consistent_buf(inq_bp);
	if (inq83_bp != NULL)
		scsi_free_consistent_buf(inq83_bp);

	return (wwn);
}

/*
 * lmrc_raid_update_tgtmap
 *
 * Feed the LD target ID list into the target map. Try to get a WWN for
 * each LD.
 */
static int
lmrc_raid_update_tgtmap(lmrc_t *lmrc, mfi_ld_tgtid_list_t *ld_list)
{
	int ret;
	int i;

	if (ld_list->ltl_count > lmrc->l_fw_supported_vd_count)
		return (DDI_FAILURE);

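	/* Report the full LD set: begin, add each LD, then end. */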
	ret = scsi_hba_tgtmap_set_begin(lmrc->l_raid_tgtmap);
	if (ret != DDI_SUCCESS)
		return (ret);

	for (i = 0; i < ld_list->ltl_count; i++) {
		uint8_t tgtid = ld_list->ltl_tgtid[i];
		char name[SCSI_WWN_BUFLEN];
		uint64_t wwn;

		if (tgtid > lmrc->l_fw_supported_vd_count) {
			dev_err(lmrc->l_dip, CE_WARN,
			    "!%s: invalid LD tgt id %d", __func__, tgtid);
			goto fail;
		}

		wwn = lmrc_raid_get_wwn(lmrc, tgtid);
		if (wwn != 0)
			(void) scsi_wwn_to_wwnstr(wwn, 0, name);
		else
			(void) snprintf(name, sizeof (name), "%d", tgtid);

		ret = scsi_hba_tgtmap_set_add(lmrc->l_raid_tgtmap,
		    SCSI_TGT_SCSI_DEVICE, name, &lmrc->l_targets[tgtid]);

		if (ret != DDI_SUCCESS)
			goto fail;
	}

	return (scsi_hba_tgtmap_set_end(lmrc->l_raid_tgtmap, 0));

fail:
	(void) scsi_hba_tgtmap_set_flush(lmrc->l_raid_tgtmap);
	return (DDI_FAILURE);
}

/*
 * lmrc_get_ld_list
 *
 * Query the controller for a list of currently known LDs. Use the information
 * to update the target map.
 */
int
lmrc_get_ld_list(lmrc_t *lmrc)
{
	mfi_dcmd_payload_t *dcmd;
	lmrc_mfi_cmd_t *mfi;
	int ret;

	/* If the raid iport isn't attached yet, just return success. */
	if (!INITLEVEL_ACTIVE(lmrc, LMRC_INITLEVEL_RAID))
		return (DDI_SUCCESS);

	mfi = lmrc_get_dcmd(lmrc, MFI_FRAME_DIR_READ, MFI_DCMD_LD_LIST_QUERY,
	    sizeof (mfi_ld_tgtid_list_t) + lmrc->l_fw_supported_vd_count, 1);

	if (mfi == NULL)
		return (DDI_FAILURE);

	dcmd = &mfi->mfi_frame->mf_dcmd;
	dcmd->md_mbox_8[0] = MFI_LD_QUERY_TYPE_EXPOSED_TO_HOST;

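	/* Set the mbox flag for the extended LD list on 256 VD capable HW. */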
	if (lmrc->l_max_256_vd_support)
		dcmd->md_mbox_8[2] = 1;

	ret = lmrc_issue_blocked_mfi(lmrc, mfi);

	if (ret != DDI_SUCCESS)
		goto out;

	ret = lmrc_raid_update_tgtmap(lmrc, mfi->mfi_data_dma.ld_buf);

out:
	lmrc_put_dcmd(lmrc, mfi);
	return (ret);
}

/*
 * lmrc_raid_aen_handler
 *
 * Handle AENs with locale code MFI_EVT_LOCALE_LD. If the LD configuration
 * changed, update the LD list and target map.
 */
int
lmrc_raid_aen_handler(lmrc_t *lmrc, mfi_evt_detail_t *evt)
{
	int ret = DDI_SUCCESS;

	switch (evt->evt_code) {
	case MFI_EVT_LD_CC_STARTED:
	case MFI_EVT_LD_CC_PROGRESS:
	case MFI_EVT_LD_CC_COMPLETE:
		/*
		 * Consistency Check. I/O is possible during consistency check,
		 * so there's no need to do anything.
		 */
		break;

	case MFI_EVT_LD_FAST_INIT_STARTED:
	case MFI_EVT_LD_FULL_INIT_STARTED:
		/*
		 * A LD initialization process has been started.
		 */
		ret = lmrc_get_ld_list(lmrc);
		break;

	case MFI_EVT_LD_BG_INIT_PROGRESS:
	case MFI_EVT_LD_INIT_PROGRESS:
		/*
		 * FULL INIT reports these for every percent of completion.
		 * Ignore.
		 */
		break;

	case MFI_EVT_LD_INIT_ABORTED:
	case MFI_EVT_LD_INIT_COMPLETE:
		/*
		 * The LD initialization has ended, one way or another.
		 */
		ret = lmrc_get_ld_list(lmrc);
		break;

	case MFI_EVT_LD_BBT_CLEARED:
		/*
		 * The Bad Block Table for the LD has been cleared. This
		 * usually follows an INIT_COMPLETE, but may occur in other
		 * situations. Ignore.
		 */
		break;

	case MFI_EVT_LD_PROP_CHANGED:
		/*
		 * Happens when LD props are changed, such as setting the
		 * "hidden" property. There's little we can do here as we
		 * don't know which property changed which way. In any case,
		 * this is usually followed by a HOST BUS SCAN REQD which
		 * will handle any changes.
		 */
		break;

	case MFI_EVT_LD_OFFLINE:
		/*
		 * Not sure when this happens, but since the LD is offline we
		 * should just remove it from the target map.
		 */
		ret = lmrc_get_ld_list(lmrc);
		break;

	case MFI_EVT_LD_DELETED:
		/*
		 * A LD was deleted, remove it from the target map.
		 */
		ret = lmrc_get_ld_list(lmrc);
		break;

	case MFI_EVT_LD_OPTIMAL:
		/*
		 * There might be several cases when this event occurs,
		 * in particular when a LD is created. In that case it's the
		 * first of several events, so we can ignore it.
		 */
		break;

	case MFI_EVT_LD_CREATED:
		/*
		 * This is the 2nd event generated when a LD is created, and
		 * it's the one FreeBSD and Linux act on. Add the LD to the
		 * target map.
		 */
		ret = lmrc_get_ld_list(lmrc);
		break;

	case MFI_EVT_LD_AVAILABLE:
		/*
		 * This event happens last when a LD is created, but there may
		 * be other scenarios where this occurs. Ignore it for now.
		 */
		break;

	case MFI_EVT_LD_STATE_CHANGE:
		/*
		 * Not sure when this happens, but updating the LD list is
		 * probably a good idea.
		 */
		ret = lmrc_get_ld_list(lmrc);
		break;

	default:
		ret = DDI_FAILURE;
	}

	return (ret);
}

int
lmrc_raid_attach(dev_info_t *dip)
{
	scsi_hba_tran_t *tran = ddi_get_driver_private(dip);
	dev_info_t *pdip = ddi_get_parent(dip);
	lmrc_t *lmrc = ddi_get_soft_state(lmrc_state, ddi_get_instance(pdip));
	int ret;

	VERIFY(tran != NULL);
	VERIFY(lmrc != NULL);

	if (lmrc->l_fw_fault)
		return (DDI_FAILURE);

	tran->tran_hba_private = lmrc;
	lmrc->l_raid_dip = dip;

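	/* Create the FULLSET tgtmap for RAID devices (LDs). */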
	ret = scsi_hba_tgtmap_create(dip, SCSI_TM_FULLSET, MICROSEC,
	    2 * MICROSEC, lmrc, lmrc_raid_tgt_activate_cb,
	    lmrc_raid_tgt_deactivate_cb, &lmrc->l_raid_tgtmap);
	if (ret != DDI_SUCCESS)
		return (ret);

	ret = lmrc_setup_raidmap(lmrc);
	if (ret != DDI_SUCCESS) {
		dev_err(lmrc->l_dip, CE_WARN, "!RAID map setup failed.");
		return (DDI_FAILURE);
	}

	INITLEVEL_SET(lmrc, LMRC_INITLEVEL_RAID);

	ret = lmrc_get_ld_list(lmrc);
	if (ret != DDI_SUCCESS) {
		dev_err(lmrc->l_dip, CE_WARN, "!Failed to get LD list.");
		return (ret);
	}

	return (DDI_SUCCESS);
}

int
lmrc_raid_detach(dev_info_t *dip)
{
	dev_info_t *pdip = ddi_get_parent(dip);
	lmrc_t *lmrc = ddi_get_soft_state(lmrc_state, ddi_get_instance(pdip));

	VERIFY(lmrc != NULL);
	INITLEVEL_CLEAR(lmrc, LMRC_INITLEVEL_RAID);

	if (lmrc->l_raid_tgtmap != NULL) {
		scsi_hba_tgtmap_destroy(lmrc->l_raid_tgtmap);
		lmrc->l_raid_tgtmap = NULL;
	}

	lmrc->l_raid_dip = NULL;

	return (DDI_SUCCESS);
}