/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2024 Racktop Systems, Inc.
 */

/*
 * This file implements the iport and tgtmap for physical devices on lmrc.
 *
 * When the phys iport is attached, a FULLSET tgtmap is created for physical
 * devices (PDs).
 *
 * During attach or as a result of an async event received from the hardware,
 * we'll get the PD list from the HBA and populate the tgtmap with what we have
 * found. The PD list includes the SAS WWN of each device found, which we will
 * use for the unit address.
 *
 * In the target activation callback, we'll retrieve the PD info from the HBA
 * and pass it to lmrc_tgt_init(). This contains additional information such as
 * the device and interconnect types.
 */

#include <sys/ddi.h>
#include <sys/sunddi.h>

#include <sys/scsi/adapters/mfi/mfi.h>
#include <sys/scsi/adapters/mfi/mfi_evt.h>
#include <sys/scsi/adapters/mfi/mfi_pd.h>

#include "lmrc.h"
#include "lmrc_reg.h"
#include "lmrc_raid.h"
#include "lmrc_phys.h"

static int lmrc_get_pdmap(lmrc_t *, mfi_pd_map_t **);
static int lmrc_sync_pdmap(lmrc_t *, size_t);
static void lmrc_complete_sync_pdmap(lmrc_t *, lmrc_mfi_cmd_t *);

static mfi_pd_info_t *lmrc_get_pd_info(lmrc_t *, uint16_t);
static void lmrc_phys_tgt_activate_cb(void *, char *, scsi_tgtmap_tgt_type_t,
    void **);
static boolean_t lmrc_phys_tgt_deactivate_cb(void *, char *,
    scsi_tgtmap_tgt_type_t, void *, scsi_tgtmap_deact_rsn_t);
static int lmrc_phys_update_tgtmap(lmrc_t *, mfi_pd_list_t *);

/*
 * lmrc_get_pdmap
 *
 * Get the physical device map from the firmware. Return a minimally sized
 * copy.
 */
static int
lmrc_get_pdmap(lmrc_t *lmrc, mfi_pd_map_t **pdmap)
{
	uint32_t pdmap_sz = sizeof (mfi_pd_map_t) +
	    sizeof (mfi_pd_cfg_t) * MFI_MAX_PHYSICAL_DRIVES;
	lmrc_mfi_cmd_t *mfi;
	mfi_pd_map_t *pm;
	int ret;

	mfi = lmrc_get_dcmd(lmrc, MFI_FRAME_DIR_READ,
	    MFI_DCMD_SYSTEM_PD_MAP_GET_INFO, pdmap_sz, 4);

	if (mfi == NULL)
		return (DDI_FAILURE);

	ret = lmrc_issue_blocked_mfi(lmrc, mfi);

	if (ret != DDI_SUCCESS)
		goto out;

	pm = mfi->mfi_data_dma.ld_buf;

	if (pm->pm_count > MFI_MAX_PHYSICAL_DRIVES) {
		dev_err(lmrc->l_dip, CE_WARN,
		    "!FW reports too many PDs: %d", pm->pm_count);
		ret = DDI_FAILURE;
		goto out;
	}

	pdmap_sz = sizeof (mfi_pd_map_t) + pm->pm_count * sizeof (mfi_pd_cfg_t);
	*pdmap = kmem_zalloc(pdmap_sz, KM_SLEEP);
	bcopy(pm, *pdmap, pdmap_sz);

out:
	lmrc_put_dcmd(lmrc, mfi);
	return (ret);
}

/*
 * lmrc_sync_pdmap
 *
 * Send the physical device map to the firmware. The command will complete
 * when the firmware detects a change.
 *
 * mbox byte values:
 * [0]: PEND_FLAG, delay completion until a config change is pending
 */
static int
lmrc_sync_pdmap(lmrc_t *lmrc, size_t pd_count)
{
	uint32_t pdmap_sz = sizeof (mfi_pd_map_t) +
	    pd_count * sizeof (mfi_pd_cfg_t);
	mfi_dcmd_payload_t *dcmd;
	lmrc_mfi_cmd_t *mfi;

	mfi = lmrc_get_dcmd(lmrc, MFI_FRAME_DIR_WRITE,
	    MFI_DCMD_SYSTEM_PD_MAP_GET_INFO, pdmap_sz, 4);

	if (mfi == NULL)
		return (DDI_FAILURE);

	dcmd = &mfi->mfi_frame->mf_dcmd;
	dcmd->md_mbox_8[0] = MFI_DCMD_MBOX_PEND_FLAG;

	mutex_enter(&mfi->mfi_lock);
	lmrc_issue_mfi(lmrc, mfi, lmrc_complete_sync_pdmap);
	mutex_exit(&mfi->mfi_lock);

	return (DDI_SUCCESS);
}

/*
 * lmrc_complete_sync_pdmap
 *
 * The PDMAP GET INFO command completed, most likely due to the hardware
 * detecting a change and informing us.
 */
static void
lmrc_complete_sync_pdmap(lmrc_t *lmrc, lmrc_mfi_cmd_t *mfi)
{
	mfi_header_t *hdr = &mfi->mfi_frame->mf_hdr;
	lmrc_dma_t *dma = &mfi->mfi_data_dma;
	mfi_pd_map_t *pm = dma->ld_buf;
	uint32_t pdmap_sz = sizeof (mfi_pd_map_t) +
	    lmrc->l_pdmap->pm_count * sizeof (mfi_pd_cfg_t);

	ASSERT(mutex_owned(&mfi->mfi_lock));

	if (hdr->mh_cmd_status != MFI_STAT_OK) {
		/* Was the command aborted? */
		if (hdr->mh_cmd_status == MFI_STAT_NOT_FOUND)
			return;

		/*
		 * In the case of any other error, log the error and schedule
		 * a taskq to clean up the command.
		 */
		dev_err(lmrc->l_dip, CE_WARN,
		    "!PD map sync failed, status = %d",
		    hdr->mh_cmd_status);
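		/*
		 * Since we can no longer keep our copy of the PD map in sync,
		 * stop using the JBOD sequence number fast path.
		 */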
		lmrc->l_use_seqnum_jbod_fp = B_FALSE;
		taskq_dispatch_ent(lmrc->l_taskq, (task_func_t *)lmrc_put_mfi,
		    mfi, TQ_NOSLEEP, &mfi->mfi_tqent);
		return;
	}

	VERIFY3U(pdmap_sz, ==, dma->ld_len);

	/* Update our copy of the pdmap and restart the command. */
	rw_enter(&lmrc->l_pdmap_lock, RW_WRITER);
	bcopy(pm, lmrc->l_pdmap, pdmap_sz);
	rw_exit(&lmrc->l_pdmap_lock);
	bzero(pm, pdmap_sz);
	lmrc_issue_mfi(lmrc, mfi, lmrc_complete_sync_pdmap);
}

/*
 * lmrc_setup_pdmap
 *
 * Get the physical device map from the firmware and keep a copy in the soft
 * state, then sync it back to the firmware so it will notify us of changes.
 */
int
lmrc_setup_pdmap(lmrc_t *lmrc)
{
	mfi_pd_map_t *pdmap = NULL;
	int ret;

	ret = lmrc_get_pdmap(lmrc, &pdmap);
	if (ret != DDI_SUCCESS)
		return (ret);

	rw_enter(&lmrc->l_pdmap_lock, RW_WRITER);
	ASSERT(lmrc->l_pdmap == NULL);
	lmrc->l_pdmap = pdmap;
	rw_exit(&lmrc->l_pdmap_lock);

	ret = lmrc_sync_pdmap(lmrc, pdmap->pm_count);
	return (ret);
}

/*
 * lmrc_free_pdmap
 *
 * Free the buffer used to hold the physical device map.
 */
void
lmrc_free_pdmap(lmrc_t *lmrc)
{
	if (lmrc->l_pdmap != NULL) {
		uint32_t pdmap_sz = sizeof (mfi_pd_map_t) +
		    lmrc->l_pdmap->pm_count * sizeof (mfi_pd_cfg_t);
		kmem_free(lmrc->l_pdmap, pdmap_sz);
		lmrc->l_pdmap = NULL;
	}
}

/*
 * lmrc_pd_tm_capable
 *
 * Determine whether a PD can be sent TASK MGMT requests. By default we assume
 * it can't, unless the PD map indicates otherwise.
 */
boolean_t
lmrc_pd_tm_capable(lmrc_t *lmrc, uint16_t tgtid)
{
	boolean_t tm_capable = B_FALSE;

	rw_enter(&lmrc->l_pdmap_lock, RW_READER);
	if (lmrc->l_pdmap != NULL &&
	    lmrc->l_pdmap->pm_pdcfg[tgtid].pd_tgtid != LMRC_DEVHDL_INVALID &&
	    lmrc->l_pdmap->pm_pdcfg[tgtid].pd_tm_capable != 0)
		tm_capable = B_TRUE;
	rw_exit(&lmrc->l_pdmap_lock);

	return (tm_capable);
}

/*
 * lmrc_get_pd_info
 *
 * Get physical drive info from FW.
 */
static mfi_pd_info_t *
lmrc_get_pd_info(lmrc_t *lmrc, uint16_t dev_id)
{
	mfi_pd_info_t *pdinfo = NULL;
	lmrc_mfi_cmd_t *mfi;
	mfi_dcmd_payload_t *dcmd;
	int ret;

	mfi = lmrc_get_dcmd(lmrc, MFI_FRAME_DIR_READ, MFI_DCMD_PD_GET_INFO,
	    sizeof (mfi_pd_info_t), 1);

	if (mfi == NULL)
		return (NULL);

	dcmd = &mfi->mfi_frame->mf_dcmd;
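	/* Select the PD to query by passing its device id in the mbox. */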
	dcmd->md_mbox_16[0] = dev_id;

	ret = lmrc_issue_blocked_mfi(lmrc, mfi);

	if (ret != DDI_SUCCESS)
		goto out;

	pdinfo = kmem_zalloc(sizeof (mfi_pd_info_t), KM_SLEEP);
	bcopy(mfi->mfi_data_dma.ld_buf, pdinfo, sizeof (mfi_pd_info_t));

out:
	lmrc_put_dcmd(lmrc, mfi);
	return (pdinfo);
}

/*
 * lmrc_phys_tgt_activate_cb
 *
 * Set up a tgt structure for a newly discovered PD.
 */
static void
lmrc_phys_tgt_activate_cb(void *tgtmap_priv, char *tgt_addr,
    scsi_tgtmap_tgt_type_t type, void **tgt_privp)
{
	lmrc_t *lmrc = tgtmap_priv;
	lmrc_tgt_t *tgt = *tgt_privp;
	uint16_t dev_id = tgt - lmrc->l_targets;
	mfi_pd_info_t *pd_info;

	VERIFY(lmrc == tgt->tgt_lmrc);

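	/*
	 * PD targets are stored after the LD targets in l_targets, so
	 * convert the target array index into a PD device id.
	 */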
	dev_id -= LMRC_MAX_LD;

	VERIFY3U(dev_id, <, LMRC_MAX_PD);

	pd_info = lmrc_get_pd_info(lmrc, dev_id);
	if (pd_info == NULL)
		return;

	lmrc_tgt_init(tgt, dev_id, tgt_addr, pd_info);
}

/*
 * lmrc_phys_tgt_deactivate_cb
 *
 * Tear down the tgt structure of a PD that is no longer present.
 */
static boolean_t
lmrc_phys_tgt_deactivate_cb(void *tgtmap_priv, char *tgt_addr,
    scsi_tgtmap_tgt_type_t type, void *tgt_priv, scsi_tgtmap_deact_rsn_t deact)
{
	lmrc_t *lmrc = tgtmap_priv;
	lmrc_tgt_t *tgt = tgt_priv;

	VERIFY(lmrc == tgt->tgt_lmrc);

	lmrc_tgt_clear(tgt);

	return (B_FALSE);
}

/*
 * lmrc_phys_update_tgtmap
 *
 * Feed the PD list into the target map.
 */
static int
lmrc_phys_update_tgtmap(lmrc_t *lmrc, mfi_pd_list_t *pd_list)
{
	int ret;
	int i;

	if (pd_list->pl_count > LMRC_MAX_PD)
		return (DDI_FAILURE);

	ret = scsi_hba_tgtmap_set_begin(lmrc->l_phys_tgtmap);
	if (ret != DDI_SUCCESS)
		return (ret);

	for (i = 0; i < pd_list->pl_count; i++) {
		mfi_pd_addr_t *pa = &pd_list->pl_addr[i];
		char name[SCSI_WWN_BUFLEN];

		if (pa->pa_dev_id > MFI_MAX_PHYSICAL_DRIVES) {
			dev_err(lmrc->l_dip, CE_WARN,
			    "!%s: invalid PD dev id %d", __func__,
			    pa->pa_dev_id);
			goto fail;
		}

		if (scsi_wwn_to_wwnstr(pa->pa_sas_addr[0], 1, name) == NULL)
			goto fail;

		ret = scsi_hba_tgtmap_set_add(lmrc->l_phys_tgtmap,
		    SCSI_TGT_SCSI_DEVICE, name,
		    &lmrc->l_targets[pa->pa_dev_id + LMRC_MAX_LD]);

		if (ret != DDI_SUCCESS)
			goto fail;
	}

	return (scsi_hba_tgtmap_set_end(lmrc->l_phys_tgtmap, 0));

fail:
	(void) scsi_hba_tgtmap_set_flush(lmrc->l_phys_tgtmap);
	return (DDI_FAILURE);
}

/*
 * lmrc_get_pd_list
 *
 * Get the list of physical devices from the firmware and update the target
 * map.
 */
int
lmrc_get_pd_list(lmrc_t *lmrc)
{
	lmrc_mfi_cmd_t *mfi;
	mfi_dcmd_payload_t *dcmd;
	int ret;

	/* If the phys iport isn't attached yet, just return success. */
	if (!INITLEVEL_ACTIVE(lmrc, LMRC_INITLEVEL_PHYS))
		return (DDI_SUCCESS);

	mfi = lmrc_get_dcmd(lmrc, MFI_FRAME_DIR_READ, MFI_DCMD_PD_LIST_QUERY,
	    sizeof (mfi_pd_list_t) + sizeof (mfi_pd_addr_t) * LMRC_MAX_PD, 1);

	if (mfi == NULL)
		return (DDI_FAILURE);

	dcmd = &mfi->mfi_frame->mf_dcmd;
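	/* Restrict the query to PDs which are exposed to the host. */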
	dcmd->md_mbox_8[0] = MFI_PD_QUERY_TYPE_EXPOSED_TO_HOST;

	ret = lmrc_issue_blocked_mfi(lmrc, mfi);

	if (ret != DDI_SUCCESS)
		goto out;

	ret = lmrc_phys_update_tgtmap(lmrc, mfi->mfi_data_dma.ld_buf);

out:
	lmrc_put_dcmd(lmrc, mfi);
	return (ret);
}

/*
 * lmrc_phys_aen_handler
 *
 * Handle AENs with locale code MFI_EVT_LOCALE_PD. If the PD configuration
 * changed, update the PD list and target map.
 */
int
lmrc_phys_aen_handler(lmrc_t *lmrc, mfi_evt_detail_t *evt)
{
	int ret = DDI_SUCCESS;

	switch (evt->evt_code) {
	case MFI_EVT_PD_INSERTED:
	case MFI_EVT_PD_REMOVED:
	case MFI_EVT_PD_CHANGED:
		/*
		 * For any change w.r.t. the PDs, refresh the PD list.
		 */
		ret = lmrc_get_pd_list(lmrc);
		break;

	case MFI_EVT_PD_PATROL_READ_PROGRESS:
	case MFI_EVT_PD_RESET:
		break;

	default:
		ret = DDI_FAILURE;
	}

	return (ret);
}

int
lmrc_phys_attach(dev_info_t *dip)
{
	scsi_hba_tran_t *tran = ddi_get_driver_private(dip);
	dev_info_t *pdip = ddi_get_parent(dip);
	lmrc_t *lmrc = ddi_get_soft_state(lmrc_state, ddi_get_instance(pdip));
	int ret;

	VERIFY(tran != NULL);
	VERIFY(lmrc != NULL);

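	/* Don't attach the phys iport if the firmware has faulted. */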
	if (lmrc->l_fw_fault)
		return (DDI_FAILURE);

	tran->tran_hba_private = lmrc;
	lmrc->l_phys_dip = dip;

	ret = scsi_hba_tgtmap_create(dip, SCSI_TM_FULLSET, MICROSEC,
	    2 * MICROSEC, lmrc, lmrc_phys_tgt_activate_cb,
	    lmrc_phys_tgt_deactivate_cb, &lmrc->l_phys_tgtmap);
	if (ret != DDI_SUCCESS)
		return (DDI_FAILURE);

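	/*
	 * The JBOD sequence number fast path requires a working PD map; if
	 * we fail to set one up, disable it.
	 */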
	if (lmrc->l_use_seqnum_jbod_fp)
		if (lmrc_setup_pdmap(lmrc) != DDI_SUCCESS)
			lmrc->l_use_seqnum_jbod_fp = B_FALSE;

	INITLEVEL_SET(lmrc, LMRC_INITLEVEL_PHYS);

	ret = lmrc_get_pd_list(lmrc);
	if (ret != DDI_SUCCESS) {
		dev_err(lmrc->l_dip, CE_WARN, "!Failed to get PD list.");
		return (ret);
	}

	return (DDI_SUCCESS);
}

int
lmrc_phys_detach(dev_info_t *dip)
{
	dev_info_t *pdip = ddi_get_parent(dip);
	lmrc_t *lmrc = ddi_get_soft_state(lmrc_state, ddi_get_instance(pdip));

	VERIFY(lmrc != NULL);
	INITLEVEL_CLEAR(lmrc, LMRC_INITLEVEL_PHYS);

	if (lmrc->l_phys_tgtmap != NULL) {
		scsi_hba_tgtmap_destroy(lmrc->l_phys_tgtmap);
		lmrc->l_phys_tgtmap = NULL;
	}

	lmrc->l_phys_dip = NULL;

	return (DDI_SUCCESS);
}