/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 * Copyright (c) 2014, Tegile Systems Inc. All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright 2023 Racktop Systems, Inc.
 */

/*
 * Copyright (c) 2000 to 2010, LSI Corporation.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms of all code within
 * this file that is exclusively owned by LSI, with or without
 * modification, is permitted provided that, in addition to the CDDL 1.0
 * License requirements, the following conditions are met:
 *
 * Neither the name of the author nor the names of its contributors may be
 * used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

/*
 * mptsas_raid - This file contains all the RAID related functions for the
 * MPT interface.
 */

#if defined(lint) || defined(DEBUG)
#define MPTSAS_DEBUG
#endif

#define MPI_RAID_VOL_PAGE_0_PHYSDISK_MAX 2

/*
 * standard header files
 */
#include <sys/note.h>
#include <sys/scsi/scsi.h>
#include <sys/byteorder.h>
#include <sys/raidioctl.h>

#pragma pack(1)

#include <sys/scsi/adapters/mpi/mpi2_type.h>
#include <sys/scsi/adapters/mpi/mpi2.h>
#include <sys/scsi/adapters/mpi/mpi2_cnfg.h>
#include <sys/scsi/adapters/mpi/mpi2_init.h>
#include <sys/scsi/adapters/mpi/mpi2_ioc.h>
#include <sys/scsi/adapters/mpi/mpi2_raid.h>
#include <sys/scsi/adapters/mpi/mpi2_tool.h>

#pragma pack()

/*
 * private header files.
 */
#include <sys/scsi/adapters/mpt_sas/mptsas_var.h>

static int mptsas_get_raid_wwid(mptsas_t *mpt, mptsas_raidvol_t *raidvol);

extern int mptsas_check_dma_handle(ddi_dma_handle_t handle);
extern int mptsas_check_acc_handle(ddi_acc_handle_t handle);
extern mptsas_target_t *mptsas_tgt_alloc(refhash_t *, uint16_t,
    uint64_t, uint32_t, mptsas_phymask_t, uint8_t);
static int
mptsas_raidconf_page_0_cb(mptsas_t *mpt, caddr_t page_memp,
    ddi_acc_handle_t accessp, uint16_t iocstatus, uint32_t iocloginfo,
    va_list ap)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(ap))
#endif
	pMpi2RaidConfigurationPage0_t raidconfig_page0;
	pMpi2RaidConfig0ConfigElement_t element;
	uint32_t *confignum;
	int rval = DDI_SUCCESS, i;
	uint8_t numelements, vol, disk;
	uint16_t elementtype, voldevhandle;
	uint16_t etype_vol, etype_pd, etype_hs;
	uint16_t etype_oce;
	m_raidconfig_t *raidconfig;
	uint64_t raidwwn;
	uint32_t native;
	mptsas_target_t *ptgt;
	uint32_t configindex;

	if (iocstatus == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) {
		return (DDI_FAILURE);
	}

	if (iocstatus != MPI2_IOCSTATUS_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "mptsas_get_raid_conf_page0 "
		    "config: IOCStatus=0x%x, IOCLogInfo=0x%x",
		    iocstatus, iocloginfo);
		rval = DDI_FAILURE;
		return (rval);
	}
	confignum = va_arg(ap, uint32_t *);
	configindex = va_arg(ap, uint32_t);
	raidconfig_page0 = (pMpi2RaidConfigurationPage0_t)page_memp;
	/*
	 * Get all RAID configurations.
	 */
	etype_vol = MPI2_RAIDCONFIG0_EFLAGS_VOLUME_ELEMENT;
	etype_pd = MPI2_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT;
	etype_hs = MPI2_RAIDCONFIG0_EFLAGS_HOT_SPARE_ELEMENT;
	etype_oce = MPI2_RAIDCONFIG0_EFLAGS_OCE_ELEMENT;
	/*
	 * Set up page address for next time through.
	 */
	*confignum = ddi_get8(accessp,
	    &raidconfig_page0->ConfigNum);

	/*
	 * Point to the right config in the structure.
	 * Increment the number of valid RAID configs.
	 */
	raidconfig = &mpt->m_raidconfig[configindex];
	mpt->m_num_raid_configs++;

	/*
	 * Set the native flag if this is not a foreign
	 * configuration.
	 */
	native = ddi_get32(accessp, &raidconfig_page0->Flags);
	if (native & MPI2_RAIDCONFIG0_FLAG_FOREIGN_CONFIG) {
		native = FALSE;
	} else {
		native = TRUE;
	}
	raidconfig->m_native = (uint8_t)native;

	/*
	 * Get volume information for the volumes in the
	 * config.
	 */
	numelements = ddi_get8(accessp, &raidconfig_page0->NumElements);
	vol = 0;
	disk = 0;
	element = (pMpi2RaidConfig0ConfigElement_t)
	    &raidconfig_page0->ConfigElement;

	for (i = 0; ((i < numelements) && native); i++, element++) {
		/*
		 * Get the element type. Could be Volume,
		 * PhysDisk, Hot Spare, or Online Capacity
		 * Expansion PhysDisk.
		 */
		elementtype = ddi_get16(accessp, &element->ElementFlags);
		elementtype &= MPI2_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE;

		/*
		 * For volumes, get the RAID settings and the
		 * WWID.
		 */
		if (elementtype == etype_vol) {
			voldevhandle = ddi_get16(accessp,
			    &element->VolDevHandle);
			raidconfig->m_raidvol[vol].m_israid = 1;
			raidconfig->m_raidvol[vol].
			    m_raidhandle = voldevhandle;
			/*
			 * Get the settings for the raid
			 * volume. This includes the
			 * DevHandles for the disks making up
			 * the raid volume.
			 */
			if (mptsas_get_raid_settings(mpt,
			    &raidconfig->m_raidvol[vol]))
				continue;

			/*
			 * Get the WWID of the RAID volume for
			 * SAS HBA
			 */
			if (mptsas_get_raid_wwid(mpt,
			    &raidconfig->m_raidvol[vol]))
				continue;

			raidwwn = raidconfig->m_raidvol[vol].
			    m_raidwwid;

			/*
			 * RAID uses phymask of 0.
			 */
			ptgt = mptsas_tgt_alloc(mpt->m_targets,
			    voldevhandle, raidwwn, 0, 0, 0);

			raidconfig->m_raidvol[vol].m_raidtgt =
			    ptgt;

			/*
			 * Increment volume index within this
			 * raid config.
			 */
			vol++;
		} else if ((elementtype == etype_pd) ||
		    (elementtype == etype_hs) ||
		    (elementtype == etype_oce)) {
			/*
			 * For all other element types, put
			 * their DevHandles in the phys disk
			 * list of the config. These are all
			 * some variation of a Phys Disk and
			 * this list is used to keep these
			 * disks from going online.
			 */
			raidconfig->m_physdisk_devhdl[disk] = ddi_get16(accessp,
			    &element->PhysDiskDevHandle);

			/*
			 * Increment disk index within this
			 * raid config.
			 */
			disk++;
		}
	}

	return (rval);
}

int
mptsas_get_raid_info(mptsas_t *mpt)
{
	int rval = DDI_SUCCESS;
	uint32_t confignum, pageaddress;
	uint8_t configindex;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Clear all RAID info before starting.
	 */
	bzero(mpt->m_raidconfig, sizeof (mpt->m_raidconfig));
	mpt->m_num_raid_configs = 0;

	configindex = 0;
	confignum = 0xff;
	pageaddress = MPI2_RAID_PGAD_FORM_GET_NEXT_CONFIGNUM | confignum;
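	/*
	 * Walk the RAID configuration pages using the GET_NEXT_CONFIGNUM
	 * page address form. The loop ends once the callback reports that
	 * no further configuration page exists or an error occurs.
	 */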
	while (rval == DDI_SUCCESS) {
		/*
		 * Get the header and config page. reply contains the reply
		 * frame, which holds status info for the request.
		 */
		rval = mptsas_access_config_page(mpt,
		    MPI2_CONFIG_ACTION_PAGE_READ_CURRENT,
		    MPI2_CONFIG_EXTPAGETYPE_RAID_CONFIG, 0, pageaddress,
		    mptsas_raidconf_page_0_cb, &confignum, configindex);
		configindex++;
		pageaddress = MPI2_RAID_PGAD_FORM_GET_NEXT_CONFIGNUM |
		    confignum;
	}

	return (rval);
}

static int
mptsas_raidvol_page_0_cb(mptsas_t *mpt, caddr_t page_memp,
    ddi_acc_handle_t accessp, uint16_t iocstatus, uint32_t iocloginfo,
    va_list ap)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(ap))
#endif
	pMpi2RaidVolPage0_t raidpage;
	int rval = DDI_SUCCESS, i;
	mptsas_raidvol_t *raidvol;
	uint8_t numdisks, volstate, voltype, physdisknum;
	uint32_t volsetting;
	uint32_t statusflags, resync_flag;

	if (iocstatus == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
		return (DDI_FAILURE);

	if (iocstatus != MPI2_IOCSTATUS_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "mptsas_raidvol_page0_cb "
		    "config: IOCStatus=0x%x, IOCLogInfo=0x%x",
		    iocstatus, iocloginfo);
		rval = DDI_FAILURE;
		return (rval);
	}

	raidvol = va_arg(ap, mptsas_raidvol_t *);

	raidpage = (pMpi2RaidVolPage0_t)page_memp;
	volstate = ddi_get8(accessp, &raidpage->VolumeState);
	volsetting = ddi_get32(accessp,
	    (uint32_t *)(void *)&raidpage->VolumeSettings);
	statusflags = ddi_get32(accessp, &raidpage->VolumeStatusFlags);
	voltype = ddi_get8(accessp, &raidpage->VolumeType);

	raidvol->m_state = volstate;
	raidvol->m_statusflags = statusflags;
	/*
	 * Volume size is not used right now. Set to 0.
	 */
	raidvol->m_raidsize = 0;
	raidvol->m_settings = volsetting;
	raidvol->m_raidlevel = voltype;

	if (statusflags & MPI2_RAIDVOL0_STATUS_FLAG_QUIESCED) {
		mptsas_log(mpt, CE_NOTE, "?Volume %d is quiesced\n",
		    raidvol->m_raidhandle);
	}

	if (statusflags &
	    MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) {
		mptsas_log(mpt, CE_NOTE, "?Volume %d is resyncing\n",
		    raidvol->m_raidhandle);
	}

	resync_flag = MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
	switch (volstate) {
	case MPI2_RAID_VOL_STATE_OPTIMAL:
		mptsas_log(mpt, CE_NOTE, "?Volume %d is "
		    "optimal\n", raidvol->m_raidhandle);
		break;
	case MPI2_RAID_VOL_STATE_DEGRADED:
		if ((statusflags & resync_flag) == 0) {
			mptsas_log(mpt, CE_WARN, "Volume %d "
			    "is degraded\n",
			    raidvol->m_raidhandle);
		}
		break;
	case MPI2_RAID_VOL_STATE_FAILED:
		mptsas_log(mpt, CE_WARN, "Volume %d is "
		    "failed\n", raidvol->m_raidhandle);
		break;
	case MPI2_RAID_VOL_STATE_MISSING:
		mptsas_log(mpt, CE_WARN, "Volume %d is "
		    "missing\n", raidvol->m_raidhandle);
		break;
	default:
		break;
	}
	numdisks = raidpage->NumPhysDisks;
	raidvol->m_ndisks = numdisks;
	for (i = 0; i < numdisks; i++) {
		physdisknum = raidpage->PhysDisk[i].PhysDiskNum;
		raidvol->m_disknum[i] = physdisknum;
		if (mptsas_get_physdisk_settings(mpt, raidvol,
		    physdisknum))
			break;
	}
	return (rval);
}

int
mptsas_get_raid_settings(mptsas_t *mpt, mptsas_raidvol_t *raidvol)
{
	int rval = DDI_SUCCESS;
	uint32_t page_address;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Get the header and config page. reply contains the reply frame,
	 * which holds status info for the request.
	 */
	page_address = (MPI2_RAID_VOLUME_PGAD_FORM_MASK &
	    MPI2_RAID_VOLUME_PGAD_FORM_HANDLE) | raidvol->m_raidhandle;
	rval = mptsas_access_config_page(mpt,
	    MPI2_CONFIG_ACTION_PAGE_READ_CURRENT,
	    MPI2_CONFIG_PAGETYPE_RAID_VOLUME, 0, page_address,
	    mptsas_raidvol_page_0_cb, raidvol);

	return (rval);
}

static int
mptsas_raidvol_page_1_cb(mptsas_t *mpt, caddr_t page_memp,
    ddi_acc_handle_t accessp, uint16_t iocstatus, uint32_t iocloginfo,
    va_list ap)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(ap))
#endif
	pMpi2RaidVolPage1_t raidpage;
	int rval = DDI_SUCCESS, i;
	uint8_t *sas_addr = NULL;
	uint8_t tmp_sas_wwn[SAS_WWN_BYTE_SIZE];
	uint64_t *sas_wwn;

	if (iocstatus != MPI2_IOCSTATUS_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "mptsas_raidvol_page_1_cb "
		    "config: IOCStatus=0x%x, IOCLogInfo=0x%x",
		    iocstatus, iocloginfo);
		rval = DDI_FAILURE;
		return (rval);
	}
	sas_wwn = va_arg(ap, uint64_t *);

	raidpage = (pMpi2RaidVolPage1_t)page_memp;
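	/*
	 * Read the volume WWID out of the DMA buffer one byte at a time
	 * through the access handle, then convert it from the little-endian
	 * wire format to host byte order.
	 */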
	sas_addr = (uint8_t *)(&raidpage->WWID);
	for (i = 0; i < SAS_WWN_BYTE_SIZE; i++) {
		tmp_sas_wwn[i] = ddi_get8(accessp, sas_addr + i);
	}
	bcopy(tmp_sas_wwn, sas_wwn, SAS_WWN_BYTE_SIZE);
	*sas_wwn = LE_64(*sas_wwn);
	return (rval);
}

static int
mptsas_get_raid_wwid(mptsas_t *mpt, mptsas_raidvol_t *raidvol)
{
	int rval = DDI_SUCCESS;
	uint32_t page_address;
	uint64_t sas_wwn;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Get the header and config page. reply contains the reply frame,
	 * which holds status info for the request.
	 */
	page_address = (MPI2_RAID_VOLUME_PGAD_FORM_MASK &
	    MPI2_RAID_VOLUME_PGAD_FORM_HANDLE) | raidvol->m_raidhandle;
	rval = mptsas_access_config_page(mpt,
	    MPI2_CONFIG_ACTION_PAGE_READ_CURRENT,
	    MPI2_CONFIG_PAGETYPE_RAID_VOLUME, 1, page_address,
	    mptsas_raidvol_page_1_cb, &sas_wwn);

	/*
	 * Get the required information from the page.
	 */
	if (rval == DDI_SUCCESS) {

		/*
		 * replace top nibble of WWID of RAID to '3' for OBP
		 */
		sas_wwn = MPTSAS_RAID_WWID(sas_wwn);
		raidvol->m_raidwwid = sas_wwn;
	}

	return (rval);
}

static int
mptsas_raidphydsk_page_0_cb(mptsas_t *mpt, caddr_t page_memp,
    ddi_acc_handle_t accessp, uint16_t iocstatus, uint32_t iocloginfo,
    va_list ap)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(ap))
#endif
	pMpi2RaidPhysDiskPage0_t diskpage;
	int rval = DDI_SUCCESS;
	uint16_t *devhdl;
	uint8_t *state;

	if (iocstatus == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
		return (DDI_FAILURE);

	if (iocstatus != MPI2_IOCSTATUS_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "mptsas_raidphydsk_page0_cb "
		    "config: IOCStatus=0x%x, IOCLogInfo=0x%x",
		    iocstatus, iocloginfo);
		rval = DDI_FAILURE;
		return (rval);
	}
	devhdl = va_arg(ap, uint16_t *);
	state = va_arg(ap, uint8_t *);
	diskpage = (pMpi2RaidPhysDiskPage0_t)page_memp;
	*devhdl = ddi_get16(accessp, &diskpage->DevHandle);
	*state = ddi_get8(accessp, &diskpage->PhysDiskState);
	return (rval);
}

int
mptsas_get_physdisk_settings(mptsas_t *mpt, mptsas_raidvol_t *raidvol,
    uint8_t physdisknum)
{
	int rval = DDI_SUCCESS, i;
	uint8_t state;
	uint16_t devhdl;
	uint32_t page_address;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Get the header and config page. reply contains the reply frame,
	 * which holds status info for the request.
	 */
	page_address = (MPI2_PHYSDISK_PGAD_FORM_MASK &
	    MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM) | physdisknum;
	rval = mptsas_access_config_page(mpt,
	    MPI2_CONFIG_ACTION_PAGE_READ_CURRENT,
	    MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK, 0, page_address,
	    mptsas_raidphydsk_page_0_cb, &devhdl, &state);

	/*
	 * Get the required information from the page.
	 */
	if (rval == DDI_SUCCESS) {
		for (i = 0; i < MPTSAS_MAX_DISKS_IN_VOL; i++) {
			/* find the correct position in the arrays */
			if (raidvol->m_disknum[i] == physdisknum)
				break;
		}
		raidvol->m_devhdl[i] = devhdl;

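		/*
		 * Map the MPI2 physical disk state onto the disk status
		 * values from <sys/raidioctl.h>: only an offline disk is
		 * reported as failed, hot spares and unconfigured or
		 * incompatible disks leave the status untouched, and
		 * everything else is treated as good.
		 */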
		switch (state) {
		case MPI2_RAID_PD_STATE_OFFLINE:
			raidvol->m_diskstatus[i] =
			    RAID_DISKSTATUS_FAILED;
			break;

		case MPI2_RAID_PD_STATE_HOT_SPARE:
		case MPI2_RAID_PD_STATE_NOT_CONFIGURED:
		case MPI2_RAID_PD_STATE_NOT_COMPATIBLE:
			break;

		case MPI2_RAID_PD_STATE_DEGRADED:
		case MPI2_RAID_PD_STATE_OPTIMAL:
		case MPI2_RAID_PD_STATE_REBUILDING:
		case MPI2_RAID_PD_STATE_ONLINE:
		default:
			raidvol->m_diskstatus[i] =
			    RAID_DISKSTATUS_GOOD;
			break;
		}
	}

	return (rval);
}

/*
 * RAID Action for System Shutdown. This request uses the dedicated TM slot to
 * avoid a call to mptsas_save_cmd. Since Solaris requires that the mutex is
 * not held during the mptsas_quiesce function, this RAID action must not use
 * the normal code path of requests and replies.
 */
void
mptsas_raid_action_system_shutdown(mptsas_t *mpt)
{
	pMpi2RaidActionRequest_t action;
	uint8_t ir_active = FALSE, reply_type;
	uint8_t function, found_reply = FALSE;
	uint16_t SMID, action_type;
	mptsas_slots_t *slots = mpt->m_active;
	int config, vol;
	mptsas_cmd_t *cmd;
	uint32_t reply_addr;
	uint64_t request_desc;
	int cnt;
	pMpi2ReplyDescriptorsUnion_t reply_desc_union;
	pMPI2DefaultReply_t reply;
	pMpi2AddressReplyDescriptor_t address_reply;

	/*
	 * Before doing the system shutdown RAID Action, make sure that the IOC
	 * supports IR and make sure there is a valid volume for the request.
	 */
	if (mpt->m_ir_capable) {
		for (config = 0; (config < mpt->m_num_raid_configs) &&
		    (!ir_active); config++) {
			for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
				if (mpt->m_raidconfig[config].m_raidvol[vol].
				    m_israid) {
					ir_active = TRUE;
					break;
				}
			}
		}
	}
	if (!ir_active) {
		return;
	}

	/*
	 * If TM slot is already being used (highly unlikely), show message and
	 * don't issue the RAID action.
	 */
	if (slots->m_slot[MPTSAS_TM_SLOT(mpt)] != NULL) {
		mptsas_log(mpt, CE_WARN, "RAID Action slot in use. Cancelling"
		    " System Shutdown RAID Action.\n");
		return;
	}

	/*
	 * Create the cmd and put it in the dedicated TM slot.
	 */
	cmd = &(mpt->m_event_task_mgmt.m_event_cmd);
	bzero((caddr_t)cmd, sizeof (*cmd));
	cmd->cmd_pkt = NULL;
	cmd->cmd_slot = MPTSAS_TM_SLOT(mpt);
	slots->m_slot[MPTSAS_TM_SLOT(mpt)] = cmd;

	/*
	 * Form message for raid action.
	 */
	action = (pMpi2RaidActionRequest_t)(mpt->m_req_frame +
	    (mpt->m_req_frame_size * cmd->cmd_slot));
	bzero(action, mpt->m_req_frame_size);
	action->Function = MPI2_FUNCTION_RAID_ACTION;
	action->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;

	/*
	 * Send RAID Action.
	 */
	(void) ddi_dma_sync(mpt->m_dma_req_frame_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);
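	/*
	 * Build a default request descriptor; the SMID (the dedicated TM
	 * slot number) is carried in bits 16-31 of the low dword.
	 */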
	request_desc = (cmd->cmd_slot << 16) +
	    MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	MPTSAS_START_CMD(mpt, request_desc);

	/*
	 * Even though reply does not matter because the system is shutting
	 * down, wait no more than 5 seconds here to get the reply just because
	 * we don't want to leave it hanging if it's coming. Poll because
	 * interrupts are disabled when this function is called.
	 */
	for (cnt = 0; cnt < 5000; cnt++) {
		/*
		 * Check for a reply.
		 */
		(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);

		reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
		    MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);

		if (ddi_get32(mpt->m_acc_post_queue_hdl,
		    &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
		    ddi_get32(mpt->m_acc_post_queue_hdl,
		    &reply_desc_union->Words.High) == 0xFFFFFFFF) {
			drv_usecwait(1000);
			continue;
		}

		/*
		 * There is a reply. If it's not an address reply, ignore it.
		 */
		reply_type = ddi_get8(mpt->m_acc_post_queue_hdl,
		    &reply_desc_union->Default.ReplyFlags);
		reply_type &= MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
		if (reply_type != MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
			goto clear_and_continue;
		}

		/*
		 * SMID must be the TM slot since that's what we're using for
		 * this RAID action. If not, ignore this reply.
		 */
		address_reply =
		    (pMpi2AddressReplyDescriptor_t)reply_desc_union;
		SMID = ddi_get16(mpt->m_acc_post_queue_hdl,
		    &address_reply->SMID);
		if (SMID != MPTSAS_TM_SLOT(mpt)) {
			goto clear_and_continue;
		}

		/*
		 * If reply frame is not in the proper range ignore it.
		 */
		reply_addr = ddi_get32(mpt->m_acc_post_queue_hdl,
		    &address_reply->ReplyFrameAddress);
		if ((reply_addr < mpt->m_reply_frame_dma_addr) ||
		    (reply_addr >= (mpt->m_reply_frame_dma_addr +
		    (mpt->m_reply_frame_size * mpt->m_free_queue_depth))) ||
		    ((reply_addr - mpt->m_reply_frame_dma_addr) %
		    mpt->m_reply_frame_size != 0)) {
			goto clear_and_continue;
		}

		/*
		 * If not a RAID action reply ignore it.
		 */
		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
		reply = (pMPI2DefaultReply_t)(mpt->m_reply_frame +
		    (reply_addr - mpt->m_reply_frame_dma_addr));
		function = ddi_get8(mpt->m_acc_reply_frame_hdl,
		    &reply->Function);
		if (function != MPI2_FUNCTION_RAID_ACTION) {
			goto clear_and_continue;
		}

		/*
		 * Finally, make sure this is the System Shutdown RAID action.
		 * If not, ignore reply.
		 */
		action_type = ddi_get16(mpt->m_acc_reply_frame_hdl,
		    &reply->FunctionDependent1);
		if (action_type !=
		    MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED) {
			goto clear_and_continue;
		}
		found_reply = TRUE;

	clear_and_continue:
		/*
		 * Clear the reply descriptor for re-use and increment index.
		 */
		ddi_put64(mpt->m_acc_post_queue_hdl,
		    &((uint64_t *)(void *)mpt->m_post_queue)[mpt->m_post_index],
		    0xFFFFFFFFFFFFFFFF);
		(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORDEV);

		/*
		 * Update the global reply index and keep looking for the
		 * reply if not found yet.
		 */
		if (++mpt->m_post_index == mpt->m_post_queue_depth) {
			mpt->m_post_index = 0;
		}
		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyPostHostIndex,
		    mpt->m_post_index);
		if (!found_reply) {
			continue;
		}

		break;
	}

	/*
	 * clear the used slot as the last step.
	 */
	slots->m_slot[MPTSAS_TM_SLOT(mpt)] = NULL;
}

int
mptsas_delete_volume(mptsas_t *mpt, uint16_t volid)
{
	int config, i = 0, vol = (-1);

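	/*
	 * Walk every known RAID configuration looking for the volume whose
	 * device handle matches the volume being deleted.
	 */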
	for (config = 0; (config < mpt->m_num_raid_configs) && (vol != i);
	    config++) {
		for (i = 0; i < MPTSAS_MAX_RAIDVOLS; i++) {
			if (mpt->m_raidconfig[config].m_raidvol[i].
			    m_raidhandle == volid) {
				vol = i;
				break;
			}
		}
	}

	if (vol < 0) {
		mptsas_log(mpt, CE_WARN, "raid doesn't exist at specified "
		    "target.");
		return (-1);
	}

	mpt->m_raidconfig[config].m_raidvol[vol].m_israid = 0;
	mpt->m_raidconfig[config].m_raidvol[vol].m_ndisks = 0;
	for (i = 0; i < MPTSAS_MAX_DISKS_IN_VOL; i++) {
		mpt->m_raidconfig[config].m_raidvol[vol].m_disknum[i] = 0;
		mpt->m_raidconfig[config].m_raidvol[vol].m_devhdl[i] = 0;
	}

	return (0);
}