/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 * Copyright (c) 2014, Tegile Systems Inc. All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 */

/*
 * Copyright (c) 2000 to 2010, LSI Corporation.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms of all code within
 * this file that is exclusively owned by LSI, with or without
 * modification, is permitted provided that, in addition to the CDDL 1.0
 * License requirements, the following conditions are met:
 *
 * Neither the name of the author nor the names of its contributors may be
 * used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

/*
 * mptsas_raid - This file contains all the RAID related functions for the
 * MPT interface.
 */

#if defined(lint) || defined(DEBUG)
#define	MPTSAS_DEBUG
#endif

#define	MPI_RAID_VOL_PAGE_0_PHYSDISK_MAX	2

/*
 * standard header files
 */
#include <sys/note.h>
#include <sys/scsi/scsi.h>
#include <sys/byteorder.h>
#include <sys/raidioctl.h>

#pragma pack(1)

#include <sys/scsi/adapters/mpt_sas/mpi/mpi2_type.h>
#include <sys/scsi/adapters/mpt_sas/mpi/mpi2.h>
#include <sys/scsi/adapters/mpt_sas/mpi/mpi2_cnfg.h>
#include <sys/scsi/adapters/mpt_sas/mpi/mpi2_init.h>
#include <sys/scsi/adapters/mpt_sas/mpi/mpi2_ioc.h>
#include <sys/scsi/adapters/mpt_sas/mpi/mpi2_raid.h>
#include <sys/scsi/adapters/mpt_sas/mpi/mpi2_tool.h>

#pragma pack()

/*
 * private header files.
 */
#include <sys/scsi/adapters/mpt_sas/mptsas_var.h>

static int mptsas_get_raid_wwid(mptsas_t *mpt, mptsas_raidvol_t *raidvol);

extern int mptsas_check_dma_handle(ddi_dma_handle_t handle);
extern int mptsas_check_acc_handle(ddi_acc_handle_t handle);
extern mptsas_target_t *mptsas_tgt_alloc(refhash_t *, uint16_t,
    uint64_t, uint32_t, mptsas_phymask_t, uint8_t);
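/*
 * Config page callbacks.  mptsas_access_config_page() reads the requested
 * configuration page and hands it to one of the callbacks below, along
 * with the DDI access handle for the page buffer, the IOCStatus and
 * IOCLogInfo from the reply, and the caller's trailing arguments packed
 * into a va_list.
 */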
static int
mptsas_raidconf_page_0_cb(mptsas_t *mpt, caddr_t page_memp,
    ddi_acc_handle_t accessp, uint16_t iocstatus, uint32_t iocloginfo,
    va_list ap)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(ap))
#endif
	pMpi2RaidConfigurationPage0_t raidconfig_page0;
	pMpi2RaidConfig0ConfigElement_t element;
	uint32_t *confignum;
	int rval = DDI_SUCCESS, i;
	uint8_t numelements, vol, disk;
	uint16_t elementtype, voldevhandle;
	uint16_t etype_vol, etype_pd, etype_hs;
	uint16_t etype_oce;
	m_raidconfig_t *raidconfig;
	uint64_t raidwwn;
	uint32_t native;
	mptsas_target_t *ptgt;
	uint32_t configindex;

	if (iocstatus == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) {
		return (DDI_FAILURE);
	}

	if (iocstatus != MPI2_IOCSTATUS_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "mptsas_get_raid_conf_page0 "
		    "config: IOCStatus=0x%x, IOCLogInfo=0x%x",
		    iocstatus, iocloginfo);
		rval = DDI_FAILURE;
		return (rval);
	}
	confignum = va_arg(ap, uint32_t *);
	configindex = va_arg(ap, uint32_t);
	raidconfig_page0 = (pMpi2RaidConfigurationPage0_t)page_memp;
	/*
	 * Get all RAID configurations.
	 */
	etype_vol = MPI2_RAIDCONFIG0_EFLAGS_VOLUME_ELEMENT;
	etype_pd = MPI2_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT;
	etype_hs = MPI2_RAIDCONFIG0_EFLAGS_HOT_SPARE_ELEMENT;
	etype_oce = MPI2_RAIDCONFIG0_EFLAGS_OCE_ELEMENT;
	/*
	 * Set up page address for next time through.
	 */
	*confignum = ddi_get8(accessp,
	    &raidconfig_page0->ConfigNum);

	/*
	 * Point to the right config in the structure.
	 * Increment the number of valid RAID configs.
	 */
	raidconfig = &mpt->m_raidconfig[configindex];
	mpt->m_num_raid_configs++;

	/*
	 * Set the native flag if this is not a foreign
	 * configuration.
	 */
	native = ddi_get32(accessp, &raidconfig_page0->Flags);
	if (native & MPI2_RAIDCONFIG0_FLAG_FOREIGN_CONFIG) {
		native = FALSE;
	} else {
		native = TRUE;
	}
	raidconfig->m_native = (uint8_t)native;

	/*
	 * Get volume information for the volumes in the
	 * config.
	 */
	numelements = ddi_get8(accessp, &raidconfig_page0->NumElements);
	vol = 0;
	disk = 0;
	element = (pMpi2RaidConfig0ConfigElement_t)
	    &raidconfig_page0->ConfigElement;

	for (i = 0; ((i < numelements) && native); i++, element++) {
		/*
		 * Get the element type.  Could be Volume,
		 * PhysDisk, Hot Spare, or Online Capacity
		 * Expansion PhysDisk.
		 */
		elementtype = ddi_get16(accessp, &element->ElementFlags);
		elementtype &= MPI2_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE;

		/*
		 * For volumes, get the RAID settings and the
		 * WWID.
		 */
		if (elementtype == etype_vol) {
			voldevhandle = ddi_get16(accessp,
			    &element->VolDevHandle);
			raidconfig->m_raidvol[vol].m_israid = 1;
			raidconfig->m_raidvol[vol].
			    m_raidhandle = voldevhandle;
			/*
			 * Get the settings for the raid
			 * volume.  This includes the
			 * DevHandles for the disks making up
			 * the raid volume.
			 */
			if (mptsas_get_raid_settings(mpt,
			    &raidconfig->m_raidvol[vol]))
				continue;

			/*
			 * Get the WWID of the RAID volume for
			 * SAS HBA
			 */
			if (mptsas_get_raid_wwid(mpt,
			    &raidconfig->m_raidvol[vol]))
				continue;

			raidwwn = raidconfig->m_raidvol[vol].
			    m_raidwwid;

			/*
			 * RAID uses phymask of 0.
			 */
			ptgt = mptsas_tgt_alloc(mpt->m_targets,
			    voldevhandle, raidwwn, 0, 0, 0);

			raidconfig->m_raidvol[vol].m_raidtgt =
			    ptgt;

			/*
			 * Increment volume index within this
			 * raid config.
			 */
			vol++;
		} else if ((elementtype == etype_pd) ||
		    (elementtype == etype_hs) ||
		    (elementtype == etype_oce)) {
			/*
			 * For all other element types, put
			 * their DevHandles in the phys disk
			 * list of the config.  These are all
			 * some variation of a Phys Disk and
			 * this list is used to keep these
			 * disks from going online.
			 */
			raidconfig->m_physdisk_devhdl[disk] = ddi_get16(accessp,
			    &element->PhysDiskDevHandle);

			/*
			 * Increment disk index within this
			 * raid config.
			 */
			disk++;
		}
	}

	return (rval);
}

int
mptsas_get_raid_info(mptsas_t *mpt)
{
	int rval = DDI_SUCCESS;
	uint32_t confignum, pageaddress;
	uint8_t configindex;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Clear all RAID info before starting.
	 */
	bzero(mpt->m_raidconfig, sizeof (mpt->m_raidconfig));
	mpt->m_num_raid_configs = 0;

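	/*
	 * Walk the RAID configurations using the GET_NEXT_CONFIGNUM page
	 * address form, starting from config number 0xff.  The page-0
	 * callback stores the ConfigNum it read, so each pass asks the IOC
	 * for the configuration after the last one seen; the loop ends when
	 * the IOC reports an invalid page.
	 */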
	configindex = 0;
	confignum = 0xff;
	pageaddress = MPI2_RAID_PGAD_FORM_GET_NEXT_CONFIGNUM | confignum;
	while (rval == DDI_SUCCESS) {
		/*
		 * Get the header and config page.  The reply contains the
		 * reply frame, which holds status info for the request.
		 */
		rval = mptsas_access_config_page(mpt,
		    MPI2_CONFIG_ACTION_PAGE_READ_CURRENT,
		    MPI2_CONFIG_EXTPAGETYPE_RAID_CONFIG, 0, pageaddress,
		    mptsas_raidconf_page_0_cb, &confignum, configindex);
		configindex++;
		pageaddress = MPI2_RAID_PGAD_FORM_GET_NEXT_CONFIGNUM |
		    confignum;
	}

	return (rval);
}

static int
mptsas_raidvol_page_0_cb(mptsas_t *mpt, caddr_t page_memp,
    ddi_acc_handle_t accessp, uint16_t iocstatus, uint32_t iocloginfo,
    va_list ap)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(ap))
#endif
	pMpi2RaidVolPage0_t raidpage;
	int rval = DDI_SUCCESS, i;
	mptsas_raidvol_t *raidvol;
	uint8_t numdisks, volstate, voltype, physdisknum;
	uint32_t volsetting;
	uint32_t statusflags, resync_flag;

	if (iocstatus == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
		return (DDI_FAILURE);

	if (iocstatus != MPI2_IOCSTATUS_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "mptsas_raidvol_page0_cb "
		    "config: IOCStatus=0x%x, IOCLogInfo=0x%x",
		    iocstatus, iocloginfo);
		rval = DDI_FAILURE;
		return (rval);
	}

	raidvol = va_arg(ap, mptsas_raidvol_t *);

	raidpage = (pMpi2RaidVolPage0_t)page_memp;
	volstate = ddi_get8(accessp, &raidpage->VolumeState);
	volsetting = ddi_get32(accessp,
	    (uint32_t *)(void *)&raidpage->VolumeSettings);
	statusflags = ddi_get32(accessp, &raidpage->VolumeStatusFlags);
	voltype = ddi_get8(accessp, &raidpage->VolumeType);

	raidvol->m_state = volstate;
	raidvol->m_statusflags = statusflags;
	/*
	 * Volume size is not used right now. Set to 0.
	 */
	raidvol->m_raidsize = 0;
	raidvol->m_settings = volsetting;
	raidvol->m_raidlevel = voltype;

	if (statusflags & MPI2_RAIDVOL0_STATUS_FLAG_QUIESCED) {
		mptsas_log(mpt, CE_NOTE, "?Volume %d is quiesced\n",
		    raidvol->m_raidhandle);
	}

	if (statusflags &
	    MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) {
		mptsas_log(mpt, CE_NOTE, "?Volume %d is resyncing\n",
		    raidvol->m_raidhandle);
	}

	resync_flag = MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
	switch (volstate) {
	case MPI2_RAID_VOL_STATE_OPTIMAL:
		mptsas_log(mpt, CE_NOTE, "?Volume %d is "
		    "optimal\n", raidvol->m_raidhandle);
		break;
	case MPI2_RAID_VOL_STATE_DEGRADED:
		if ((statusflags & resync_flag) == 0) {
			mptsas_log(mpt, CE_WARN, "Volume %d "
			    "is degraded\n",
			    raidvol->m_raidhandle);
		}
		break;
	case MPI2_RAID_VOL_STATE_FAILED:
		mptsas_log(mpt, CE_WARN, "Volume %d is "
		    "failed\n", raidvol->m_raidhandle);
		break;
	case MPI2_RAID_VOL_STATE_MISSING:
		mptsas_log(mpt, CE_WARN, "Volume %d is "
		    "missing\n", raidvol->m_raidhandle);
		break;
	default:
		break;
	}
	numdisks = raidpage->NumPhysDisks;
	raidvol->m_ndisks = numdisks;
	for (i = 0; i < numdisks; i++) {
		physdisknum = raidpage->PhysDisk[i].PhysDiskNum;
		raidvol->m_disknum[i] = physdisknum;
		if (mptsas_get_physdisk_settings(mpt, raidvol,
		    physdisknum))
			break;
	}
	return (rval);
}

int
mptsas_get_raid_settings(mptsas_t *mpt, mptsas_raidvol_t *raidvol)
{
	int rval = DDI_SUCCESS;
	uint32_t page_address;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Get the header and config page.  The reply contains the reply
	 * frame, which holds status info for the request.
	 */
	page_address = (MPI2_RAID_VOLUME_PGAD_FORM_MASK &
	    MPI2_RAID_VOLUME_PGAD_FORM_HANDLE) | raidvol->m_raidhandle;
	rval = mptsas_access_config_page(mpt,
	    MPI2_CONFIG_ACTION_PAGE_READ_CURRENT,
	    MPI2_CONFIG_PAGETYPE_RAID_VOLUME, 0, page_address,
	    mptsas_raidvol_page_0_cb, raidvol);

	return (rval);
}

static int
mptsas_raidvol_page_1_cb(mptsas_t *mpt, caddr_t page_memp,
    ddi_acc_handle_t accessp, uint16_t iocstatus, uint32_t iocloginfo,
    va_list ap)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(ap))
#endif
	pMpi2RaidVolPage1_t raidpage;
	int rval = DDI_SUCCESS, i;
	uint8_t *sas_addr = NULL;
	uint8_t tmp_sas_wwn[SAS_WWN_BYTE_SIZE];
	uint64_t *sas_wwn;

	if (iocstatus != MPI2_IOCSTATUS_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "mptsas_raidvol_page_1_cb "
		    "config: IOCStatus=0x%x, IOCLogInfo=0x%x",
		    iocstatus, iocloginfo);
		rval = DDI_FAILURE;
		return (rval);
	}
	sas_wwn = va_arg(ap, uint64_t *);

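	/*
	 * Copy the volume's WWID out of the page one byte at a time
	 * through the access handle, then convert it from the
	 * little-endian wire format into host byte order.
	 */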
	raidpage = (pMpi2RaidVolPage1_t)page_memp;
	sas_addr = (uint8_t *)(&raidpage->WWID);
	for (i = 0; i < SAS_WWN_BYTE_SIZE; i++) {
		tmp_sas_wwn[i] = ddi_get8(accessp, sas_addr + i);
	}
	bcopy(tmp_sas_wwn, sas_wwn, SAS_WWN_BYTE_SIZE);
	*sas_wwn = LE_64(*sas_wwn);
	return (rval);
}

static int
mptsas_get_raid_wwid(mptsas_t *mpt, mptsas_raidvol_t *raidvol)
{
	int rval = DDI_SUCCESS;
	uint32_t page_address;
	uint64_t sas_wwn;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Get the header and config page.  The reply contains the reply
	 * frame, which holds status info for the request.
	 */
	page_address = (MPI2_RAID_VOLUME_PGAD_FORM_MASK &
	    MPI2_RAID_VOLUME_PGAD_FORM_HANDLE) | raidvol->m_raidhandle;
	rval = mptsas_access_config_page(mpt,
	    MPI2_CONFIG_ACTION_PAGE_READ_CURRENT,
	    MPI2_CONFIG_PAGETYPE_RAID_VOLUME, 1, page_address,
	    mptsas_raidvol_page_1_cb, &sas_wwn);

	/*
	 * Get the required information from the page.
	 */
	if (rval == DDI_SUCCESS) {

		/*
		 * Replace the top nibble of the RAID WWID with '3' for OBP.
		 */
		sas_wwn = MPTSAS_RAID_WWID(sas_wwn);
		raidvol->m_raidwwid = sas_wwn;
	}

	return (rval);
}

static int
mptsas_raidphydsk_page_0_cb(mptsas_t *mpt, caddr_t page_memp,
    ddi_acc_handle_t accessp, uint16_t iocstatus, uint32_t iocloginfo,
    va_list ap)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(ap))
#endif
	pMpi2RaidPhysDiskPage0_t diskpage;
	int rval = DDI_SUCCESS;
	uint16_t *devhdl;
	uint8_t *state;

	if (iocstatus == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
		return (DDI_FAILURE);

	if (iocstatus != MPI2_IOCSTATUS_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "mptsas_raidphydsk_page0_cb "
		    "config: IOCStatus=0x%x, IOCLogInfo=0x%x",
		    iocstatus, iocloginfo);
		rval = DDI_FAILURE;
		return (rval);
	}
	devhdl = va_arg(ap, uint16_t *);
	state = va_arg(ap, uint8_t *);
	diskpage = (pMpi2RaidPhysDiskPage0_t)page_memp;
	*devhdl = ddi_get16(accessp, &diskpage->DevHandle);
	*state = ddi_get8(accessp, &diskpage->PhysDiskState);
	return (rval);
}

int
mptsas_get_physdisk_settings(mptsas_t *mpt, mptsas_raidvol_t *raidvol,
    uint8_t physdisknum)
{
	int rval = DDI_SUCCESS, i;
	uint8_t state;
	uint16_t devhdl;
	uint32_t page_address;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Get the header and config page.  The reply contains the reply
	 * frame, which holds status info for the request.
	 */
	page_address = (MPI2_PHYSDISK_PGAD_FORM_MASK &
	    MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM) | physdisknum;
	rval = mptsas_access_config_page(mpt,
	    MPI2_CONFIG_ACTION_PAGE_READ_CURRENT,
	    MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK, 0, page_address,
	    mptsas_raidphydsk_page_0_cb, &devhdl, &state);

	/*
	 * Get the required information from the page.
	 */
	if (rval == DDI_SUCCESS) {
		for (i = 0; i < MPTSAS_MAX_DISKS_IN_VOL; i++) {
			/* find the correct position in the arrays */
			if (raidvol->m_disknum[i] == physdisknum)
				break;
		}
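		/*
		 * Record the disk's DevHandle at the slot matching its
		 * PhysDiskNum and map the physical disk state onto the
		 * coarser RAID_DISKSTATUS_GOOD/FAILED values from
		 * <sys/raidioctl.h>.
		 */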
		raidvol->m_devhdl[i] = devhdl;

		switch (state) {
			case MPI2_RAID_PD_STATE_OFFLINE:
				raidvol->m_diskstatus[i] =
				    RAID_DISKSTATUS_FAILED;
				break;

			case MPI2_RAID_PD_STATE_HOT_SPARE:
			case MPI2_RAID_PD_STATE_NOT_CONFIGURED:
			case MPI2_RAID_PD_STATE_NOT_COMPATIBLE:
				break;

			case MPI2_RAID_PD_STATE_DEGRADED:
			case MPI2_RAID_PD_STATE_OPTIMAL:
			case MPI2_RAID_PD_STATE_REBUILDING:
			case MPI2_RAID_PD_STATE_ONLINE:
			default:
				raidvol->m_diskstatus[i] =
				    RAID_DISKSTATUS_GOOD;
				break;
		}
	}

	return (rval);
}

/*
 * RAID Action for System Shutdown. This request uses the dedicated TM slot to
 * avoid a call to mptsas_save_cmd. Since Solaris requires that the mutex is
 * not held during the mptsas_quiesce function, this RAID action must not use
 * the normal code path of requests and replies.
 */
void
mptsas_raid_action_system_shutdown(mptsas_t *mpt)
{
	pMpi2RaidActionRequest_t action;
	uint8_t ir_active = FALSE, reply_type;
	uint8_t function, found_reply = FALSE;
	uint16_t SMID, action_type;
	mptsas_slots_t *slots = mpt->m_active;
	int config, vol;
	mptsas_cmd_t *cmd;
	uint32_t reply_addr;
	uint64_t request_desc;
	int cnt;
	pMpi2ReplyDescriptorsUnion_t reply_desc_union;
	pMPI2DefaultReply_t reply;
	pMpi2AddressReplyDescriptor_t address_reply;

	/*
	 * Before doing the system shutdown RAID Action, make sure that the IOC
	 * supports IR and make sure there is a valid volume for the request.
	 */
	if (mpt->m_ir_capable) {
		for (config = 0; (config < mpt->m_num_raid_configs) &&
		    (!ir_active); config++) {
			for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
				if (mpt->m_raidconfig[config].m_raidvol[vol].
				    m_israid) {
					ir_active = TRUE;
					break;
				}
			}
		}
	}
	if (!ir_active) {
		return;
	}

	/*
	 * If TM slot is already being used (highly unlikely), show message and
	 * don't issue the RAID action.
	 */
	if (slots->m_slot[MPTSAS_TM_SLOT(mpt)] != NULL) {
		mptsas_log(mpt, CE_WARN, "RAID Action slot in use. Cancelling"
		    " System Shutdown RAID Action.\n");
		return;
	}

	/*
	 * Create the cmd and put it in the dedicated TM slot.
	 */
	cmd = &(mpt->m_event_task_mgmt.m_event_cmd);
	bzero((caddr_t)cmd, sizeof (*cmd));
	cmd->cmd_pkt = NULL;
	cmd->cmd_slot = MPTSAS_TM_SLOT(mpt);
	slots->m_slot[MPTSAS_TM_SLOT(mpt)] = cmd;

	/*
	 * Form message for raid action.
	 */
	action = (pMpi2RaidActionRequest_t)(mpt->m_req_frame +
	    (mpt->m_req_frame_size * cmd->cmd_slot));
	bzero(action, mpt->m_req_frame_size);
	action->Function = MPI2_FUNCTION_RAID_ACTION;
	action->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;

	/*
	 * Send RAID Action.
	 */
	(void) ddi_dma_sync(mpt->m_dma_req_frame_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);
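	/*
	 * The request descriptor posted to the IOC carries the slot number
	 * (SMID) shifted into bits 16-31, with the default request
	 * descriptor type in the low-order flag bits.
	 */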
	request_desc = (cmd->cmd_slot << 16) +
	    MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	MPTSAS_START_CMD(mpt, request_desc);

	/*
	 * Even though the reply does not matter because the system is shutting
	 * down, wait no more than 5 seconds here to get the reply just because
	 * we don't want to leave it hanging if it's coming. Poll because
	 * interrupts are disabled when this function is called.
	 */
	for (cnt = 0; cnt < 5000; cnt++) {
		/*
		 * Check for a reply.
		 */
		(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);

		reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
		    MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);

		if (ddi_get32(mpt->m_acc_post_queue_hdl,
		    &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
		    ddi_get32(mpt->m_acc_post_queue_hdl,
		    &reply_desc_union->Words.High) == 0xFFFFFFFF) {
			drv_usecwait(1000);
			continue;
		}

		/*
		 * There is a reply. If it's not an address reply, ignore it.
		 */
		reply_type = ddi_get8(mpt->m_acc_post_queue_hdl,
		    &reply_desc_union->Default.ReplyFlags);
		reply_type &= MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
		if (reply_type != MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
			goto clear_and_continue;
		}

		/*
		 * SMID must be the TM slot since that's what we're using for
		 * this RAID action. If not, ignore this reply.
		 */
		address_reply =
		    (pMpi2AddressReplyDescriptor_t)reply_desc_union;
		SMID = ddi_get16(mpt->m_acc_post_queue_hdl,
		    &address_reply->SMID);
		if (SMID != MPTSAS_TM_SLOT(mpt)) {
			goto clear_and_continue;
		}

		/*
		 * If the reply frame is not in the proper range, ignore it.
		 */
		reply_addr = ddi_get32(mpt->m_acc_post_queue_hdl,
		    &address_reply->ReplyFrameAddress);
		if ((reply_addr < mpt->m_reply_frame_dma_addr) ||
		    (reply_addr >= (mpt->m_reply_frame_dma_addr +
		    (mpt->m_reply_frame_size * mpt->m_free_queue_depth))) ||
		    ((reply_addr - mpt->m_reply_frame_dma_addr) %
		    mpt->m_reply_frame_size != 0)) {
			goto clear_and_continue;
		}

		/*
		 * If it is not a RAID action reply, ignore it.
		 */
		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
		reply = (pMPI2DefaultReply_t)(mpt->m_reply_frame +
		    (reply_addr - mpt->m_reply_frame_dma_addr));
		function = ddi_get8(mpt->m_acc_reply_frame_hdl,
		    &reply->Function);
		if (function != MPI2_FUNCTION_RAID_ACTION) {
			goto clear_and_continue;
		}

		/*
		 * Finally, make sure this is the System Shutdown RAID action.
		 * If not, ignore the reply.
		 */
		action_type = ddi_get16(mpt->m_acc_reply_frame_hdl,
		    &reply->FunctionDependent1);
		if (action_type !=
		    MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED) {
			goto clear_and_continue;
		}
		found_reply = TRUE;

	clear_and_continue:
		/*
		 * Clear the reply descriptor for re-use and increment index.
		 */
		ddi_put64(mpt->m_acc_post_queue_hdl,
		    &((uint64_t *)(void *)mpt->m_post_queue)[mpt->m_post_index],
		    0xFFFFFFFFFFFFFFFF);
		(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORDEV);

		/*
		 * Update the global reply index and keep looking for the
		 * reply if not found yet.
		 */
		if (++mpt->m_post_index == mpt->m_post_queue_depth) {
			mpt->m_post_index = 0;
		}
		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyPostHostIndex,
		    mpt->m_post_index);
		if (!found_reply) {
			continue;
		}

		break;
	}

	/*
	 * Clear the used slot as the last step.
	 */
	slots->m_slot[MPTSAS_TM_SLOT(mpt)] = NULL;
}

int
mptsas_delete_volume(mptsas_t *mpt, uint16_t volid)
{
	int config, i = 0, vol = (-1);

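	/*
	 * Search every RAID configuration for the volume whose DevHandle
	 * matches volid, stopping at the first match so that config and
	 * vol index the matching entry.
	 */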
	for (config = 0; config < mpt->m_num_raid_configs; config++) {
		for (i = 0; i < MPTSAS_MAX_RAIDVOLS; i++) {
			if (mpt->m_raidconfig[config].m_raidvol[i].
			    m_raidhandle == volid) {
				vol = i;
				break;
			}
		}
		if (vol >= 0)
			break;
	}

	if (vol < 0) {
		mptsas_log(mpt, CE_WARN, "raid doesn't exist at specified "
		    "target.");
		return (-1);
	}

	mpt->m_raidconfig[config].m_raidvol[vol].m_israid = 0;
	mpt->m_raidconfig[config].m_raidvol[vol].m_ndisks = 0;
	for (i = 0; i < MPTSAS_MAX_DISKS_IN_VOL; i++) {
		mpt->m_raidconfig[config].m_raidvol[vol].m_disknum[i] = 0;
		mpt->m_raidconfig[config].m_raidvol[vol].m_devhdl[i] = 0;
	}

	return (0);
}