1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 2008 Yahoo!, Inc.
5 * All rights reserved.
6 * Written by: John Baldwin <jhb@FreeBSD.org>
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 * LSI MPT-Fusion Host Adapter FreeBSD userland interface
33 */
34
35 #include <sys/param.h>
36 #ifdef __amd64__
37 #include <sys/abi_compat.h>
38 #endif
39 #include <sys/conf.h>
40 #include <sys/errno.h>
41 #include <sys/ioccom.h>
42 #include <sys/mpt_ioctl.h>
43
44 #include <dev/mpt/mpt.h>
45
/*
 * RAID action results copied back to the caller.  Filled in by
 * mpt_user_reply_handler() from the MSG_RAID_ACTION_REPLY; stored in
 * the request buffer just past the request frame (see the comment
 * above mpt_user_raid_action()).
 */
struct mpt_user_raid_action_result {
	uint32_t	volume_status;	/* VolumeStatus from the reply */
	uint32_t	action_data[4];	/* ActionData words from the reply */
	uint16_t	action_status;	/* ActionStatus from the reply */
};
51
/*
 * A single-segment DMA-able buffer used to move config page and RAID
 * action payloads between userland and the controller.  Managed by
 * mpt_alloc_buffer()/mpt_free_buffer(); vaddr == NULL means "not
 * allocated".
 */
struct mpt_page_memory {
	bus_dma_tag_t	tag;	/* tag describing the allocation */
	bus_dmamap_t	map;	/* load/unload handle */
	bus_addr_t	paddr;	/* bus address handed to the IOC */
	void		*vaddr;	/* KVA used for copyin/copyout */
};
58
/* Personality hooks implemented by this module. */
static mpt_probe_handler_t mpt_user_probe;
static mpt_attach_handler_t mpt_user_attach;
static mpt_enable_handler_t mpt_user_enable;
static mpt_ready_handler_t mpt_user_ready;
static mpt_event_handler_t mpt_user_event;
static mpt_reset_handler_t mpt_user_reset;
static mpt_detach_handler_t mpt_user_detach;

static struct mpt_personality mpt_user_personality = {
	.name		= "mpt_user",
	.probe		= mpt_user_probe,
	.attach		= mpt_user_attach,
	.enable		= mpt_user_enable,
	.ready		= mpt_user_ready,
	.event		= mpt_user_event,
	.reset		= mpt_user_reset,
	.detach		= mpt_user_detach,
};

DECLARE_MPT_PERSONALITY(mpt_user, SI_ORDER_SECOND);

/* Completion handler for requests issued through the ioctl interface. */
static mpt_reply_handler_t mpt_user_reply_handler;

static d_open_t		mpt_open;
static d_close_t	mpt_close;
static d_ioctl_t	mpt_ioctl;

/* Character device switch backing /dev/mpt%d. */
static struct cdevsw mpt_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	mpt_open,
	.d_close =	mpt_close,
	.d_ioctl =	mpt_ioctl,
	.d_name =	"mpt",
};

static MALLOC_DEFINE(M_MPTUSER, "mpt_user", "Buffers for mpt(4) ioctls");

/* Reply handler id assigned by mpt_register_handler() at attach time. */
static uint32_t user_handler_id = MPT_HANDLER_ID_NONE;
98
/*
 * Personality probe: claim every controller so the user ioctl
 * interface is available on all of them.
 */
static int
mpt_user_probe(struct mpt_softc *mpt)
{
	(void)mpt;
	return (0);
}
106
/*
 * Attach the user personality to a controller: register our reply
 * handler and create the /dev/mpt%d node.  Returns 0 on success or an
 * errno; on failure no state is left registered.
 */
static int
mpt_user_attach(struct mpt_softc *mpt)
{
	mpt_handler_t handler;
	int error, unit;

	/* Register the reply handler before exposing the device node. */
	MPT_LOCK(mpt);
	handler.reply_handler = mpt_user_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
	    &user_handler_id);
	MPT_UNLOCK(mpt);
	if (error != 0) {
		mpt_prt(mpt, "Unable to register user handler!\n");
		return (error);
	}
	/* /dev/mpt%d, owned root:operator, mode 0640. */
	unit = device_get_unit(mpt->dev);
	mpt->cdev = make_dev(&mpt_cdevsw, unit, UID_ROOT, GID_OPERATOR, 0640,
	    "mpt%d", unit);
	if (mpt->cdev == NULL) {
		/* Undo the handler registration on failure. */
		MPT_LOCK(mpt);
		mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
		    user_handler_id);
		MPT_UNLOCK(mpt);
		return (ENOMEM);
	}
	/* Let mpt_ioctl() recover the softc from the cdev. */
	mpt->cdev->si_drv1 = mpt;
	return (0);
}
135
/* Nothing extra to do when the controller is enabled. */
static int
mpt_user_enable(struct mpt_softc *mpt)
{
	(void)mpt;
	return (0);
}
142
/* No setup is required when the controller reports ready. */
static void
mpt_user_ready(struct mpt_softc *mpt)
{
	(void)mpt;
}
148
149 static int
mpt_user_event(struct mpt_softc * mpt,request_t * req,MSG_EVENT_NOTIFY_REPLY * msg)150 mpt_user_event(struct mpt_softc *mpt, request_t *req,
151 MSG_EVENT_NOTIFY_REPLY *msg)
152 {
153
154 /* Someday we may want to let a user daemon listen for events? */
155 return (0);
156 }
157
/* Nothing to clean up or restore across a controller reset. */
static void
mpt_user_reset(struct mpt_softc *mpt, int type)
{
	(void)mpt;
	(void)type;
}
163
/*
 * Detach the user personality: destroy the device node first (so no
 * new ioctls can start), then deregister the reply handler.
 */
static void
mpt_user_detach(struct mpt_softc *mpt)
{
	mpt_handler_t handler;

	/* XXX: do a purge of pending requests? */
	destroy_dev(mpt->cdev);

	MPT_LOCK(mpt);
	handler.reply_handler = mpt_user_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    user_handler_id);
	MPT_UNLOCK(mpt);
}
178
/* Open always succeeds; access control is via device node permissions. */
static int
mpt_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	(void)dev;
	(void)flags;
	(void)fmt;
	(void)td;
	return (0);
}
185
/* No per-open state, so close has nothing to release. */
static int
mpt_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	(void)dev;
	(void)flags;
	(void)fmt;
	(void)td;
	return (0);
}
192
/*
 * Allocate a single-segment, 32-bit-addressable DMA buffer of 'len'
 * bytes described by page_mem.  On failure page_mem->vaddr is left
 * NULL so mpt_free_buffer() is a no-op.
 */
static int
mpt_alloc_buffer(struct mpt_softc *mpt, struct mpt_page_memory *page_mem,
    size_t len)
{
	struct mpt_map_info mi;
	int error;

	page_mem->vaddr = NULL;

	/* Limit requests to 16M. */
	if (len > 16 * 1024 * 1024)
		return (ENOSPC);
	/* One segment below 4GB: the payload SGE is a SGE_SIMPLE32. */
	error = mpt_dma_tag_create(mpt, mpt->parent_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    len, 1, len, 0, &page_mem->tag);
	if (error)
		return (error);
	error = bus_dmamem_alloc(page_mem->tag, &page_mem->vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &page_mem->map);
	if (error) {
		bus_dma_tag_destroy(page_mem->tag);
		return (error);
	}
	mi.mpt = mpt;
	error = bus_dmamap_load(page_mem->tag, page_mem->map, page_mem->vaddr,
	    len, mpt_map_rquest, &mi, BUS_DMA_NOWAIT);
	if (error == 0)
		error = mi.error;	/* error reported by the callback */
	if (error) {
		/* Unwind the allocation and tag. */
		bus_dmamem_free(page_mem->tag, page_mem->vaddr, page_mem->map);
		bus_dma_tag_destroy(page_mem->tag);
		page_mem->vaddr = NULL;
		return (error);
	}
	page_mem->paddr = mi.phys;
	return (0);
}
230
/*
 * Release a buffer set up by mpt_alloc_buffer().  Safe to call when
 * nothing was allocated (vaddr == NULL).
 */
static void
mpt_free_buffer(struct mpt_page_memory *page_mem)
{

	if (page_mem->vaddr == NULL)
		return;
	/* Teardown must mirror allocation order: unload, free, destroy. */
	bus_dmamap_unload(page_mem->tag, page_mem->map);
	bus_dmamem_free(page_mem->tag, page_mem->vaddr, page_mem->map);
	bus_dma_tag_destroy(page_mem->tag);
	page_mem->vaddr = NULL;
}
242
/*
 * Fetch a config page header from the IOC for the page identified by
 * page_req->header and page_req->page_address.  The IOC status is
 * returned little-endian in page_req->ioc_status; on success the
 * returned header is copied back into page_req->header.
 */
static int
mpt_user_read_cfg_header(struct mpt_softc *mpt,
    struct mpt_cfg_page_req *page_req)
{
	request_t *req;
	cfgparms_t params;
	MSG_CONFIG *cfgp;
	int error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_user_read_cfg_header: Get request failed!\n");
		return (ENOMEM);
	}

	params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
	params.PageVersion = 0;
	params.PageLength = 0;
	params.PageNumber = page_req->header.PageNumber;
	params.PageType = page_req->header.PageType;
	params.PageAddress = le32toh(page_req->page_address);
	/* Header-only read: no payload buffer (addr/len 0), 5s timeout. */
	error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
	    TRUE, 5000);
	if (error != 0) {
		/*
		 * Leave the request. Without resetting the chip, it's
		 * still owned by it and we'll just get into trouble
		 * freeing it now. Mark it as abandoned so that if it
		 * shows up later it can be freed.
		 */
		mpt_prt(mpt, "read_cfg_header timed out\n");
		return (ETIMEDOUT);
	}

	page_req->ioc_status = htole16(req->IOCStatus);
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS) {
		/* Copy the header the IOC returned back to the caller. */
		cfgp = req->req_vbuf;
		bcopy(&cfgp->Header, &page_req->header,
		    sizeof(page_req->header));
	}
	mpt_free_request(mpt, req);
	return (0);
}
286
/*
 * Read the current values of a config page into mpt_page.  The caller
 * must have copied a valid CONFIG_PAGE_HEADER to the start of
 * mpt_page->vaddr; it supplies the version/length/number/type of the
 * request.
 */
static int
mpt_user_read_cfg_page(struct mpt_softc *mpt, struct mpt_cfg_page_req *page_req,
    struct mpt_page_memory *mpt_page)
{
	CONFIG_PAGE_HEADER *hdr;
	request_t *req;
	cfgparms_t params;
	int error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_user_read_cfg_page: Get request failed!\n");
		return (ENOMEM);
	}

	hdr = mpt_page->vaddr;
	params.Action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
	params.PageVersion = hdr->PageVersion;
	params.PageLength = hdr->PageLength;
	params.PageNumber = hdr->PageNumber;
	params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
	params.PageAddress = le32toh(page_req->page_address);
	bus_dmamap_sync(mpt_page->tag, mpt_page->map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	error = mpt_issue_cfg_req(mpt, req, &params, mpt_page->paddr,
	    le32toh(page_req->len), TRUE, 5000);
	if (error != 0) {
		/* Request deliberately not freed; the chip may still own it
		 * (see the comment in mpt_user_read_cfg_header()). */
		mpt_prt(mpt, "mpt_user_read_cfg_page timed out\n");
		return (ETIMEDOUT);
	}

	page_req->ioc_status = htole16(req->IOCStatus);
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS)
		bus_dmamap_sync(mpt_page->tag, mpt_page->map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	mpt_free_request(mpt, req);
	return (0);
}
325
/*
 * Fetch an extended config page header.  Like
 * mpt_user_read_cfg_header(), but uses the extended page type/length
 * fields from the MSG_CONFIG_REPLY.
 */
static int
mpt_user_read_extcfg_header(struct mpt_softc *mpt,
    struct mpt_ext_cfg_page_req *ext_page_req)
{
	request_t *req;
	cfgparms_t params;
	MSG_CONFIG_REPLY *cfgp;
	int error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_user_read_extcfg_header: Get request failed!\n");
		return (ENOMEM);
	}

	params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
	params.PageVersion = ext_page_req->header.PageVersion;
	params.PageLength = 0;
	params.PageNumber = ext_page_req->header.PageNumber;
	params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
	params.PageAddress = le32toh(ext_page_req->page_address);
	params.ExtPageType = ext_page_req->header.ExtPageType;
	params.ExtPageLength = 0;
	/* Header-only read: no payload buffer, 5s timeout. */
	error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
	    TRUE, 5000);
	if (error != 0) {
		/*
		 * Leave the request. Without resetting the chip, it's
		 * still owned by it and we'll just get into trouble
		 * freeing it now. Mark it as abandoned so that if it
		 * shows up later it can be freed.
		 */
		mpt_prt(mpt, "mpt_user_read_extcfg_header timed out\n");
		return (ETIMEDOUT);
	}

	ext_page_req->ioc_status = htole16(req->IOCStatus);
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS) {
		/* Field-by-field copy; the ext header layout differs from
		 * the plain CONFIG_PAGE_HEADER. */
		cfgp = req->req_vbuf;
		ext_page_req->header.PageVersion = cfgp->Header.PageVersion;
		ext_page_req->header.PageNumber = cfgp->Header.PageNumber;
		ext_page_req->header.PageType = cfgp->Header.PageType;
		ext_page_req->header.ExtPageLength = cfgp->ExtPageLength;
		ext_page_req->header.ExtPageType = cfgp->ExtPageType;
	}
	mpt_free_request(mpt, req);
	return (0);
}
374
/*
 * Read the current values of an extended config page into mpt_page.
 * The caller must have copied a valid CONFIG_EXTENDED_PAGE_HEADER to
 * the start of mpt_page->vaddr.
 */
static int
mpt_user_read_extcfg_page(struct mpt_softc *mpt,
    struct mpt_ext_cfg_page_req *ext_page_req, struct mpt_page_memory *mpt_page)
{
	CONFIG_EXTENDED_PAGE_HEADER *hdr;
	request_t *req;
	cfgparms_t params;
	int error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_user_read_extcfg_page: Get request failed!\n");
		return (ENOMEM);
	}

	hdr = mpt_page->vaddr;
	params.Action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
	params.PageVersion = hdr->PageVersion;
	params.PageLength = 0;
	params.PageNumber = hdr->PageNumber;
	params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
	params.PageAddress = le32toh(ext_page_req->page_address);
	params.ExtPageType = hdr->ExtPageType;
	params.ExtPageLength = hdr->ExtPageLength;
	bus_dmamap_sync(mpt_page->tag, mpt_page->map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	error = mpt_issue_cfg_req(mpt, req, &params, mpt_page->paddr,
	    le32toh(ext_page_req->len), TRUE, 5000);
	if (error != 0) {
		/* Request deliberately not freed; the chip may still own it
		 * (see the comment in mpt_user_read_extcfg_header()). */
		mpt_prt(mpt, "mpt_user_read_extcfg_page timed out\n");
		return (ETIMEDOUT);
	}

	ext_page_req->ioc_status = htole16(req->IOCStatus);
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS)
		bus_dmamap_sync(mpt_page->tag, mpt_page->map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	mpt_free_request(mpt, req);
	return (0);
}
415
/*
 * Write the current values of a config page from mpt_page to the IOC.
 * Refuses pages whose attribute bits say they are not changeable or
 * persistent.
 */
static int
mpt_user_write_cfg_page(struct mpt_softc *mpt,
    struct mpt_cfg_page_req *page_req, struct mpt_page_memory *mpt_page)
{
	CONFIG_PAGE_HEADER *hdr;
	request_t *req;
	cfgparms_t params;
	u_int hdr_attr;
	int error;

	hdr = mpt_page->vaddr;
	hdr_attr = hdr->PageType & MPI_CONFIG_PAGEATTR_MASK;
	if (hdr_attr != MPI_CONFIG_PAGEATTR_CHANGEABLE &&
	    hdr_attr != MPI_CONFIG_PAGEATTR_PERSISTENT) {
		mpt_prt(mpt, "page type 0x%x not changeable\n",
		    hdr->PageType & MPI_CONFIG_PAGETYPE_MASK);
		return (EINVAL);
	}

#if 0
	/*
	 * We shouldn't mask off other bits here.
	 */
	hdr->PageType &= ~MPI_CONFIG_PAGETYPE_MASK;
#endif

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL)
		return (ENOMEM);

	bus_dmamap_sync(mpt_page->tag, mpt_page->map, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	/*
	 * There isn't any point in restoring stripped out attributes
	 * if you then mask them going down to issue the request.
	 */

	params.Action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
	params.PageVersion = hdr->PageVersion;
	params.PageLength = hdr->PageLength;
	params.PageNumber = hdr->PageNumber;
	params.PageAddress = le32toh(page_req->page_address);
#if 0
	/* Restore stripped out attributes */
	hdr->PageType |= hdr_attr;
	params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
#else
	params.PageType = hdr->PageType;
#endif
	error = mpt_issue_cfg_req(mpt, req, &params, mpt_page->paddr,
	    le32toh(page_req->len), TRUE, 5000);
	if (error != 0) {
		/* Request deliberately not freed; the chip may still own it
		 * (see the comment in mpt_user_read_cfg_header()). */
		mpt_prt(mpt, "mpt_write_cfg_page timed out\n");
		return (ETIMEDOUT);
	}

	page_req->ioc_status = htole16(req->IOCStatus);
	bus_dmamap_sync(mpt_page->tag, mpt_page->map, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);
	mpt_free_request(mpt, req);
	return (0);
}
479
/*
 * Reply handler for requests issued through the user ioctl interface
 * (RAID actions).  Saves the action results past the request frame,
 * marks the request done, and either wakes the waiter or frees a
 * request that already timed out (late completion).
 */
static int
mpt_user_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	MSG_RAID_ACTION_REPLY *reply;
	struct mpt_user_raid_action_result *res;

	if (req == NULL)
		return (TRUE);

	if (reply_frame != NULL) {
		reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
		req->IOCStatus = le16toh(reply->IOCStatus);
		/* Result scratch area lives just past the request frame. */
		res = (struct mpt_user_raid_action_result *)
		    (((uint8_t *)req->req_vbuf) + MPT_RQSL(mpt));
		res->action_status = reply->ActionStatus;
		res->volume_status = reply->VolumeStatus;
		bcopy(&reply->ActionData, res->action_data,
		    sizeof(res->action_data));
	}

	req->state &= ~REQ_STATE_QUEUED;
	req->state |= REQ_STATE_DONE;
	TAILQ_REMOVE(&mpt->request_pending_list, req, links);

	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
		wakeup(req);
	} else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
		/*
		 * Whew- we can free this request (late completion)
		 */
		mpt_free_request(mpt, req);
	}

	return (TRUE);
}
516
/*
 * We use the first part of the request buffer after the request frame
 * to hold the action data and action status from the RAID reply.  The
 * rest of the request buffer is used to hold the buffer for the
 * action SGE.
 */
static int
mpt_user_raid_action(struct mpt_softc *mpt, struct mpt_raid_action *raid_act,
    struct mpt_page_memory *mpt_page)
{
	request_t *req;
	struct mpt_user_raid_action_result *res;
	MSG_RAID_ACTION_REQUEST *rap;
	SGE_SIMPLE32 *se;
	int error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL)
		return (ENOMEM);
	/* Build the RAID action request frame from the user's parameters. */
	rap = req->req_vbuf;
	memset(rap, 0, sizeof *rap);
	rap->Action = raid_act->action;
	rap->ActionDataWord = raid_act->action_data_word;
	rap->Function = MPI_FUNCTION_RAID_ACTION;
	rap->VolumeID = raid_act->volume_id;
	rap->VolumeBus = raid_act->volume_bus;
	rap->PhysDiskNum = raid_act->phys_disk_num;
	se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
	if (mpt_page->vaddr != NULL && raid_act->len != 0) {
		/* Describe the optional data buffer with a single SGE. */
		bus_dmamap_sync(mpt_page->tag, mpt_page->map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		se->Address = htole32(mpt_page->paddr);
		MPI_pSGE_SET_LENGTH(se, le32toh(raid_act->len));
		MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		    MPI_SGE_FLAGS_END_OF_LIST |
		    (raid_act->write ? MPI_SGE_FLAGS_HOST_TO_IOC :
		    MPI_SGE_FLAGS_IOC_TO_HOST)));
	}
	/* Harmless when no SGE was set up: the frame was zeroed above. */
	se->FlagsLength = htole32(se->FlagsLength);
	/* Route the completion back to mpt_user_reply_handler(). */
	rap->MsgContext = htole32(req->index | user_handler_id);

	mpt_check_doorbell(mpt);
	mpt_send_cmd(mpt, req);

	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, TRUE,
	    2000);
	if (error != 0) {
		/*
		 * Leave request so it can be cleaned up later.
		 */
		mpt_prt(mpt, "mpt_user_raid_action timed out\n");
		return (error);
	}

	raid_act->ioc_status = htole16(req->IOCStatus);
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
		mpt_free_request(mpt, req);
		return (0);
	}

	/* Copy the results saved by the reply handler back to the caller. */
	res = (struct mpt_user_raid_action_result *)
	    (((uint8_t *)req->req_vbuf) + MPT_RQSL(mpt));
	raid_act->volume_status = res->volume_status;
	raid_act->action_status = res->action_status;
	bcopy(res->action_data, raid_act->action_data,
	    sizeof(res->action_data));
	if (mpt_page->vaddr != NULL)
		bus_dmamap_sync(mpt_page->tag, mpt_page->map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	mpt_free_request(mpt, req);
	return (0);
}
590
/*
 * Ioctl entry point for /dev/mpt%d.  Supports reading config page
 * headers and pages, writing config pages, and issuing RAID actions.
 * On amd64 each ioctl also has a 32-bit (i386 compat) variant whose
 * struct is converted to the native layout on entry and converted
 * back before returning success.
 */
static int
mpt_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td)
{
	struct mpt_softc *mpt;
	struct mpt_cfg_page_req *page_req;
	struct mpt_ext_cfg_page_req *ext_page_req;
	struct mpt_raid_action *raid_act;
	struct mpt_page_memory mpt_page;
#ifdef __amd64__
	struct mpt_cfg_page_req32 *page_req32;
	struct mpt_cfg_page_req page_req_swab;
	struct mpt_ext_cfg_page_req32 *ext_page_req32;
	struct mpt_ext_cfg_page_req ext_page_req_swab;
	struct mpt_raid_action32 *raid_act32;
	struct mpt_raid_action raid_act_swab;
#endif
	int error;

	/* Softc was stashed in si_drv1 by mpt_user_attach(). */
	mpt = dev->si_drv1;
	page_req = (void *)arg;
	ext_page_req = (void *)arg;
	raid_act = (void *)arg;
	/* NULL vaddr keeps the final mpt_free_buffer() a no-op unless a
	 * buffer is actually allocated below. */
	mpt_page.vaddr = NULL;

#ifdef __amd64__
	/* Convert 32-bit structs to native ones. */
	page_req32 = (void *)arg;
	ext_page_req32 = (void *)arg;
	raid_act32 = (void *)arg;
	switch (cmd) {
	case MPTIO_READ_CFG_HEADER32:
	case MPTIO_READ_CFG_PAGE32:
	case MPTIO_WRITE_CFG_PAGE32:
		/* Point page_req at a stack copy converted from 32-bit. */
		page_req = &page_req_swab;
		page_req->header = page_req32->header;
		page_req->page_address = page_req32->page_address;
		page_req->buf = PTRIN(page_req32->buf);
		page_req->len = page_req32->len;
		page_req->ioc_status = page_req32->ioc_status;
		break;
	case MPTIO_READ_EXT_CFG_HEADER32:
	case MPTIO_READ_EXT_CFG_PAGE32:
		ext_page_req = &ext_page_req_swab;
		ext_page_req->header = ext_page_req32->header;
		ext_page_req->page_address = ext_page_req32->page_address;
		ext_page_req->buf = PTRIN(ext_page_req32->buf);
		ext_page_req->len = ext_page_req32->len;
		ext_page_req->ioc_status = ext_page_req32->ioc_status;
		break;
	case MPTIO_RAID_ACTION32:
		raid_act = &raid_act_swab;
		raid_act->action = raid_act32->action;
		raid_act->volume_bus = raid_act32->volume_bus;
		raid_act->volume_id = raid_act32->volume_id;
		raid_act->phys_disk_num = raid_act32->phys_disk_num;
		raid_act->action_data_word = raid_act32->action_data_word;
		raid_act->buf = PTRIN(raid_act32->buf);
		raid_act->len = raid_act32->len;
		raid_act->volume_status = raid_act32->volume_status;
		bcopy(raid_act32->action_data, raid_act->action_data,
		    sizeof(raid_act->action_data));
		raid_act->action_status = raid_act32->action_status;
		raid_act->ioc_status = raid_act32->ioc_status;
		raid_act->write = raid_act32->write;
		break;
	}
#endif

	switch (cmd) {
#ifdef __amd64__
	case MPTIO_READ_CFG_HEADER32:
#endif
	case MPTIO_READ_CFG_HEADER:
		MPT_LOCK(mpt);
		error = mpt_user_read_cfg_header(mpt, page_req);
		MPT_UNLOCK(mpt);
		break;
#ifdef __amd64__
	case MPTIO_READ_CFG_PAGE32:
#endif
	case MPTIO_READ_CFG_PAGE:
		/* Buffer must hold at least the page header. */
		if (page_req->len < (int)sizeof(CONFIG_PAGE_HEADER)) {
			error = EINVAL;
			break;
		}
		error = mpt_alloc_buffer(mpt, &mpt_page, page_req->len);
		if (error)
			break;
		/* Copy in only the header; it parameterizes the read. */
		error = copyin(page_req->buf, mpt_page.vaddr,
		    sizeof(CONFIG_PAGE_HEADER));
		if (error)
			break;
		MPT_LOCK(mpt);
		error = mpt_user_read_cfg_page(mpt, page_req, &mpt_page);
		MPT_UNLOCK(mpt);
		if (error)
			break;
		error = copyout(mpt_page.vaddr, page_req->buf, page_req->len);
		break;
#ifdef __amd64__
	case MPTIO_READ_EXT_CFG_HEADER32:
#endif
	case MPTIO_READ_EXT_CFG_HEADER:
		MPT_LOCK(mpt);
		error = mpt_user_read_extcfg_header(mpt, ext_page_req);
		MPT_UNLOCK(mpt);
		break;
#ifdef __amd64__
	case MPTIO_READ_EXT_CFG_PAGE32:
#endif
	case MPTIO_READ_EXT_CFG_PAGE:
		if (ext_page_req->len <
		    (int)sizeof(CONFIG_EXTENDED_PAGE_HEADER)) {
			error = EINVAL;
			break;
		}
		error = mpt_alloc_buffer(mpt, &mpt_page, ext_page_req->len);
		if (error)
			break;
		error = copyin(ext_page_req->buf, mpt_page.vaddr,
		    sizeof(CONFIG_EXTENDED_PAGE_HEADER));
		if (error)
			break;
		MPT_LOCK(mpt);
		error = mpt_user_read_extcfg_page(mpt, ext_page_req, &mpt_page);
		MPT_UNLOCK(mpt);
		if (error)
			break;
		error = copyout(mpt_page.vaddr, ext_page_req->buf,
		    ext_page_req->len);
		break;
#ifdef __amd64__
	case MPTIO_WRITE_CFG_PAGE32:
#endif
	case MPTIO_WRITE_CFG_PAGE:
		if (page_req->len < (int)sizeof(CONFIG_PAGE_HEADER)) {
			error = EINVAL;
			break;
		}
		error = mpt_alloc_buffer(mpt, &mpt_page, page_req->len);
		if (error)
			break;
		/* Whole page comes from userland for a write. */
		error = copyin(page_req->buf, mpt_page.vaddr, page_req->len);
		if (error)
			break;
		MPT_LOCK(mpt);
		error = mpt_user_write_cfg_page(mpt, page_req, &mpt_page);
		MPT_UNLOCK(mpt);
		break;
#ifdef __amd64__
	case MPTIO_RAID_ACTION32:
#endif
	case MPTIO_RAID_ACTION:
		/* Data buffer is optional for RAID actions. */
		if (raid_act->buf != NULL) {
			error = mpt_alloc_buffer(mpt, &mpt_page, raid_act->len);
			if (error)
				break;
			error = copyin(raid_act->buf, mpt_page.vaddr,
			    raid_act->len);
			if (error)
				break;
		}
		MPT_LOCK(mpt);
		error = mpt_user_raid_action(mpt, raid_act, &mpt_page);
		MPT_UNLOCK(mpt);
		if (error)
			break;
		if (raid_act->buf != NULL)
			error = copyout(mpt_page.vaddr, raid_act->buf,
			    raid_act->len);
		break;
	default:
		error = ENOIOCTL;
		break;
	}

	/* No-op unless one of the cases above allocated a buffer. */
	mpt_free_buffer(&mpt_page);

	if (error)
		return (error);

#ifdef __amd64__
	/* Convert native structs to 32-bit ones. */
	switch (cmd) {
	case MPTIO_READ_CFG_HEADER32:
	case MPTIO_READ_CFG_PAGE32:
	case MPTIO_WRITE_CFG_PAGE32:
		page_req32->header = page_req->header;
		page_req32->page_address = page_req->page_address;
		page_req32->buf = PTROUT(page_req->buf);
		page_req32->len = page_req->len;
		page_req32->ioc_status = page_req->ioc_status;
		break;
	case MPTIO_READ_EXT_CFG_HEADER32:
	case MPTIO_READ_EXT_CFG_PAGE32:
		ext_page_req32->header = ext_page_req->header;
		ext_page_req32->page_address = ext_page_req->page_address;
		ext_page_req32->buf = PTROUT(ext_page_req->buf);
		ext_page_req32->len = ext_page_req->len;
		ext_page_req32->ioc_status = ext_page_req->ioc_status;
		break;
	case MPTIO_RAID_ACTION32:
		raid_act32->action = raid_act->action;
		raid_act32->volume_bus = raid_act->volume_bus;
		raid_act32->volume_id = raid_act->volume_id;
		raid_act32->phys_disk_num = raid_act->phys_disk_num;
		raid_act32->action_data_word = raid_act->action_data_word;
		raid_act32->buf = PTROUT(raid_act->buf);
		raid_act32->len = raid_act->len;
		raid_act32->volume_status = raid_act->volume_status;
		bcopy(raid_act->action_data, raid_act32->action_data,
		    sizeof(raid_act->action_data));
		raid_act32->action_status = raid_act->action_status;
		raid_act32->ioc_status = raid_act->ioc_status;
		raid_act32->write = raid_act->write;
		break;
	}
#endif

	return (0);
}
812