1 /*
2 * Management Module Support for MPT (Message Passing Technology) based
3 * controllers
4 *
5 * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.c
6 * Copyright (C) 2012-2014 LSI Corporation
7 * Copyright (C) 2013-2014 Avago Technologies
8 * (mailto: MPT-FusionLinux.pdl@avagotech.com)
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 2
13 * of the License, or (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * NO WARRANTY
21 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
22 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
23 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
24 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
25 * solely responsible for determining the appropriateness of using and
26 * distributing the Program and assumes all risks associated with its
27 * exercise of rights under this Agreement, including but not limited to
28 * the risks and costs of program errors, damage to or loss of data,
29 * programs or equipment, and unavailability or interruption of operations.
30
31 * DISCLAIMER OF LIABILITY
32 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
33 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
35 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
36 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
37 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
38 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
39
40 * You should have received a copy of the GNU General Public License
41 * along with this program; if not, write to the Free Software
42 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
43 * USA.
44 */
45
46 #include <linux/kernel.h>
47 #include <linux/module.h>
48 #include <linux/errno.h>
49 #include <linux/init.h>
50 #include <linux/slab.h>
51 #include <linux/types.h>
52 #include <linux/pci.h>
53 #include <linux/delay.h>
54 #include <linux/compat.h>
55 #include <linux/poll.h>
56
57 #include <linux/io.h>
58 #include <linux/uaccess.h>
59
60 #include "mpt3sas_base.h"
61 #include "mpt3sas_ctl.h"
62
63
64 static struct fasync_struct *async_queue;
65 static DECLARE_WAIT_QUEUE_HEAD(ctl_poll_wait);
66
67
68 /**
69 * enum block_state - blocking state
70 * @NON_BLOCKING: non blocking
71 * @BLOCKING: blocking
72 *
73 * These states are for ioctls that need to wait for a response
74 * from firmware, so they probably require sleep.
75 */
76 enum block_state {
77 NON_BLOCKING,
78 BLOCKING,
79 };
80
81 /**
82 * _ctl_display_some_debug - debug routine
83 * @ioc: per adapter object
84 * @smid: system request message index
85 * @calling_function_name: string pass from calling function
86 * @mpi_reply: reply message frame
87 * Context: none.
88 *
89 * Function for displaying debug info helpful when debugging issues
90 * in this module.
91 */
static void
_ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
	char *calling_function_name, MPI2DefaultReply_t *mpi_reply)
{
	Mpi2ConfigRequest_t *mpi_request;
	char *desc = NULL;

	/* all output here is gated on the MPT_DEBUG_IOCTL logging level */
	if (!(ioc->logging_level & MPT_DEBUG_IOCTL))
		return;

	/* map the request's MPI function code to a human-readable name;
	 * desc stays NULL (and we print nothing) for unknown functions
	 */
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	switch (mpi_request->Function) {
	case MPI2_FUNCTION_SCSI_IO_REQUEST:
	{
		Mpi2SCSIIORequest_t *scsi_request =
		    (Mpi2SCSIIORequest_t *)mpi_request;

		/* low nibble of IoFlags carries the CDB length */
		snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
		    "scsi_io, cmd(0x%02x), cdb_len(%d)",
		    scsi_request->CDB.CDB32[0],
		    le16_to_cpu(scsi_request->IoFlags) & 0xF);
		desc = ioc->tmp_string;
		break;
	}
	case MPI2_FUNCTION_SCSI_TASK_MGMT:
		desc = "task_mgmt";
		break;
	case MPI2_FUNCTION_IOC_INIT:
		desc = "ioc_init";
		break;
	case MPI2_FUNCTION_IOC_FACTS:
		desc = "ioc_facts";
		break;
	case MPI2_FUNCTION_CONFIG:
	{
		Mpi2ConfigRequest_t *config_request =
		    (Mpi2ConfigRequest_t *)mpi_request;

		snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
		    "config, type(0x%02x), ext_type(0x%02x), number(%d)",
		    (config_request->Header.PageType &
		     MPI2_CONFIG_PAGETYPE_MASK), config_request->ExtPageType,
		    config_request->Header.PageNumber);
		desc = ioc->tmp_string;
		break;
	}
	case MPI2_FUNCTION_PORT_FACTS:
		desc = "port_facts";
		break;
	case MPI2_FUNCTION_PORT_ENABLE:
		desc = "port_enable";
		break;
	case MPI2_FUNCTION_EVENT_NOTIFICATION:
		desc = "event_notification";
		break;
	case MPI2_FUNCTION_FW_DOWNLOAD:
		desc = "fw_download";
		break;
	case MPI2_FUNCTION_FW_UPLOAD:
		desc = "fw_upload";
		break;
	case MPI2_FUNCTION_RAID_ACTION:
		desc = "raid_action";
		break;
	case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
	{
		Mpi2SCSIIORequest_t *scsi_request =
		    (Mpi2SCSIIORequest_t *)mpi_request;

		snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
		    "raid_pass, cmd(0x%02x), cdb_len(%d)",
		    scsi_request->CDB.CDB32[0],
		    le16_to_cpu(scsi_request->IoFlags) & 0xF);
		desc = ioc->tmp_string;
		break;
	}
	case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
		desc = "sas_iounit_cntl";
		break;
	case MPI2_FUNCTION_SATA_PASSTHROUGH:
		desc = "sata_pass";
		break;
	case MPI2_FUNCTION_DIAG_BUFFER_POST:
		desc = "diag_buffer_post";
		break;
	case MPI2_FUNCTION_DIAG_RELEASE:
		desc = "diag_release";
		break;
	case MPI2_FUNCTION_SMP_PASSTHROUGH:
		desc = "smp_passthrough";
		break;
	case MPI2_FUNCTION_TOOLBOX:
		desc = "toolbox";
		break;
	case MPI2_FUNCTION_NVME_ENCAPSULATED:
		desc = "nvme_encapsulated";
		break;
	case MPI2_FUNCTION_MCTP_PASSTHROUGH:
		desc = "mctp_passthrough";
		break;
	}

	if (!desc)
		return;

	ioc_info(ioc, "%s: %s, smid(%d)\n", calling_function_name, desc, smid);

	/* the rest only applies when we were handed a reply frame */
	if (!mpi_reply)
		return;

	if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo)
		ioc_info(ioc, "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
			 le16_to_cpu(mpi_reply->IOCStatus),
			 le32_to_cpu(mpi_reply->IOCLogInfo));

	/* for SCSI IO / RAID passthrough, also identify the target device
	 * (SAS first, then NVMe/PCIe) and dump SCSI state/status
	 */
	if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
	    mpi_request->Function ==
	    MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
		Mpi2SCSIIOReply_t *scsi_reply =
		    (Mpi2SCSIIOReply_t *)mpi_reply;
		struct _sas_device *sas_device = NULL;
		struct _pcie_device *pcie_device = NULL;

		sas_device = mpt3sas_get_sdev_by_handle(ioc,
		    le16_to_cpu(scsi_reply->DevHandle));
		if (sas_device) {
			ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
				 (u64)sas_device->sas_address,
				 sas_device->phy);
			ioc_warn(ioc, "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
				 (u64)sas_device->enclosure_logical_id,
				 sas_device->slot);
			/* drop the reference taken by the lookup */
			sas_device_put(sas_device);
		}
		if (!sas_device) {
			pcie_device = mpt3sas_get_pdev_by_handle(ioc,
			    le16_to_cpu(scsi_reply->DevHandle));
			if (pcie_device) {
				ioc_warn(ioc, "\tWWID(0x%016llx), port(%d)\n",
					 (unsigned long long)pcie_device->wwid,
					 pcie_device->port_num);
				if (pcie_device->enclosure_handle != 0)
					ioc_warn(ioc, "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
						 (u64)pcie_device->enclosure_logical_id,
						 pcie_device->slot);
				/* drop the reference taken by the lookup */
				pcie_device_put(pcie_device);
			}
		}
		if (scsi_reply->SCSIState || scsi_reply->SCSIStatus)
			ioc_info(ioc, "\tscsi_state(0x%02x), scsi_status(0x%02x)\n",
				 scsi_reply->SCSIState,
				 scsi_reply->SCSIStatus);
	}
}
246
247 /**
248 * mpt3sas_ctl_done - ctl module completion routine
249 * @ioc: per adapter object
250 * @smid: system request message index
251 * @msix_index: MSIX table index supplied by the OS
252 * @reply: reply message frame(lower 32bit addr)
253 * Context: none.
254 *
255 * The callback handler when using ioc->ctl_cb_idx.
256 *
257 * Return: 1 meaning mf should be freed from _base_interrupt
258 * 0 means the mf is freed from this function.
259 */
u8
mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
	u32 reply)
{
	MPI2DefaultReply_t *mpi_reply;
	Mpi2SCSIIOReply_t *scsiio_reply;
	Mpi26NVMeEncapsulatedErrorReply_t *nvme_error_reply;
	const void *sense_data;
	u32 sz;

	/* ignore completions that do not belong to the active ctl command */
	if (ioc->ctl_cmds.status == MPT3_CMD_NOT_USED)
		return 1;
	if (ioc->ctl_cmds.smid != smid)
		return 1;
	ioc->ctl_cmds.status |= MPT3_CMD_COMPLETE;
	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (mpi_reply) {
		/* MsgLength is in 4-byte units */
		memcpy(ioc->ctl_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
		ioc->ctl_cmds.status |= MPT3_CMD_REPLY_VALID;
		/* get sense data */
		if (mpi_reply->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
		    mpi_reply->Function ==
		    MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
			scsiio_reply = (Mpi2SCSIIOReply_t *)mpi_reply;
			if (scsiio_reply->SCSIState &
			    MPI2_SCSI_STATE_AUTOSENSE_VALID) {
				/* clamp to the driver's sense buffer size */
				sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
				    le32_to_cpu(scsiio_reply->SenseCount));
				sense_data = mpt3sas_base_get_sense_buffer(ioc,
				    smid);
				memcpy(ioc->ctl_cmds.sense, sense_data, sz);
			}
		}
		/*
		 * Get Error Response data for NVMe device. The ctl_cmds.sense
		 * buffer is used to store the Error Response data.
		 */
		if (mpi_reply->Function == MPI2_FUNCTION_NVME_ENCAPSULATED) {
			nvme_error_reply =
			    (Mpi26NVMeEncapsulatedErrorReply_t *)mpi_reply;
			sz = min_t(u32, NVME_ERROR_RESPONSE_SIZE,
			    le16_to_cpu(nvme_error_reply->ErrorResponseCount));
			sense_data = mpt3sas_base_get_sense_buffer(ioc, smid);
			memcpy(ioc->ctl_cmds.sense, sense_data, sz);
		}
	}

	_ctl_display_some_debug(ioc, smid, "ctl_done", mpi_reply);
	ioc->ctl_cmds.status &= ~MPT3_CMD_PENDING;
	/* wake the ioctl path waiting on this command's completion */
	complete(&ioc->ctl_cmds.done);
	return 1;
}
312
313 /**
314 * _ctl_check_event_type - determines when an event needs logging
315 * @ioc: per adapter object
316 * @event: firmware event
317 *
318 * The bitmask in ioc->event_type[] indicates which events should be
319 * be saved in the driver event_log. This bitmask is set by application.
320 *
321 * Return: 1 when event should be captured, or zero means no match.
322 */
323 static int
_ctl_check_event_type(struct MPT3SAS_ADAPTER * ioc,u16 event)324 _ctl_check_event_type(struct MPT3SAS_ADAPTER *ioc, u16 event)
325 {
326 u16 i;
327 u32 desired_event;
328
329 if (event >= 128 || !event || !ioc->event_log)
330 return 0;
331
332 desired_event = (1 << (event % 32));
333 if (!desired_event)
334 desired_event = 1;
335 i = event / 32;
336 return desired_event & ioc->event_type[i];
337 }
338
339 /**
340 * mpt3sas_ctl_add_to_event_log - add event
341 * @ioc: per adapter object
342 * @mpi_reply: reply message frame
343 */
void
mpt3sas_ctl_add_to_event_log(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventNotificationReply_t *mpi_reply)
{
	struct MPT3_IOCTL_EVENTS *event_log;
	u16 event;
	int i;
	u32 sz, event_data_sz;
	u8 send_aen = 0;

	/* nothing to record until the application has allocated a log */
	if (!ioc->event_log)
		return;

	event = le16_to_cpu(mpi_reply->Event);

	if (_ctl_check_event_type(ioc, event)) {

		/* insert entry into circular event_log */
		i = ioc->event_context % MPT3SAS_CTL_EVENT_LOG_SIZE;
		event_log = ioc->event_log;
		event_log[i].event = event;
		event_log[i].context = ioc->event_context++;

		/* EventDataLength is in 4-byte units; clamp the copy to
		 * the fixed per-entry payload size and zero the remainder
		 */
		event_data_sz = le16_to_cpu(mpi_reply->EventDataLength)*4;
		sz = min_t(u32, event_data_sz, MPT3_EVENT_DATA_SIZE);
		memset(event_log[i].data, 0, MPT3_EVENT_DATA_SIZE);
		memcpy(event_log[i].data, mpi_reply->EventData, sz);
		send_aen = 1;
	}

	/* This aen_event_read_flag flag is set until the
	 * application has read the event log.
	 * For MPI2_EVENT_LOG_ENTRY_ADDED, we always notify.
	 */
	if (event == MPI2_EVENT_LOG_ENTRY_ADDED ||
	    (send_aen && !ioc->aen_event_read_flag)) {
		ioc->aen_event_read_flag = 1;
		/* wake pollers and signal SIGIO to fasync subscribers */
		wake_up_interruptible(&ctl_poll_wait);
		if (async_queue)
			kill_fasync(&async_queue, SIGIO, POLL_IN);
	}
}
386
387 /**
388 * mpt3sas_ctl_event_callback - firmware event handler (called at ISR time)
389 * @ioc: per adapter object
390 * @msix_index: MSIX table index supplied by the OS
391 * @reply: reply message frame(lower 32bit addr)
392 * Context: interrupt.
393 *
394 * This function merely adds a new work task into ioc->firmware_event_thread.
395 * The tasks are worked from _firmware_event_work in user context.
396 *
397 * Return: 1 meaning mf should be freed from _base_interrupt
398 * 0 means the mf is freed from this function.
399 */
400 u8
mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER * ioc,u8 msix_index,u32 reply)401 mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
402 u32 reply)
403 {
404 Mpi2EventNotificationReply_t *mpi_reply;
405
406 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
407 if (mpi_reply)
408 mpt3sas_ctl_add_to_event_log(ioc, mpi_reply);
409 return 1;
410 }
411
412 /**
413 * _ctl_verify_adapter - validates ioc_number passed from application
414 * @ioc_number: ?
415 * @iocpp: The ioc pointer is returned in this.
416 * @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device &
417 * MPI25_VERSION | MPI26_VERSION for mpt3ctl ioctl device.
418 *
419 * Return: (-1) means error, else ioc_number.
420 */
421 static int
_ctl_verify_adapter(int ioc_number,struct MPT3SAS_ADAPTER ** iocpp,int mpi_version)422 _ctl_verify_adapter(int ioc_number, struct MPT3SAS_ADAPTER **iocpp,
423 int mpi_version)
424 {
425 struct MPT3SAS_ADAPTER *ioc;
426 int version = 0;
427 /* global ioc lock to protect controller on list operations */
428 spin_lock(&gioc_lock);
429 list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
430 if (ioc->id != ioc_number)
431 continue;
432 /* Check whether this ioctl command is from right
433 * ioctl device or not, if not continue the search.
434 */
435 version = ioc->hba_mpi_version_belonged;
436 /* MPI25_VERSION and MPI26_VERSION uses same ioctl
437 * device.
438 */
439 if (mpi_version == (MPI25_VERSION | MPI26_VERSION)) {
440 if ((version == MPI25_VERSION) ||
441 (version == MPI26_VERSION))
442 goto out;
443 else
444 continue;
445 } else {
446 if (version != mpi_version)
447 continue;
448 }
449 out:
450 spin_unlock(&gioc_lock);
451 *iocpp = ioc;
452 return ioc_number;
453 }
454 spin_unlock(&gioc_lock);
455 *iocpp = NULL;
456 return -1;
457 }
458
459 /**
460 * mpt3sas_ctl_pre_reset_handler - reset callback handler (for ctl)
461 * @ioc: per adapter object
462 *
463 * The handler for doing any required cleanup or initialization.
464 */
mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER * ioc)465 void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
466 {
467 int i;
468 u8 issue_reset;
469
470 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
471 for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
472 if (!(ioc->diag_buffer_status[i] &
473 MPT3_DIAG_BUFFER_IS_REGISTERED))
474 continue;
475 if ((ioc->diag_buffer_status[i] &
476 MPT3_DIAG_BUFFER_IS_RELEASED))
477 continue;
478
479 /*
480 * add a log message to indicate the release
481 */
482 ioc_info(ioc,
483 "%s: Releasing the trace buffer due to adapter reset.",
484 __func__);
485 ioc->htb_rel.buffer_rel_condition =
486 MPT3_DIAG_BUFFER_REL_TRIGGER;
487 mpt3sas_send_diag_release(ioc, i, &issue_reset);
488 }
489 }
490
491 /**
492 * mpt3sas_ctl_clear_outstanding_ioctls - clears outstanding ioctl cmd.
493 * @ioc: per adapter object
494 *
495 * The handler for doing any required cleanup or initialization.
496 */
mpt3sas_ctl_clear_outstanding_ioctls(struct MPT3SAS_ADAPTER * ioc)497 void mpt3sas_ctl_clear_outstanding_ioctls(struct MPT3SAS_ADAPTER *ioc)
498 {
499 dtmprintk(ioc,
500 ioc_info(ioc, "%s: clear outstanding ioctl cmd\n", __func__));
501 if (ioc->ctl_cmds.status & MPT3_CMD_PENDING) {
502 ioc->ctl_cmds.status |= MPT3_CMD_RESET;
503 mpt3sas_base_free_smid(ioc, ioc->ctl_cmds.smid);
504 complete(&ioc->ctl_cmds.done);
505 }
506 }
507
508 /**
509 * mpt3sas_ctl_reset_done_handler - reset callback handler (for ctl)
510 * @ioc: per adapter object
511 *
512 * The handler for doing any required cleanup or initialization.
513 */
mpt3sas_ctl_reset_done_handler(struct MPT3SAS_ADAPTER * ioc)514 void mpt3sas_ctl_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
515 {
516 int i;
517
518 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
519
520 for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
521 if (!(ioc->diag_buffer_status[i] &
522 MPT3_DIAG_BUFFER_IS_REGISTERED))
523 continue;
524 if ((ioc->diag_buffer_status[i] &
525 MPT3_DIAG_BUFFER_IS_RELEASED))
526 continue;
527 ioc->diag_buffer_status[i] |=
528 MPT3_DIAG_BUFFER_IS_DIAG_RESET;
529 }
530 }
531
532 /**
533 * _ctl_fasync -
534 * @fd: ?
535 * @filep: ?
536 * @mode: ?
537 *
538 * Called when application request fasyn callback handler.
539 */
static int
_ctl_fasync(int fd, struct file *filep, int mode)
{
	/* register/deregister this file with the shared SIGIO queue used
	 * by mpt3sas_ctl_add_to_event_log() to notify applications
	 */
	return fasync_helper(fd, filep, mode, &async_queue);
}
545
546 /**
547 * _ctl_poll -
548 * @filep: ?
549 * @wait: ?
550 *
551 */
552 static __poll_t
_ctl_poll(struct file * filep,poll_table * wait)553 _ctl_poll(struct file *filep, poll_table *wait)
554 {
555 struct MPT3SAS_ADAPTER *ioc;
556
557 poll_wait(filep, &ctl_poll_wait, wait);
558
559 /* global ioc lock to protect controller on list operations */
560 spin_lock(&gioc_lock);
561 list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
562 if (ioc->aen_event_read_flag) {
563 spin_unlock(&gioc_lock);
564 return EPOLLIN | EPOLLRDNORM;
565 }
566 }
567 spin_unlock(&gioc_lock);
568 return 0;
569 }
570
571 /**
572 * _ctl_set_task_mid - assign an active smid to tm request
573 * @ioc: per adapter object
574 * @karg: (struct mpt3_ioctl_command)
575 * @tm_request: pointer to mf from user space
576 *
577 * Return: 0 when an smid if found, else fail.
578 * during failure, the reply frame is filled.
579 */
static int
_ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
	Mpi2SCSITaskManagementRequest_t *tm_request)
{
	bool found = false;
	u16 smid;
	u16 handle;
	struct scsi_cmnd *scmd;
	struct MPT3SAS_DEVICE *priv_data;
	Mpi2SCSITaskManagementReply_t *tm_reply;
	u32 sz;
	u32 lun;
	char *desc = NULL;

	/* only ABORT_TASK and QUERY_TASK target a specific outstanding
	 * command; all other TM types need no smid lookup
	 */
	if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
		desc = "abort_task";
	else if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
		desc = "query_task";
	else
		return 0;

	lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN);

	handle = le16_to_cpu(tm_request->DevHandle);
	/* walk outstanding SCSI IO smids from highest to lowest looking
	 * for a command matching the requested device handle and lun
	 */
	for (smid = ioc->scsiio_depth; smid && !found; smid--) {
		struct scsiio_tracker *st;
		__le16 task_mid;

		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
		if (!scmd)
			continue;
		if (lun != scmd->device->lun)
			continue;
		priv_data = scmd->device->hostdata;
		if (priv_data->sas_target == NULL)
			continue;
		if (priv_data->sas_target->handle != handle)
			continue;
		st = scsi_cmd_priv(scmd);

		/*
		 * If the given TaskMID from the user space is zero, then the
		 * first outstanding smid will be picked up. Otherwise,
		 * targeted smid will be the one.
		 */
		task_mid = cpu_to_le16(st->smid);
		if (!tm_request->TaskMID)
			tm_request->TaskMID = task_mid;
		found = tm_request->TaskMID == task_mid;
	}

	if (!found) {
		dctlprintk(ioc,
			   ioc_info(ioc, "%s: handle(0x%04x), lun(%d), no active mid!!\n",
				    desc, le16_to_cpu(tm_request->DevHandle),
				    lun));
		/* no matching active command: synthesize a TM reply frame
		 * and copy it to user space so the caller still sees a
		 * well-formed completion
		 */
		tm_reply = ioc->ctl_cmds.reply;
		tm_reply->DevHandle = tm_request->DevHandle;
		tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
		tm_reply->TaskType = tm_request->TaskType;
		/* MsgLength is in 4-byte units */
		tm_reply->MsgLength = sizeof(Mpi2SCSITaskManagementReply_t)/4;
		tm_reply->VP_ID = tm_request->VP_ID;
		tm_reply->VF_ID = tm_request->VF_ID;
		sz = min_t(u32, karg->max_reply_bytes, ioc->reply_sz);
		if (copy_to_user(karg->reply_frame_buf_ptr, ioc->ctl_cmds.reply,
		    sz))
			pr_err("failure at %s:%d/%s()!\n", __FILE__,
			    __LINE__, __func__);
		return 1;
	}

	dctlprintk(ioc,
		   ioc_info(ioc, "%s: handle(0x%04x), lun(%d), task_mid(%d)\n",
			    desc, le16_to_cpu(tm_request->DevHandle), lun,
			    le16_to_cpu(tm_request->TaskMID)));
	return 0;
}
657
658 /**
659 * _ctl_send_mctp_passthru_req - Send an MCTP passthru request
660 * @ioc: per adapter object
661 * @mctp_passthru_req: MPI mctp passhthru request from caller
662 * @psge: pointer to the H2DSGL
663 * @data_out_dma: DMA buffer for H2D SGL
664 * @data_out_sz: H2D length
665 * @data_in_dma: DMA buffer for D2H SGL
666 * @data_in_sz: D2H length
667 * @smid: SMID to submit the request
668 *
669 */
static void
_ctl_send_mctp_passthru_req(
	struct MPT3SAS_ADAPTER *ioc,
	Mpi26MctpPassthroughRequest_t *mctp_passthru_req, void *psge,
	dma_addr_t data_out_dma, int data_out_sz,
	dma_addr_t data_in_dma, int data_in_sz,
	u16 smid)
{
	/* NOTE(review): lengths are stored without a cpu_to_le*()
	 * conversion — confirm H2DLength/D2HLength are native-endian in
	 * the MPI headers, or that only little-endian hosts are supported.
	 */
	mctp_passthru_req->H2DLength = data_out_sz;
	mctp_passthru_req->D2HLength = data_in_sz;

	/* Build the H2D SGL from the data out buffer */
	ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, 0, 0);

	/* the D2H SGL immediately follows the H2D SGL in the frame */
	psge += ioc->sge_size_ieee;

	/* Build the D2H SGL for the data in buffer */
	ioc->build_sg(ioc, psge, 0, 0, data_in_dma, data_in_sz);

	/* submit the frame on the default (non-high-priority) queue */
	ioc->put_smid_default(ioc, smid);
}
691
692 /**
693 * _ctl_do_mpt_command - main handler for MPT3COMMAND opcode
694 * @ioc: per adapter object
695 * @karg: (struct mpt3_ioctl_command)
696 * @mf: pointer to mf in user space
697 */
698 static long
_ctl_do_mpt_command(struct MPT3SAS_ADAPTER * ioc,struct mpt3_ioctl_command karg,void __user * mf)699 _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
700 void __user *mf)
701 {
702 MPI2RequestHeader_t *mpi_request = NULL, *request;
703 MPI2DefaultReply_t *mpi_reply;
704 Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request = NULL;
705 struct _pcie_device *pcie_device = NULL;
706 u16 smid;
707 unsigned long timeout;
708 u8 issue_reset;
709 u32 sz, sz_arg;
710 void *psge;
711 void *data_out = NULL;
712 dma_addr_t data_out_dma = 0;
713 size_t data_out_sz = 0;
714 void *data_in = NULL;
715 dma_addr_t data_in_dma = 0;
716 size_t data_in_sz = 0;
717 long ret;
718 u16 device_handle = MPT3SAS_INVALID_DEVICE_HANDLE;
719 int tm_ret;
720
721 issue_reset = 0;
722
723 if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
724 ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
725 ret = -EAGAIN;
726 goto out;
727 }
728
729 ret = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
730 if (ret)
731 goto out;
732
733 mpi_request = kzalloc(ioc->request_sz, GFP_KERNEL);
734 if (!mpi_request) {
735 ioc_err(ioc, "%s: failed obtaining a memory for mpi_request\n",
736 __func__);
737 ret = -ENOMEM;
738 goto out;
739 }
740
741 /* Check for overflow and wraparound */
742 if (karg.data_sge_offset * 4 > ioc->request_sz ||
743 karg.data_sge_offset > (UINT_MAX / 4)) {
744 ret = -EINVAL;
745 goto out;
746 }
747
748 /* copy in request message frame from user */
749 if (copy_from_user(mpi_request, mf, karg.data_sge_offset*4)) {
750 pr_err("failure at %s:%d/%s()!\n", __FILE__, __LINE__,
751 __func__);
752 ret = -EFAULT;
753 goto out;
754 }
755
756 if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
757 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->ctl_cb_idx);
758 if (!smid) {
759 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
760 ret = -EAGAIN;
761 goto out;
762 }
763 } else {
764 /* Use first reserved smid for passthrough ioctls */
765 smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
766 }
767
768 ret = 0;
769 ioc->ctl_cmds.status = MPT3_CMD_PENDING;
770 memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
771 request = mpt3sas_base_get_msg_frame(ioc, smid);
772 memset(request, 0, ioc->request_sz);
773 memcpy(request, mpi_request, karg.data_sge_offset*4);
774 ioc->ctl_cmds.smid = smid;
775 data_out_sz = karg.data_out_size;
776 data_in_sz = karg.data_in_size;
777
778 if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
779 mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
780 mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT ||
781 mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH ||
782 mpi_request->Function == MPI2_FUNCTION_NVME_ENCAPSULATED) {
783
784 device_handle = le16_to_cpu(mpi_request->FunctionDependent1);
785 if (!device_handle || (device_handle >
786 ioc->facts.MaxDevHandle)) {
787 ret = -EINVAL;
788 mpt3sas_base_free_smid(ioc, smid);
789 goto out;
790 }
791 }
792
793 /* obtain dma-able memory for data transfer */
794 if (data_out_sz) /* WRITE */ {
795 data_out = dma_alloc_coherent(&ioc->pdev->dev, data_out_sz,
796 &data_out_dma, GFP_KERNEL);
797 if (!data_out) {
798 pr_err("failure at %s:%d/%s()!\n", __FILE__,
799 __LINE__, __func__);
800 ret = -ENOMEM;
801 mpt3sas_base_free_smid(ioc, smid);
802 goto out;
803 }
804 if (copy_from_user(data_out, karg.data_out_buf_ptr,
805 data_out_sz)) {
806 pr_err("failure at %s:%d/%s()!\n", __FILE__,
807 __LINE__, __func__);
808 ret = -EFAULT;
809 mpt3sas_base_free_smid(ioc, smid);
810 goto out;
811 }
812 }
813
814 if (data_in_sz) /* READ */ {
815 data_in = dma_alloc_coherent(&ioc->pdev->dev, data_in_sz,
816 &data_in_dma, GFP_KERNEL);
817 if (!data_in) {
818 pr_err("failure at %s:%d/%s()!\n", __FILE__,
819 __LINE__, __func__);
820 ret = -ENOMEM;
821 mpt3sas_base_free_smid(ioc, smid);
822 goto out;
823 }
824 }
825
826 psge = (void *)request + (karg.data_sge_offset*4);
827
828 /* send command to firmware */
829 _ctl_display_some_debug(ioc, smid, "ctl_request", NULL);
830
831 init_completion(&ioc->ctl_cmds.done);
832 switch (mpi_request->Function) {
833 case MPI2_FUNCTION_MCTP_PASSTHROUGH:
834 {
835 Mpi26MctpPassthroughRequest_t *mctp_passthru_req =
836 (Mpi26MctpPassthroughRequest_t *)request;
837
838 if (!(ioc->facts.IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_MCTP_PASSTHRU)) {
839 ioc_err(ioc, "%s: MCTP Passthrough request not supported\n",
840 __func__);
841 mpt3sas_base_free_smid(ioc, smid);
842 ret = -EINVAL;
843 goto out;
844 }
845
846 _ctl_send_mctp_passthru_req(ioc, mctp_passthru_req, psge, data_out_dma,
847 data_out_sz, data_in_dma, data_in_sz, smid);
848 break;
849 }
850 case MPI2_FUNCTION_NVME_ENCAPSULATED:
851 {
852 nvme_encap_request = (Mpi26NVMeEncapsulatedRequest_t *)request;
853 if (!ioc->pcie_sg_lookup) {
854 dtmprintk(ioc, ioc_info(ioc,
855 "HBA doesn't support NVMe. Rejecting NVMe Encapsulated request.\n"
856 ));
857
858 if (ioc->logging_level & MPT_DEBUG_TM)
859 _debug_dump_mf(nvme_encap_request,
860 ioc->request_sz/4);
861 mpt3sas_base_free_smid(ioc, smid);
862 ret = -EINVAL;
863 goto out;
864 }
865 /*
866 * Get the Physical Address of the sense buffer.
867 * Use Error Response buffer address field to hold the sense
868 * buffer address.
869 * Clear the internal sense buffer, which will potentially hold
870 * the Completion Queue Entry on return, or 0 if no Entry.
871 * Build the PRPs and set direction bits.
872 * Send the request.
873 */
874 nvme_encap_request->ErrorResponseBaseAddress =
875 cpu_to_le64(ioc->sense_dma & 0xFFFFFFFF00000000UL);
876 nvme_encap_request->ErrorResponseBaseAddress |=
877 cpu_to_le64(le32_to_cpu(
878 mpt3sas_base_get_sense_buffer_dma(ioc, smid)));
879 nvme_encap_request->ErrorResponseAllocationLength =
880 cpu_to_le16(NVME_ERROR_RESPONSE_SIZE);
881 memset(ioc->ctl_cmds.sense, 0, NVME_ERROR_RESPONSE_SIZE);
882 ioc->build_nvme_prp(ioc, smid, nvme_encap_request,
883 data_out_dma, data_out_sz, data_in_dma, data_in_sz);
884 if (test_bit(device_handle, ioc->device_remove_in_progress)) {
885 dtmprintk(ioc,
886 ioc_info(ioc, "handle(0x%04x): ioctl failed due to device removal in progress\n",
887 device_handle));
888 mpt3sas_base_free_smid(ioc, smid);
889 ret = -EINVAL;
890 goto out;
891 }
892 mpt3sas_base_put_smid_nvme_encap(ioc, smid);
893 break;
894 }
895 case MPI2_FUNCTION_SCSI_IO_REQUEST:
896 case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
897 {
898 Mpi2SCSIIORequest_t *scsiio_request =
899 (Mpi2SCSIIORequest_t *)request;
900 scsiio_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
901 scsiio_request->SenseBufferLowAddress =
902 mpt3sas_base_get_sense_buffer_dma(ioc, smid);
903 memset(ioc->ctl_cmds.sense, 0, SCSI_SENSE_BUFFERSIZE);
904 if (test_bit(device_handle, ioc->device_remove_in_progress)) {
905 dtmprintk(ioc,
906 ioc_info(ioc, "handle(0x%04x) :ioctl failed due to device removal in progress\n",
907 device_handle));
908 mpt3sas_base_free_smid(ioc, smid);
909 ret = -EINVAL;
910 goto out;
911 }
912 ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
913 data_in_dma, data_in_sz);
914 if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)
915 ioc->put_smid_scsi_io(ioc, smid, device_handle);
916 else
917 ioc->put_smid_default(ioc, smid);
918 break;
919 }
920 case MPI2_FUNCTION_SCSI_TASK_MGMT:
921 {
922 Mpi2SCSITaskManagementRequest_t *tm_request =
923 (Mpi2SCSITaskManagementRequest_t *)request;
924
925 dtmprintk(ioc,
926 ioc_info(ioc, "TASK_MGMT: handle(0x%04x), task_type(0x%02x)\n",
927 le16_to_cpu(tm_request->DevHandle),
928 tm_request->TaskType));
929 ioc->got_task_abort_from_ioctl = 1;
930 if (tm_request->TaskType ==
931 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
932 tm_request->TaskType ==
933 MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) {
934 if (_ctl_set_task_mid(ioc, &karg, tm_request)) {
935 mpt3sas_base_free_smid(ioc, smid);
936 ioc->got_task_abort_from_ioctl = 0;
937 goto out;
938 }
939 }
940 ioc->got_task_abort_from_ioctl = 0;
941
942 if (test_bit(device_handle, ioc->device_remove_in_progress)) {
943 dtmprintk(ioc,
944 ioc_info(ioc, "handle(0x%04x) :ioctl failed due to device removal in progress\n",
945 device_handle));
946 mpt3sas_base_free_smid(ioc, smid);
947 ret = -EINVAL;
948 goto out;
949 }
950 mpt3sas_scsih_set_tm_flag(ioc, le16_to_cpu(
951 tm_request->DevHandle));
952 ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
953 data_in_dma, data_in_sz);
954 ioc->put_smid_hi_priority(ioc, smid, 0);
955 break;
956 }
957 case MPI2_FUNCTION_SMP_PASSTHROUGH:
958 {
959 Mpi2SmpPassthroughRequest_t *smp_request =
960 (Mpi2SmpPassthroughRequest_t *)mpi_request;
961 u8 *data;
962
963 if (!ioc->multipath_on_hba) {
964 /* ioc determines which port to use */
965 smp_request->PhysicalPort = 0xFF;
966 }
967 if (smp_request->PassthroughFlags &
968 MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE)
969 data = (u8 *)&smp_request->SGL;
970 else {
971 if (unlikely(data_out == NULL)) {
972 pr_err("failure at %s:%d/%s()!\n",
973 __FILE__, __LINE__, __func__);
974 mpt3sas_base_free_smid(ioc, smid);
975 ret = -EINVAL;
976 goto out;
977 }
978 data = data_out;
979 }
980
981 if (data[1] == 0x91 && (data[10] == 1 || data[10] == 2)) {
982 ioc->ioc_link_reset_in_progress = 1;
983 ioc->ignore_loginfos = 1;
984 }
985 ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
986 data_in_sz);
987 ioc->put_smid_default(ioc, smid);
988 break;
989 }
990 case MPI2_FUNCTION_SATA_PASSTHROUGH:
991 {
992 if (test_bit(device_handle, ioc->device_remove_in_progress)) {
993 dtmprintk(ioc,
994 ioc_info(ioc, "handle(0x%04x) :ioctl failed due to device removal in progress\n",
995 device_handle));
996 mpt3sas_base_free_smid(ioc, smid);
997 ret = -EINVAL;
998 goto out;
999 }
1000 ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
1001 data_in_sz);
1002 ioc->put_smid_default(ioc, smid);
1003 break;
1004 }
1005 case MPI2_FUNCTION_FW_DOWNLOAD:
1006 {
1007 if (ioc->pdev->vendor == MPI2_MFGPAGE_VENDORID_ATTO) {
1008 ioc_info(ioc, "Firmware download not supported for ATTO HBA.\n");
1009 ret = -EPERM;
1010 break;
1011 }
1012 fallthrough;
1013 }
1014 case MPI2_FUNCTION_FW_UPLOAD:
1015 {
1016 ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
1017 data_in_sz);
1018 ioc->put_smid_default(ioc, smid);
1019 break;
1020 }
1021 case MPI2_FUNCTION_TOOLBOX:
1022 {
1023 Mpi2ToolboxCleanRequest_t *toolbox_request =
1024 (Mpi2ToolboxCleanRequest_t *)mpi_request;
1025
1026 if ((toolbox_request->Tool == MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL)
1027 || (toolbox_request->Tool ==
1028 MPI26_TOOLBOX_BACKEND_PCIE_LANE_MARGIN))
1029 ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
1030 data_in_dma, data_in_sz);
1031 else if (toolbox_request->Tool ==
1032 MPI2_TOOLBOX_MEMORY_MOVE_TOOL) {
1033 Mpi2ToolboxMemMoveRequest_t *mem_move_request =
1034 (Mpi2ToolboxMemMoveRequest_t *)request;
1035 Mpi2SGESimple64_t tmp, *src = NULL, *dst = NULL;
1036
1037 ioc->build_sg_mpi(ioc, psge, data_out_dma,
1038 data_out_sz, data_in_dma, data_in_sz);
1039 if (data_out_sz && !data_in_sz) {
1040 dst =
1041 (Mpi2SGESimple64_t *)&mem_move_request->SGL;
1042 src = (void *)dst + ioc->sge_size;
1043
1044 memcpy(&tmp, src, ioc->sge_size);
1045 memcpy(src, dst, ioc->sge_size);
1046 memcpy(dst, &tmp, ioc->sge_size);
1047 }
1048 if (ioc->logging_level & MPT_DEBUG_TM) {
1049 ioc_info(ioc,
1050 "Mpi2ToolboxMemMoveRequest_t request msg\n");
1051 _debug_dump_mf(mem_move_request,
1052 ioc->request_sz/4);
1053 }
1054 } else
1055 ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
1056 data_in_dma, data_in_sz);
1057 ioc->put_smid_default(ioc, smid);
1058 break;
1059 }
1060 case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
1061 {
1062 Mpi2SasIoUnitControlRequest_t *sasiounit_request =
1063 (Mpi2SasIoUnitControlRequest_t *)mpi_request;
1064
1065 if (sasiounit_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET
1066 || sasiounit_request->Operation ==
1067 MPI2_SAS_OP_PHY_LINK_RESET) {
1068 ioc->ioc_link_reset_in_progress = 1;
1069 ioc->ignore_loginfos = 1;
1070 }
1071 /* drop to default case for posting the request */
1072 }
1073 fallthrough;
1074 default:
1075 ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
1076 data_in_dma, data_in_sz);
1077 ioc->put_smid_default(ioc, smid);
1078 break;
1079 }
1080
1081 if (karg.timeout < MPT3_IOCTL_DEFAULT_TIMEOUT)
1082 timeout = MPT3_IOCTL_DEFAULT_TIMEOUT;
1083 else
1084 timeout = karg.timeout;
1085 wait_for_completion_timeout(&ioc->ctl_cmds.done, timeout*HZ);
1086 if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
1087 Mpi2SCSITaskManagementRequest_t *tm_request =
1088 (Mpi2SCSITaskManagementRequest_t *)mpi_request;
1089 mpt3sas_scsih_clear_tm_flag(ioc, le16_to_cpu(
1090 tm_request->DevHandle));
1091 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
1092 } else if ((mpi_request->Function == MPI2_FUNCTION_SMP_PASSTHROUGH ||
1093 mpi_request->Function == MPI2_FUNCTION_SAS_IO_UNIT_CONTROL) &&
1094 ioc->ioc_link_reset_in_progress) {
1095 ioc->ioc_link_reset_in_progress = 0;
1096 ioc->ignore_loginfos = 0;
1097 }
1098 if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
1099 mpt3sas_check_cmd_timeout(ioc,
1100 ioc->ctl_cmds.status, mpi_request,
1101 karg.data_sge_offset, issue_reset);
1102 goto issue_host_reset;
1103 }
1104
1105 mpi_reply = ioc->ctl_cmds.reply;
1106
1107 if (mpi_reply->Function == MPI2_FUNCTION_SCSI_TASK_MGMT &&
1108 (ioc->logging_level & MPT_DEBUG_TM)) {
1109 Mpi2SCSITaskManagementReply_t *tm_reply =
1110 (Mpi2SCSITaskManagementReply_t *)mpi_reply;
1111
1112 ioc_info(ioc, "TASK_MGMT: IOCStatus(0x%04x), IOCLogInfo(0x%08x), TerminationCount(0x%08x)\n",
1113 le16_to_cpu(tm_reply->IOCStatus),
1114 le32_to_cpu(tm_reply->IOCLogInfo),
1115 le32_to_cpu(tm_reply->TerminationCount));
1116 }
1117
1118 /* copy out xdata to user */
1119 if (data_in_sz) {
1120 if (copy_to_user(karg.data_in_buf_ptr, data_in,
1121 data_in_sz)) {
1122 pr_err("failure at %s:%d/%s()!\n", __FILE__,
1123 __LINE__, __func__);
1124 ret = -ENODATA;
1125 goto out;
1126 }
1127 }
1128
1129 /* copy out reply message frame to user */
1130 if (karg.max_reply_bytes) {
1131 sz = min_t(u32, karg.max_reply_bytes, ioc->reply_sz);
1132 if (copy_to_user(karg.reply_frame_buf_ptr, ioc->ctl_cmds.reply,
1133 sz)) {
1134 pr_err("failure at %s:%d/%s()!\n", __FILE__,
1135 __LINE__, __func__);
1136 ret = -ENODATA;
1137 goto out;
1138 }
1139 }
1140
1141 /* copy out sense/NVMe Error Response to user */
1142 if (karg.max_sense_bytes && (mpi_request->Function ==
1143 MPI2_FUNCTION_SCSI_IO_REQUEST || mpi_request->Function ==
1144 MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH || mpi_request->Function ==
1145 MPI2_FUNCTION_NVME_ENCAPSULATED)) {
1146 if (karg.sense_data_ptr == NULL) {
1147 ioc_info(ioc, "Response buffer provided by application is NULL; Response data will not be returned\n");
1148 goto out;
1149 }
1150 sz_arg = (mpi_request->Function ==
1151 MPI2_FUNCTION_NVME_ENCAPSULATED) ? NVME_ERROR_RESPONSE_SIZE :
1152 SCSI_SENSE_BUFFERSIZE;
1153 sz = min_t(u32, karg.max_sense_bytes, sz_arg);
1154 if (copy_to_user(karg.sense_data_ptr, ioc->ctl_cmds.sense,
1155 sz)) {
1156 pr_err("failure at %s:%d/%s()!\n", __FILE__,
1157 __LINE__, __func__);
1158 ret = -ENODATA;
1159 goto out;
1160 }
1161 }
1162
1163 issue_host_reset:
1164 if (issue_reset) {
1165 ret = -ENODATA;
1166 if ((mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
1167 mpi_request->Function ==
1168 MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
1169 mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH)) {
1170 ioc_info(ioc, "issue target reset: handle = (0x%04x)\n",
1171 le16_to_cpu(mpi_request->FunctionDependent1));
1172 mpt3sas_halt_firmware(ioc);
1173 pcie_device = mpt3sas_get_pdev_by_handle(ioc,
1174 le16_to_cpu(mpi_request->FunctionDependent1));
1175 if (pcie_device && (!ioc->tm_custom_handling) &&
1176 (!(mpt3sas_scsih_is_pcie_scsi_device(
1177 pcie_device->device_info))))
1178 tm_ret = mpt3sas_scsih_issue_locked_tm(ioc,
1179 le16_to_cpu(mpi_request->FunctionDependent1),
1180 0, 0, 0,
1181 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
1182 0, pcie_device->reset_timeout,
1183 MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE);
1184 else
1185 tm_ret = mpt3sas_scsih_issue_locked_tm(ioc,
1186 le16_to_cpu(mpi_request->FunctionDependent1),
1187 0, 0, 0,
1188 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
1189 0, 30, MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET);
1190
1191 if (tm_ret != SUCCESS) {
1192 ioc_info(ioc,
1193 "target reset failed, issue hard reset: handle (0x%04x)\n",
1194 le16_to_cpu(mpi_request->FunctionDependent1));
1195 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
1196 }
1197 } else
1198 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
1199 }
1200
1201 out:
1202 if (pcie_device)
1203 pcie_device_put(pcie_device);
1204
1205 /* free memory associated with sg buffers */
1206 if (data_in)
1207 dma_free_coherent(&ioc->pdev->dev, data_in_sz, data_in,
1208 data_in_dma);
1209
1210 if (data_out)
1211 dma_free_coherent(&ioc->pdev->dev, data_out_sz, data_out,
1212 data_out_dma);
1213
1214 kfree(mpi_request);
1215 ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
1216 return ret;
1217 }
1218
1219 /**
1220 * _ctl_getiocinfo - main handler for MPT3IOCINFO opcode
1221 * @ioc: per adapter object
1222 * @arg: user space buffer containing ioctl content
1223 */
1224 static long
_ctl_getiocinfo(struct MPT3SAS_ADAPTER * ioc,void __user * arg)1225 _ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1226 {
1227 struct mpt3_ioctl_iocinfo karg;
1228
1229 dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
1230 __func__));
1231
1232 memset(&karg, 0 , sizeof(karg));
1233 if (ioc->pfacts)
1234 karg.port_number = ioc->pfacts[0].PortNumber;
1235 karg.hw_rev = ioc->pdev->revision;
1236 karg.pci_id = ioc->pdev->device;
1237 karg.subsystem_device = ioc->pdev->subsystem_device;
1238 karg.subsystem_vendor = ioc->pdev->subsystem_vendor;
1239 karg.pci_information.u.bits.bus = ioc->pdev->bus->number;
1240 karg.pci_information.u.bits.device = PCI_SLOT(ioc->pdev->devfn);
1241 karg.pci_information.u.bits.function = PCI_FUNC(ioc->pdev->devfn);
1242 karg.pci_information.segment_id = pci_domain_nr(ioc->pdev->bus);
1243 karg.firmware_version = ioc->facts.FWVersion.Word;
1244 strcpy(karg.driver_version, ioc->driver_name);
1245 strcat(karg.driver_version, "-");
1246 switch (ioc->hba_mpi_version_belonged) {
1247 case MPI2_VERSION:
1248 if (ioc->is_warpdrive)
1249 karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2_SSS6200;
1250 else
1251 karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2;
1252 strcat(karg.driver_version, MPT2SAS_DRIVER_VERSION);
1253 break;
1254 case MPI25_VERSION:
1255 case MPI26_VERSION:
1256 if (ioc->is_gen35_ioc)
1257 karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS35;
1258 else
1259 karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS3;
1260 strcat(karg.driver_version, MPT3SAS_DRIVER_VERSION);
1261 break;
1262 }
1263 karg.bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
1264
1265 karg.driver_capability |= MPT3_IOCTL_IOCINFO_DRIVER_CAP_MCTP_PASSTHRU;
1266
1267 if (copy_to_user(arg, &karg, sizeof(karg))) {
1268 pr_err("failure at %s:%d/%s()!\n",
1269 __FILE__, __LINE__, __func__);
1270 return -EFAULT;
1271 }
1272 return 0;
1273 }
1274
1275 /**
1276 * _ctl_eventquery - main handler for MPT3EVENTQUERY opcode
1277 * @ioc: per adapter object
1278 * @arg: user space buffer containing ioctl content
1279 */
1280 static long
_ctl_eventquery(struct MPT3SAS_ADAPTER * ioc,void __user * arg)1281 _ctl_eventquery(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1282 {
1283 struct mpt3_ioctl_eventquery karg;
1284
1285 if (copy_from_user(&karg, arg, sizeof(karg))) {
1286 pr_err("failure at %s:%d/%s()!\n",
1287 __FILE__, __LINE__, __func__);
1288 return -EFAULT;
1289 }
1290
1291 dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
1292 __func__));
1293
1294 karg.event_entries = MPT3SAS_CTL_EVENT_LOG_SIZE;
1295 memcpy(karg.event_types, ioc->event_type,
1296 MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32));
1297
1298 if (copy_to_user(arg, &karg, sizeof(karg))) {
1299 pr_err("failure at %s:%d/%s()!\n",
1300 __FILE__, __LINE__, __func__);
1301 return -EFAULT;
1302 }
1303 return 0;
1304 }
1305
1306 /**
1307 * _ctl_eventenable - main handler for MPT3EVENTENABLE opcode
1308 * @ioc: per adapter object
1309 * @arg: user space buffer containing ioctl content
1310 */
1311 static long
_ctl_eventenable(struct MPT3SAS_ADAPTER * ioc,void __user * arg)1312 _ctl_eventenable(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1313 {
1314 struct mpt3_ioctl_eventenable karg;
1315
1316 if (copy_from_user(&karg, arg, sizeof(karg))) {
1317 pr_err("failure at %s:%d/%s()!\n",
1318 __FILE__, __LINE__, __func__);
1319 return -EFAULT;
1320 }
1321
1322 dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
1323 __func__));
1324
1325 memcpy(ioc->event_type, karg.event_types,
1326 MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32));
1327 mpt3sas_base_validate_event_type(ioc, ioc->event_type);
1328
1329 if (ioc->event_log)
1330 return 0;
1331 /* initialize event_log */
1332 ioc->event_context = 0;
1333 ioc->aen_event_read_flag = 0;
1334 ioc->event_log = kcalloc(MPT3SAS_CTL_EVENT_LOG_SIZE,
1335 sizeof(struct MPT3_IOCTL_EVENTS), GFP_KERNEL);
1336 if (!ioc->event_log) {
1337 pr_err("failure at %s:%d/%s()!\n",
1338 __FILE__, __LINE__, __func__);
1339 return -ENOMEM;
1340 }
1341 return 0;
1342 }
1343
1344 /**
1345 * _ctl_eventreport - main handler for MPT3EVENTREPORT opcode
1346 * @ioc: per adapter object
1347 * @arg: user space buffer containing ioctl content
1348 */
1349 static long
_ctl_eventreport(struct MPT3SAS_ADAPTER * ioc,void __user * arg)1350 _ctl_eventreport(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1351 {
1352 struct mpt3_ioctl_eventreport karg;
1353 u32 number_bytes, max_events, max;
1354 struct mpt3_ioctl_eventreport __user *uarg = arg;
1355
1356 if (copy_from_user(&karg, arg, sizeof(karg))) {
1357 pr_err("failure at %s:%d/%s()!\n",
1358 __FILE__, __LINE__, __func__);
1359 return -EFAULT;
1360 }
1361
1362 dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
1363 __func__));
1364
1365 number_bytes = karg.hdr.max_data_size -
1366 sizeof(struct mpt3_ioctl_header);
1367 max_events = number_bytes/sizeof(struct MPT3_IOCTL_EVENTS);
1368 max = min_t(u32, MPT3SAS_CTL_EVENT_LOG_SIZE, max_events);
1369
1370 /* If fewer than 1 event is requested, there must have
1371 * been some type of error.
1372 */
1373 if (!max || !ioc->event_log)
1374 return -ENODATA;
1375
1376 number_bytes = max * sizeof(struct MPT3_IOCTL_EVENTS);
1377 if (copy_to_user(uarg->event_data, ioc->event_log, number_bytes)) {
1378 pr_err("failure at %s:%d/%s()!\n",
1379 __FILE__, __LINE__, __func__);
1380 return -EFAULT;
1381 }
1382
1383 /* reset flag so SIGIO can restart */
1384 ioc->aen_event_read_flag = 0;
1385 return 0;
1386 }
1387
1388 /**
1389 * _ctl_do_reset - main handler for MPT3HARDRESET opcode
1390 * @ioc: per adapter object
1391 * @arg: user space buffer containing ioctl content
1392 */
1393 static long
_ctl_do_reset(struct MPT3SAS_ADAPTER * ioc,void __user * arg)1394 _ctl_do_reset(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1395 {
1396 struct mpt3_ioctl_diag_reset karg;
1397 int retval;
1398
1399 if (copy_from_user(&karg, arg, sizeof(karg))) {
1400 pr_err("failure at %s:%d/%s()!\n",
1401 __FILE__, __LINE__, __func__);
1402 return -EFAULT;
1403 }
1404
1405 if (ioc->shost_recovery || ioc->pci_error_recovery ||
1406 ioc->is_driver_loading)
1407 return -EAGAIN;
1408
1409 dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
1410 __func__));
1411
1412 ioc->reset_from_user = 1;
1413 retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
1414 ioc_info(ioc,
1415 "Ioctl: host reset: %s\n", ((!retval) ? "SUCCESS" : "FAILED"));
1416 return 0;
1417 }
1418
1419 /**
1420 * _ctl_btdh_search_sas_device - searching for sas device
1421 * @ioc: per adapter object
1422 * @btdh: btdh ioctl payload
1423 */
1424 static int
_ctl_btdh_search_sas_device(struct MPT3SAS_ADAPTER * ioc,struct mpt3_ioctl_btdh_mapping * btdh)1425 _ctl_btdh_search_sas_device(struct MPT3SAS_ADAPTER *ioc,
1426 struct mpt3_ioctl_btdh_mapping *btdh)
1427 {
1428 struct _sas_device *sas_device;
1429 unsigned long flags;
1430 int rc = 0;
1431
1432 if (list_empty(&ioc->sas_device_list))
1433 return rc;
1434
1435 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1436 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
1437 if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF &&
1438 btdh->handle == sas_device->handle) {
1439 btdh->bus = sas_device->channel;
1440 btdh->id = sas_device->id;
1441 rc = 1;
1442 goto out;
1443 } else if (btdh->bus == sas_device->channel && btdh->id ==
1444 sas_device->id && btdh->handle == 0xFFFF) {
1445 btdh->handle = sas_device->handle;
1446 rc = 1;
1447 goto out;
1448 }
1449 }
1450 out:
1451 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1452 return rc;
1453 }
1454
1455 /**
1456 * _ctl_btdh_search_pcie_device - searching for pcie device
1457 * @ioc: per adapter object
1458 * @btdh: btdh ioctl payload
1459 */
1460 static int
_ctl_btdh_search_pcie_device(struct MPT3SAS_ADAPTER * ioc,struct mpt3_ioctl_btdh_mapping * btdh)1461 _ctl_btdh_search_pcie_device(struct MPT3SAS_ADAPTER *ioc,
1462 struct mpt3_ioctl_btdh_mapping *btdh)
1463 {
1464 struct _pcie_device *pcie_device;
1465 unsigned long flags;
1466 int rc = 0;
1467
1468 if (list_empty(&ioc->pcie_device_list))
1469 return rc;
1470
1471 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1472 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
1473 if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF &&
1474 btdh->handle == pcie_device->handle) {
1475 btdh->bus = pcie_device->channel;
1476 btdh->id = pcie_device->id;
1477 rc = 1;
1478 goto out;
1479 } else if (btdh->bus == pcie_device->channel && btdh->id ==
1480 pcie_device->id && btdh->handle == 0xFFFF) {
1481 btdh->handle = pcie_device->handle;
1482 rc = 1;
1483 goto out;
1484 }
1485 }
1486 out:
1487 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1488 return rc;
1489 }
1490
1491 /**
1492 * _ctl_btdh_search_raid_device - searching for raid device
1493 * @ioc: per adapter object
1494 * @btdh: btdh ioctl payload
1495 */
1496 static int
_ctl_btdh_search_raid_device(struct MPT3SAS_ADAPTER * ioc,struct mpt3_ioctl_btdh_mapping * btdh)1497 _ctl_btdh_search_raid_device(struct MPT3SAS_ADAPTER *ioc,
1498 struct mpt3_ioctl_btdh_mapping *btdh)
1499 {
1500 struct _raid_device *raid_device;
1501 unsigned long flags;
1502 int rc = 0;
1503
1504 if (list_empty(&ioc->raid_device_list))
1505 return rc;
1506
1507 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1508 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1509 if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF &&
1510 btdh->handle == raid_device->handle) {
1511 btdh->bus = raid_device->channel;
1512 btdh->id = raid_device->id;
1513 rc = 1;
1514 goto out;
1515 } else if (btdh->bus == raid_device->channel && btdh->id ==
1516 raid_device->id && btdh->handle == 0xFFFF) {
1517 btdh->handle = raid_device->handle;
1518 rc = 1;
1519 goto out;
1520 }
1521 }
1522 out:
1523 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1524 return rc;
1525 }
1526
1527 /**
1528 * _ctl_btdh_mapping - main handler for MPT3BTDHMAPPING opcode
1529 * @ioc: per adapter object
1530 * @arg: user space buffer containing ioctl content
1531 */
1532 static long
_ctl_btdh_mapping(struct MPT3SAS_ADAPTER * ioc,void __user * arg)1533 _ctl_btdh_mapping(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1534 {
1535 struct mpt3_ioctl_btdh_mapping karg;
1536 int rc;
1537
1538 if (copy_from_user(&karg, arg, sizeof(karg))) {
1539 pr_err("failure at %s:%d/%s()!\n",
1540 __FILE__, __LINE__, __func__);
1541 return -EFAULT;
1542 }
1543
1544 dctlprintk(ioc, ioc_info(ioc, "%s\n",
1545 __func__));
1546
1547 rc = _ctl_btdh_search_sas_device(ioc, &karg);
1548 if (!rc)
1549 rc = _ctl_btdh_search_pcie_device(ioc, &karg);
1550 if (!rc)
1551 _ctl_btdh_search_raid_device(ioc, &karg);
1552
1553 if (copy_to_user(arg, &karg, sizeof(karg))) {
1554 pr_err("failure at %s:%d/%s()!\n",
1555 __FILE__, __LINE__, __func__);
1556 return -EFAULT;
1557 }
1558 return 0;
1559 }
1560
1561 /**
1562 * _ctl_diag_capability - return diag buffer capability
1563 * @ioc: per adapter object
1564 * @buffer_type: specifies either TRACE, SNAPSHOT, or EXTENDED
1565 *
1566 * returns 1 when diag buffer support is enabled in firmware
1567 */
1568 static u8
_ctl_diag_capability(struct MPT3SAS_ADAPTER * ioc,u8 buffer_type)1569 _ctl_diag_capability(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type)
1570 {
1571 u8 rc = 0;
1572
1573 switch (buffer_type) {
1574 case MPI2_DIAG_BUF_TYPE_TRACE:
1575 if (ioc->facts.IOCCapabilities &
1576 MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER)
1577 rc = 1;
1578 break;
1579 case MPI2_DIAG_BUF_TYPE_SNAPSHOT:
1580 if (ioc->facts.IOCCapabilities &
1581 MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER)
1582 rc = 1;
1583 break;
1584 case MPI2_DIAG_BUF_TYPE_EXTENDED:
1585 if (ioc->facts.IOCCapabilities &
1586 MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER)
1587 rc = 1;
1588 }
1589
1590 return rc;
1591 }
1592
1593 /**
1594 * _ctl_diag_get_bufftype - return diag buffer type
1595 * either TRACE, SNAPSHOT, or EXTENDED
1596 * @ioc: per adapter object
1597 * @unique_id: specifies the unique_id for the buffer
1598 *
1599 * returns MPT3_DIAG_UID_NOT_FOUND if the id not found
1600 */
1601 static u8
_ctl_diag_get_bufftype(struct MPT3SAS_ADAPTER * ioc,u32 unique_id)1602 _ctl_diag_get_bufftype(struct MPT3SAS_ADAPTER *ioc, u32 unique_id)
1603 {
1604 u8 index;
1605
1606 for (index = 0; index < MPI2_DIAG_BUF_TYPE_COUNT; index++) {
1607 if (ioc->unique_id[index] == unique_id)
1608 return index;
1609 }
1610
1611 return MPT3_DIAG_UID_NOT_FOUND;
1612 }
1613
/**
 * _ctl_diag_register_2 - wrapper for registering diag buffer support
 * @ioc: per adapter object
 * @diag_register: the diag_register struct passed in from user space
 *
 * Allocates (or reuses) a DMA-coherent buffer of the requested size and
 * posts it to firmware with a MPI2_FUNCTION_DIAG_BUFFER_POST request,
 * then waits for the reply.  Returns 0 on success or a negative errno.
 */
static long
_ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
	struct mpt3_diag_register *diag_register)
{
	int rc, i;
	void *request_data = NULL;
	dma_addr_t request_data_dma;
	u32 request_data_sz = 0;
	Mpi2DiagBufferPostRequest_t *mpi_request;
	Mpi2DiagBufferPostReply_t *mpi_reply;
	u8 buffer_type;
	u16 smid;
	u16 ioc_status;
	u32 ioc_state;
	u8 issue_reset = 0;

	dctlprintk(ioc, ioc_info(ioc, "%s\n",
	    __func__));

	/* The buffer can only be posted while the IOC is operational. */
	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		ioc_err(ioc, "%s: failed due to ioc not operational\n",
		    __func__);
		rc = -EAGAIN;
		goto out;
	}

	/* only one ctl command may be outstanding at a time */
	if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
		rc = -EAGAIN;
		goto out;
	}

	/* firmware must advertise support for this buffer type */
	buffer_type = diag_register->buffer_type;
	if (!_ctl_diag_capability(ioc, buffer_type)) {
		ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
		    __func__, buffer_type);
		return -EPERM;
	}

	/* UID 0 is reserved and can never be registered */
	if (diag_register->unique_id == 0) {
		ioc_err(ioc,
		    "%s: Invalid UID(0x%08x), buffer_type(0x%02x)\n", __func__,
		    diag_register->unique_id, buffer_type);
		return -EINVAL;
	}

	/* reject if an application already owns an unreleased buffer */
	if ((ioc->diag_buffer_status[buffer_type] &
	    MPT3_DIAG_BUFFER_IS_APP_OWNED) &&
	    !(ioc->diag_buffer_status[buffer_type] &
	    MPT3_DIAG_BUFFER_IS_RELEASED)) {
		ioc_err(ioc,
		    "%s: buffer_type(0x%02x) is already registered by application with UID(0x%08x)\n",
		    __func__, buffer_type, ioc->unique_id[buffer_type]);
		return -EINVAL;
	}

	if (ioc->diag_buffer_status[buffer_type] &
	    MPT3_DIAG_BUFFER_IS_REGISTERED) {
		/*
		 * If driver posts buffer initially, then an application wants
		 * to Register that buffer (own it) without Releasing first,
		 * the application Register command MUST have the same buffer
		 * type and size in the Register command (obtained from the
		 * Query command). Otherwise that Register command will be
		 * failed. If the application has released the buffer but wants
		 * to re-register it, it should be allowed as long as the
		 * Unique-Id/Size match.
		 */

		if (ioc->unique_id[buffer_type] == MPT3DIAGBUFFUNIQUEID &&
		    ioc->diag_buffer_sz[buffer_type] ==
		    diag_register->requested_buffer_size) {

			/* driver-owned, same size: hand ownership over */
			if (!(ioc->diag_buffer_status[buffer_type] &
			     MPT3_DIAG_BUFFER_IS_RELEASED)) {
				dctlprintk(ioc, ioc_info(ioc,
				    "%s: diag_buffer (%d) ownership changed. old-ID(0x%08x), new-ID(0x%08x)\n",
				    __func__, buffer_type,
				    ioc->unique_id[buffer_type],
				    diag_register->unique_id));

				/*
				 * Application wants to own the buffer with
				 * the same size.
				 */
				ioc->unique_id[buffer_type] =
				    diag_register->unique_id;
				rc = 0; /* success */
				goto out;
			}
		} else if (ioc->unique_id[buffer_type] !=
		    MPT3DIAGBUFFUNIQUEID) {
			/*
			 * re-registration is allowed only for a released
			 * buffer with matching UID and size
			 */
			if (ioc->unique_id[buffer_type] !=
			    diag_register->unique_id ||
			    ioc->diag_buffer_sz[buffer_type] !=
			    diag_register->requested_buffer_size ||
			    !(ioc->diag_buffer_status[buffer_type] &
			    MPT3_DIAG_BUFFER_IS_RELEASED)) {
				ioc_err(ioc,
				    "%s: already has a registered buffer for buffer_type(0x%02x)\n",
				    __func__, buffer_type);
				return -EINVAL;
			}
		} else {
			ioc_err(ioc, "%s: already has a registered buffer for buffer_type(0x%02x)\n",
			    __func__, buffer_type);
			return -EINVAL;
		}
	} else if (ioc->diag_buffer_status[buffer_type] &
	    MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED) {

		/*
		 * driver pre-allocated but not yet registered: the request
		 * must match the existing allocation exactly
		 */
		if (ioc->unique_id[buffer_type] != MPT3DIAGBUFFUNIQUEID ||
		    ioc->diag_buffer_sz[buffer_type] !=
		    diag_register->requested_buffer_size) {

			ioc_err(ioc,
			    "%s: already a buffer is allocated for buffer_type(0x%02x) of size %d bytes, so please try registering again with same size\n",
			    __func__, buffer_type,
			    ioc->diag_buffer_sz[buffer_type]);
			return -EINVAL;
		}
	}

	/* firmware requires a dword-aligned buffer length */
	if (diag_register->requested_buffer_size % 4)  {
		ioc_err(ioc, "%s: the requested_buffer_size is not 4 byte aligned\n",
		    __func__);
		return -EINVAL;
	}

	smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		rc = -EAGAIN;
		goto out;
	}

	rc = 0;
	ioc->ctl_cmds.status = MPT3_CMD_PENDING;
	memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(mpi_request, 0, ioc->request_sz);
	ioc->ctl_cmds.smid = smid;

	request_data = ioc->diag_buffer[buffer_type];
	request_data_sz = diag_register->requested_buffer_size;
	ioc->unique_id[buffer_type] = diag_register->unique_id;
	/* Reset ioc variables used for additional query commands */
	ioc->reset_from_user = 0;
	memset(&ioc->htb_rel, 0, sizeof(struct htb_rel_query));
	/* clear all status bits except DRIVER_ALLOCATED */
	ioc->diag_buffer_status[buffer_type] &=
	    MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED;
	memcpy(ioc->product_specific[buffer_type],
	    diag_register->product_specific, MPT3_PRODUCT_SPECIFIC_DWORDS);
	ioc->diagnostic_flags[buffer_type] = diag_register->diagnostic_flags;

	/* free a previously allocated buffer if the size changed */
	if (request_data) {
		request_data_dma = ioc->diag_buffer_dma[buffer_type];
		if (request_data_sz != ioc->diag_buffer_sz[buffer_type]) {
			dma_free_coherent(&ioc->pdev->dev,
					ioc->diag_buffer_sz[buffer_type],
					request_data, request_data_dma);
			request_data = NULL;
		}
	}

	/* allocate a fresh DMA buffer when none is reusable */
	if (request_data == NULL) {
		ioc->diag_buffer_sz[buffer_type] = 0;
		ioc->diag_buffer_dma[buffer_type] = 0;
		request_data = dma_alloc_coherent(&ioc->pdev->dev,
				request_data_sz, &request_data_dma, GFP_KERNEL);
		if (request_data == NULL) {
			ioc_err(ioc, "%s: failed allocating memory for diag buffers, requested size(%d)\n",
				__func__, request_data_sz);
			mpt3sas_base_free_smid(ioc, smid);
			rc = -ENOMEM;
			goto out;
		}
		ioc->diag_buffer[buffer_type] = request_data;
		ioc->diag_buffer_sz[buffer_type] = request_data_sz;
		ioc->diag_buffer_dma[buffer_type] = request_data_dma;
	}

	/* build the DIAG_BUFFER_POST request frame */
	mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
	mpi_request->BufferType = diag_register->buffer_type;
	mpi_request->Flags = cpu_to_le32(diag_register->diagnostic_flags);
	mpi_request->BufferAddress = cpu_to_le64(request_data_dma);
	mpi_request->BufferLength = cpu_to_le32(request_data_sz);
	mpi_request->VF_ID = 0; /* TODO */
	mpi_request->VP_ID = 0;

	dctlprintk(ioc,
		   ioc_info(ioc, "%s: diag_buffer(0x%p), dma(0x%llx), sz(%d)\n",
			    __func__, request_data,
			    (unsigned long long)request_data_dma,
			    le32_to_cpu(mpi_request->BufferLength)));

	for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
		mpi_request->ProductSpecific[i] =
			cpu_to_le32(ioc->product_specific[buffer_type][i]);

	/* fire the request and wait for firmware to reply */
	init_completion(&ioc->ctl_cmds.done);
	ioc->put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->ctl_cmds.done,
	    MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);

	/* on timeout, decide whether a host reset is required */
	if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
		mpt3sas_check_cmd_timeout(ioc,
		    ioc->ctl_cmds.status, mpi_request,
		    sizeof(Mpi2DiagBufferPostRequest_t)/4, issue_reset);
		goto issue_host_reset;
	}

	/* process the completed Reply Message Frame */
	if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
		ioc_err(ioc, "%s: no reply message\n", __func__);
		rc = -EFAULT;
		goto out;
	}

	mpi_reply = ioc->ctl_cmds.reply;
	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;

	if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
		ioc->diag_buffer_status[buffer_type] |=
			MPT3_DIAG_BUFFER_IS_REGISTERED;
		dctlprintk(ioc, ioc_info(ioc, "%s: success\n", __func__));
	} else {
		ioc_info(ioc, "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
			 __func__,
			 ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
		rc = -EFAULT;
	}

 issue_host_reset:
	if (issue_reset)
		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);

 out:

	/* on failure, release the buffer and drop the allocation flag */
	if (rc && request_data) {
		dma_free_coherent(&ioc->pdev->dev, request_data_sz,
		    request_data, request_data_dma);
		ioc->diag_buffer[buffer_type] = NULL;
		ioc->diag_buffer_status[buffer_type] &=
		    ~MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED;
	}

	ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
	return rc;
}
1871
1872 /**
1873 * mpt3sas_enable_diag_buffer - enabling diag_buffers support driver load time
1874 * @ioc: per adapter object
1875 * @bits_to_register: bitwise field where trace is bit 0, and snapshot is bit 1
1876 *
1877 * This is called when command line option diag_buffer_enable is enabled
1878 * at driver load time.
1879 */
1880 void
mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER * ioc,u8 bits_to_register)1881 mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register)
1882 {
1883 struct mpt3_diag_register diag_register;
1884 u32 ret_val;
1885 u32 trace_buff_size = ioc->manu_pg11.HostTraceBufferMaxSizeKB<<10;
1886 u32 min_trace_buff_size = 0;
1887 u32 decr_trace_buff_size = 0;
1888
1889 memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
1890
1891 if (bits_to_register & 1) {
1892 ioc_info(ioc, "registering trace buffer support\n");
1893 ioc->diag_trigger_master.MasterData =
1894 (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
1895 diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
1896 diag_register.unique_id =
1897 (ioc->hba_mpi_version_belonged == MPI2_VERSION) ?
1898 (MPT2DIAGBUFFUNIQUEID):(MPT3DIAGBUFFUNIQUEID);
1899
1900 if (trace_buff_size != 0) {
1901 diag_register.requested_buffer_size = trace_buff_size;
1902 min_trace_buff_size =
1903 ioc->manu_pg11.HostTraceBufferMinSizeKB<<10;
1904 decr_trace_buff_size =
1905 ioc->manu_pg11.HostTraceBufferDecrementSizeKB<<10;
1906
1907 if (min_trace_buff_size > trace_buff_size) {
1908 /* The buff size is not set correctly */
1909 ioc_err(ioc,
1910 "Min Trace Buff size (%d KB) greater than Max Trace Buff size (%d KB)\n",
1911 min_trace_buff_size>>10,
1912 trace_buff_size>>10);
1913 ioc_err(ioc,
1914 "Using zero Min Trace Buff Size\n");
1915 min_trace_buff_size = 0;
1916 }
1917
1918 if (decr_trace_buff_size == 0) {
1919 /*
1920 * retry the min size if decrement
1921 * is not available.
1922 */
1923 decr_trace_buff_size =
1924 trace_buff_size - min_trace_buff_size;
1925 }
1926 } else {
1927 /* register for 2MB buffers */
1928 diag_register.requested_buffer_size = 2 * (1024 * 1024);
1929 }
1930
1931 do {
1932 ret_val = _ctl_diag_register_2(ioc, &diag_register);
1933
1934 if (ret_val == -ENOMEM && min_trace_buff_size &&
1935 (trace_buff_size - decr_trace_buff_size) >=
1936 min_trace_buff_size) {
1937 /* adjust the buffer size */
1938 trace_buff_size -= decr_trace_buff_size;
1939 diag_register.requested_buffer_size =
1940 trace_buff_size;
1941 } else
1942 break;
1943 } while (true);
1944
1945 if (ret_val == -ENOMEM)
1946 ioc_err(ioc,
1947 "Cannot allocate trace buffer memory. Last memory tried = %d KB\n",
1948 diag_register.requested_buffer_size>>10);
1949 else if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE]
1950 & MPT3_DIAG_BUFFER_IS_REGISTERED) {
1951 ioc_info(ioc, "Trace buffer memory %d KB allocated\n",
1952 diag_register.requested_buffer_size>>10);
1953 if (ioc->hba_mpi_version_belonged != MPI2_VERSION)
1954 ioc->diag_buffer_status[
1955 MPI2_DIAG_BUF_TYPE_TRACE] |=
1956 MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED;
1957 }
1958 }
1959
1960 if (bits_to_register & 2) {
1961 ioc_info(ioc, "registering snapshot buffer support\n");
1962 diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_SNAPSHOT;
1963 /* register for 2MB buffers */
1964 diag_register.requested_buffer_size = 2 * (1024 * 1024);
1965 diag_register.unique_id = 0x7075901;
1966 _ctl_diag_register_2(ioc, &diag_register);
1967 }
1968
1969 if (bits_to_register & 4) {
1970 ioc_info(ioc, "registering extended buffer support\n");
1971 diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_EXTENDED;
1972 /* register for 2MB buffers */
1973 diag_register.requested_buffer_size = 2 * (1024 * 1024);
1974 diag_register.unique_id = 0x7075901;
1975 _ctl_diag_register_2(ioc, &diag_register);
1976 }
1977 }
1978
1979 /**
1980 * _ctl_diag_register - application register with driver
1981 * @ioc: per adapter object
1982 * @arg: user space buffer containing ioctl content
1983 *
1984 * This will allow the driver to setup any required buffers that will be
1985 * needed by firmware to communicate with the driver.
1986 */
1987 static long
_ctl_diag_register(struct MPT3SAS_ADAPTER * ioc,void __user * arg)1988 _ctl_diag_register(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1989 {
1990 struct mpt3_diag_register karg;
1991 long rc;
1992
1993 if (copy_from_user(&karg, arg, sizeof(karg))) {
1994 pr_err("failure at %s:%d/%s()!\n",
1995 __FILE__, __LINE__, __func__);
1996 return -EFAULT;
1997 }
1998
1999 rc = _ctl_diag_register_2(ioc, &karg);
2000
2001 if (!rc && (ioc->diag_buffer_status[karg.buffer_type] &
2002 MPT3_DIAG_BUFFER_IS_REGISTERED))
2003 ioc->diag_buffer_status[karg.buffer_type] |=
2004 MPT3_DIAG_BUFFER_IS_APP_OWNED;
2005
2006 return rc;
2007 }
2008
2009 /**
2010 * _ctl_diag_unregister - application unregister with driver
2011 * @ioc: per adapter object
2012 * @arg: user space buffer containing ioctl content
2013 *
2014 * This will allow the driver to cleanup any memory allocated for diag
2015 * messages and to free up any resources.
2016 */
static long
_ctl_diag_unregister(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
{
	struct mpt3_diag_unregister karg;
	void *request_data;
	dma_addr_t request_data_dma;
	u32 request_data_sz;
	u8 buffer_type;

	if (copy_from_user(&karg, arg, sizeof(karg))) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}

	dctlprintk(ioc, ioc_info(ioc, "%s\n",
				 __func__));

	/* Resolve the caller-supplied unique_id to a buffer type. */
	buffer_type = _ctl_diag_get_bufftype(ioc, karg.unique_id);
	if (buffer_type == MPT3_DIAG_UID_NOT_FOUND) {
		ioc_err(ioc, "%s: buffer with unique_id(0x%08x) not found\n",
			__func__, karg.unique_id);
		return -EINVAL;
	}

	if (!_ctl_diag_capability(ioc, buffer_type)) {
		ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
			__func__, buffer_type);
		return -EPERM;
	}

	/*
	 * A buffer may only be unregistered after it has been both
	 * registered with firmware and subsequently released, so firmware
	 * is no longer writing into it.
	 */
	if ((ioc->diag_buffer_status[buffer_type] &
	    MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
		ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n",
			__func__, buffer_type);
		return -EINVAL;
	}
	if ((ioc->diag_buffer_status[buffer_type] &
	    MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
		ioc_err(ioc, "%s: buffer_type(0x%02x) has not been released\n",
			__func__, buffer_type);
		return -EINVAL;
	}

	if (karg.unique_id != ioc->unique_id[buffer_type]) {
		ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
			__func__, karg.unique_id);
		return -EINVAL;
	}

	request_data = ioc->diag_buffer[buffer_type];
	if (!request_data) {
		ioc_err(ioc, "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
			__func__, buffer_type);
		return -ENOMEM;
	}

	/*
	 * Driver-allocated buffers are kept around for reuse: only the
	 * ownership/registration flags are cleared and the unique id is
	 * reset to the driver default.  Application-allocated buffers are
	 * actually freed (DMA coherent memory) and their status zeroed.
	 */
	if (ioc->diag_buffer_status[buffer_type] &
	    MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED) {
		ioc->unique_id[buffer_type] = MPT3DIAGBUFFUNIQUEID;
		ioc->diag_buffer_status[buffer_type] &=
		    ~MPT3_DIAG_BUFFER_IS_APP_OWNED;
		ioc->diag_buffer_status[buffer_type] &=
		    ~MPT3_DIAG_BUFFER_IS_REGISTERED;
	} else {
		request_data_sz = ioc->diag_buffer_sz[buffer_type];
		request_data_dma = ioc->diag_buffer_dma[buffer_type];
		dma_free_coherent(&ioc->pdev->dev, request_data_sz,
		    request_data, request_data_dma);
		ioc->diag_buffer[buffer_type] = NULL;
		ioc->diag_buffer_status[buffer_type] = 0;
	}
	return 0;
}
2091
2092 /**
2093 * _ctl_diag_query - query relevant info associated with diag buffers
2094 * @ioc: per adapter object
2095 * @arg: user space buffer containing ioctl content
2096 *
2097 * The application will send only buffer_type and unique_id. Driver will
2098 * inspect unique_id first, if valid, fill in all the info. If unique_id is
2099 * 0x00, the driver will return info specified by Buffer Type.
2100 */
static long
_ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
{
	struct mpt3_diag_query karg;
	void *request_data;
	int i;
	u8 buffer_type;

	if (copy_from_user(&karg, arg, sizeof(karg))) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}

	dctlprintk(ioc, ioc_info(ioc, "%s\n",
				 __func__));

	karg.application_flags = 0;
	buffer_type = karg.buffer_type;

	if (!_ctl_diag_capability(ioc, buffer_type)) {
		ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
			__func__, buffer_type);
		return -EPERM;
	}

	/*
	 * A driver-allocated buffer may exist without currently being
	 * registered with firmware; only reject "not registered" when the
	 * buffer is not driver allocated.
	 */
	if (!(ioc->diag_buffer_status[buffer_type] &
	    MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED)) {
		if ((ioc->diag_buffer_status[buffer_type] &
		    MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
			ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n",
				__func__, buffer_type);
			return -EINVAL;
		}
	}

	/* unique_id == 0 means "look up by buffer_type" (see kernel-doc). */
	if (karg.unique_id) {
		if (karg.unique_id != ioc->unique_id[buffer_type]) {
			ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
				__func__, karg.unique_id);
			return -EINVAL;
		}
	}

	request_data = ioc->diag_buffer[buffer_type];
	if (!request_data) {
		ioc_err(ioc, "%s: doesn't have buffer for buffer_type(0x%02x)\n",
			__func__, buffer_type);
		return -ENOMEM;
	}

	/* Translate the driver's internal status bits into the
	 * application-visible flag set.
	 */
	if ((ioc->diag_buffer_status[buffer_type] &
	    MPT3_DIAG_BUFFER_IS_REGISTERED))
		karg.application_flags |= MPT3_APP_FLAGS_BUFFER_VALID;

	if (!(ioc->diag_buffer_status[buffer_type] &
	    MPT3_DIAG_BUFFER_IS_RELEASED))
		karg.application_flags |= MPT3_APP_FLAGS_FW_BUFFER_ACCESS;

	if (!(ioc->diag_buffer_status[buffer_type] &
	    MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED))
		karg.application_flags |= MPT3_APP_FLAGS_DYNAMIC_BUFFER_ALLOC;

	if ((ioc->diag_buffer_status[buffer_type] &
	    MPT3_DIAG_BUFFER_IS_APP_OWNED))
		karg.application_flags |= MPT3_APP_FLAGS_APP_OWNED;

	/* Copy out the product-specific dwords recorded at registration. */
	for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
		karg.product_specific[i] =
		    ioc->product_specific[buffer_type][i];

	karg.total_buffer_size = ioc->diag_buffer_sz[buffer_type];
	karg.driver_added_buffer_size = 0;
	karg.unique_id = ioc->unique_id[buffer_type];
	karg.diagnostic_flags = ioc->diagnostic_flags[buffer_type];

	if (copy_to_user(arg, &karg, sizeof(struct mpt3_diag_query))) {
		ioc_err(ioc, "%s: unable to write mpt3_diag_query data @ %p\n",
			__func__, arg);
		return -EFAULT;
	}
	return 0;
}
2184
2185 /**
2186 * mpt3sas_send_diag_release - Diag Release Message
2187 * @ioc: per adapter object
2188 * @buffer_type: specifies either TRACE, SNAPSHOT, or EXTENDED
2189 * @issue_reset: specifies whether host reset is required.
2190 *
2191 */
int
mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
	u8 *issue_reset)
{
	Mpi2DiagReleaseRequest_t *mpi_request;
	Mpi2DiagReleaseReply_t *mpi_reply;
	u16 smid;
	u16 ioc_status;
	u32 ioc_state;
	int rc;
	u8 reset_needed = 0;

	dctlprintk(ioc, ioc_info(ioc, "%s\n",
				 __func__));

	rc = 0;
	*issue_reset = 0;


	/*
	 * If the IOC is not operational (e.g. faulted), firmware cannot be
	 * asked to release the buffer; just mark a registered buffer as
	 * released locally and report -EAGAIN.
	 */
	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		if (ioc->diag_buffer_status[buffer_type] &
		    MPT3_DIAG_BUFFER_IS_REGISTERED)
			ioc->diag_buffer_status[buffer_type] |=
			    MPT3_DIAG_BUFFER_IS_RELEASED;
		dctlprintk(ioc,
			   ioc_info(ioc, "%s: skipping due to FAULT state\n",
				    __func__));
		rc = -EAGAIN;
		goto out;
	}

	/* Only one ctl command may be outstanding at a time. */
	if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
		rc = -EAGAIN;
		goto out;
	}

	smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		rc = -EAGAIN;
		goto out;
	}

	/* Build the DIAG_RELEASE request in the message frame for smid. */
	ioc->ctl_cmds.status = MPT3_CMD_PENDING;
	memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(mpi_request, 0, ioc->request_sz);
	ioc->ctl_cmds.smid = smid;

	mpi_request->Function = MPI2_FUNCTION_DIAG_RELEASE;
	mpi_request->BufferType = buffer_type;
	mpi_request->VF_ID = 0; /* TODO */
	mpi_request->VP_ID = 0;

	/* Fire the request and wait for the completion callback. */
	init_completion(&ioc->ctl_cmds.done);
	ioc->put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->ctl_cmds.done,
	    MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);

	/* Timed out: decide whether a host reset is required and tell the
	 * caller via *issue_reset.
	 */
	if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
		mpt3sas_check_cmd_timeout(ioc,
		    ioc->ctl_cmds.status, mpi_request,
		    sizeof(Mpi2DiagReleaseRequest_t)/4, reset_needed);
		*issue_reset = reset_needed;
		rc = -EFAULT;
		goto out;
	}

	/* process the completed Reply Message Frame */
	if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
		ioc_err(ioc, "%s: no reply message\n", __func__);
		rc = -EFAULT;
		goto out;
	}

	mpi_reply = ioc->ctl_cmds.reply;
	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;

	/* Firmware accepted the release: record it in the status bits. */
	if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
		ioc->diag_buffer_status[buffer_type] |=
		    MPT3_DIAG_BUFFER_IS_RELEASED;
		dctlprintk(ioc, ioc_info(ioc, "%s: success\n", __func__));
	} else {
		ioc_info(ioc, "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
			 __func__,
			 ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
		rc = -EFAULT;
	}

 out:
	/* Always free the ctl command slot for the next user. */
	ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
	return rc;
}
2287
/**
 * _ctl_diag_release - request to send Diag Release Message to firmware
 * @ioc: per adapter object
 * @arg: user space buffer containing ioctl content
 *
 * This allows ownership of the specified buffer to be returned to the driver,
 * allowing an application to read the buffer without fear that firmware is
 * overwriting information in the buffer.
 */
static long
_ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
{
	struct mpt3_diag_release karg;
	void *request_data;
	int rc;
	u8 buffer_type;
	u8 issue_reset = 0;

	if (copy_from_user(&karg, arg, sizeof(karg))) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}

	dctlprintk(ioc, ioc_info(ioc, "%s\n",
				 __func__));

	/* Resolve the caller-supplied unique_id to a buffer type. */
	buffer_type = _ctl_diag_get_bufftype(ioc, karg.unique_id);
	if (buffer_type == MPT3_DIAG_UID_NOT_FOUND) {
		ioc_err(ioc, "%s: buffer with unique_id(0x%08x) not found\n",
			__func__, karg.unique_id);
		return -EINVAL;
	}

	if (!_ctl_diag_capability(ioc, buffer_type)) {
		ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
			__func__, buffer_type);
		return -EPERM;
	}

	/* Can only release a buffer firmware currently owns. */
	if ((ioc->diag_buffer_status[buffer_type] &
	    MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
		ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n",
			__func__, buffer_type);
		return -EINVAL;
	}

	if (karg.unique_id != ioc->unique_id[buffer_type]) {
		ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
			__func__, karg.unique_id);
		return -EINVAL;
	}

	/* Releasing twice is an error. */
	if (ioc->diag_buffer_status[buffer_type] &
	    MPT3_DIAG_BUFFER_IS_RELEASED) {
		ioc_err(ioc, "%s: buffer_type(0x%02x) is already released\n",
			__func__, buffer_type);
		return -EINVAL;
	}

	request_data = ioc->diag_buffer[buffer_type];

	if (!request_data) {
		ioc_err(ioc, "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
			__func__, buffer_type);
		return -ENOMEM;
	}

	/* buffers were already released due to a host reset; no need to ask
	 * firmware again -- just update the status bits and succeed.
	 */
	if ((ioc->diag_buffer_status[buffer_type] &
	    MPT3_DIAG_BUFFER_IS_DIAG_RESET)) {
		ioc->diag_buffer_status[buffer_type] |=
		    MPT3_DIAG_BUFFER_IS_RELEASED;
		ioc->diag_buffer_status[buffer_type] &=
		    ~MPT3_DIAG_BUFFER_IS_DIAG_RESET;
		ioc_err(ioc, "%s: buffer_type(0x%02x) was released due to host reset\n",
			__func__, buffer_type);
		return 0;
	}

	rc = mpt3sas_send_diag_release(ioc, buffer_type, &issue_reset);

	/* The release request timed out badly enough to warrant a reset. */
	if (issue_reset)
		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);

	return rc;
}
2375
2376 /**
2377 * _ctl_diag_read_buffer - request for copy of the diag buffer
2378 * @ioc: per adapter object
2379 * @arg: user space buffer containing ioctl content
2380 */
static long
_ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
{
	struct mpt3_diag_read_buffer karg;
	struct mpt3_diag_read_buffer __user *uarg = arg;
	void *request_data, *diag_data;
	Mpi2DiagBufferPostRequest_t *mpi_request;
	Mpi2DiagBufferPostReply_t *mpi_reply;
	int rc, i;
	u8 buffer_type;
	unsigned long request_size, copy_size;
	u16 smid;
	u16 ioc_status;
	u8 issue_reset = 0;

	if (copy_from_user(&karg, arg, sizeof(karg))) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}

	dctlprintk(ioc, ioc_info(ioc, "%s\n",
				 __func__));

	/* Resolve the caller-supplied unique_id to a buffer type. */
	buffer_type = _ctl_diag_get_bufftype(ioc, karg.unique_id);
	if (buffer_type == MPT3_DIAG_UID_NOT_FOUND) {
		ioc_err(ioc, "%s: buffer with unique_id(0x%08x) not found\n",
			__func__, karg.unique_id);
		return -EINVAL;
	}

	if (!_ctl_diag_capability(ioc, buffer_type)) {
		ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
			__func__, buffer_type);
		return -EPERM;
	}

	if (karg.unique_id != ioc->unique_id[buffer_type]) {
		ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
			__func__, karg.unique_id);
		return -EINVAL;
	}

	request_data = ioc->diag_buffer[buffer_type];
	if (!request_data) {
		ioc_err(ioc, "%s: doesn't have buffer for buffer_type(0x%02x)\n",
			__func__, buffer_type);
		return -ENOMEM;
	}

	request_size = ioc->diag_buffer_sz[buffer_type];

	/* Both offset and length must be dword aligned. */
	if ((karg.starting_offset % 4) || (karg.bytes_to_read % 4)) {
		ioc_err(ioc, "%s: either the starting_offset or bytes_to_read are not 4 byte aligned\n",
			__func__);
		return -EINVAL;
	}

	if (karg.starting_offset > request_size)
		return -EINVAL;

	diag_data = (void *)(request_data + karg.starting_offset);
	dctlprintk(ioc,
		   ioc_info(ioc, "%s: diag_buffer(%p), offset(%d), sz(%d)\n",
			    __func__, diag_data, karg.starting_offset,
			    karg.bytes_to_read));

	/* Truncate data on requests that are too large (or whose length
	 * arithmetic wraps around).
	 */
	if ((diag_data + karg.bytes_to_read < diag_data) ||
	    (diag_data + karg.bytes_to_read > request_data + request_size))
		copy_size = request_size - karg.starting_offset;
	else
		copy_size = karg.bytes_to_read;

	/* NOTE(review): diagnostic_data is fetched by dereferencing the
	 * __user struct pointer directly rather than using the already
	 * copied karg.diagnostic_data -- confirm this is intentional.
	 */
	if (copy_to_user((void __user *)uarg->diagnostic_data,
	    diag_data, copy_size)) {
		ioc_err(ioc, "%s: Unable to write mpt_diag_read_buffer_t data @ %p\n",
			__func__, diag_data);
		return -EFAULT;
	}

	/* Done unless the caller asked us to re-register the buffer with
	 * firmware after reading it.
	 */
	if ((karg.flags & MPT3_FLAGS_REREGISTER) == 0)
		return 0;

	dctlprintk(ioc,
		   ioc_info(ioc, "%s: Reregister buffer_type(0x%02x)\n",
			    __func__, buffer_type));
	/* Still registered (never released): nothing to re-post. */
	if ((ioc->diag_buffer_status[buffer_type] &
	    MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
		dctlprintk(ioc,
			   ioc_info(ioc, "%s: buffer_type(0x%02x) is still registered\n",
				    __func__, buffer_type));
		return 0;
	}
	/* Get a free request frame and save the message context.
	 */

	if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
		rc = -EAGAIN;
		goto out;
	}

	smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		rc = -EAGAIN;
		goto out;
	}

	/* Build a DIAG_BUFFER_POST request that re-registers the existing
	 * DMA buffer with firmware, replaying the product-specific dwords
	 * captured at original registration time.
	 */
	rc = 0;
	ioc->ctl_cmds.status = MPT3_CMD_PENDING;
	memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(mpi_request, 0, ioc->request_sz);
	ioc->ctl_cmds.smid = smid;

	mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
	mpi_request->BufferType = buffer_type;
	mpi_request->BufferLength =
	    cpu_to_le32(ioc->diag_buffer_sz[buffer_type]);
	mpi_request->BufferAddress =
	    cpu_to_le64(ioc->diag_buffer_dma[buffer_type]);
	for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
		mpi_request->ProductSpecific[i] =
			cpu_to_le32(ioc->product_specific[buffer_type][i]);
	mpi_request->VF_ID = 0; /* TODO */
	mpi_request->VP_ID = 0;

	init_completion(&ioc->ctl_cmds.done);
	ioc->put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->ctl_cmds.done,
	    MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);

	/* NOTE(review): on timeout, rc keeps the 0 assigned above, so the
	 * ioctl reports success after the (possible) host reset below --
	 * confirm this is the intended behavior.
	 */
	if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
		mpt3sas_check_cmd_timeout(ioc,
		    ioc->ctl_cmds.status, mpi_request,
		    sizeof(Mpi2DiagBufferPostRequest_t)/4, issue_reset);
		goto issue_host_reset;
	}

	/* process the completed Reply Message Frame */
	if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
		ioc_err(ioc, "%s: no reply message\n", __func__);
		rc = -EFAULT;
		goto out;
	}

	mpi_reply = ioc->ctl_cmds.reply;
	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;

	/* Re-registration accepted: flip the status bits back to
	 * registered / not-released.
	 */
	if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
		ioc->diag_buffer_status[buffer_type] |=
		    MPT3_DIAG_BUFFER_IS_REGISTERED;
		ioc->diag_buffer_status[buffer_type] &=
		    ~MPT3_DIAG_BUFFER_IS_RELEASED;
		dctlprintk(ioc, ioc_info(ioc, "%s: success\n", __func__));
	} else {
		ioc_info(ioc, "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
			 __func__, ioc_status,
			 le32_to_cpu(mpi_reply->IOCLogInfo));
		rc = -EFAULT;
	}

 issue_host_reset:
	if (issue_reset)
		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);

 out:

	/* Always free the ctl command slot for the next user. */
	ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
	return rc;
}
2554
2555 /**
2556 * _ctl_addnl_diag_query - query relevant info associated with diag buffers
2557 * @ioc: per adapter object
2558 * @arg: user space buffer containing ioctl content
2559 *
2560 * The application will send only unique_id. Driver will
2561 * inspect unique_id first, if valid, fill the details related to cause
2562 * for diag buffer release.
2563 */
static long
_ctl_addnl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
{
	struct mpt3_addnl_diag_query karg;
	u32 buffer_type = 0;

	if (copy_from_user(&karg, arg, sizeof(karg))) {
		pr_err("%s: failure at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
		return -EFAULT;
	}
	dctlprintk(ioc, ioc_info(ioc, "%s\n", __func__));
	/* Unlike _ctl_diag_query(), a zero unique_id is not allowed here. */
	if (karg.unique_id == 0) {
		ioc_err(ioc, "%s: unique_id is(0x%08x)\n",
		    __func__, karg.unique_id);
		return -EPERM;
	}
	buffer_type = _ctl_diag_get_bufftype(ioc, karg.unique_id);
	if (buffer_type == MPT3_DIAG_UID_NOT_FOUND) {
		ioc_err(ioc, "%s: buffer with unique_id(0x%08x) not found\n",
		    __func__, karg.unique_id);
		return -EPERM;
	}
	memset(&karg.rel_query, 0, sizeof(karg.rel_query));
	/* An unregistered buffer is not an error: return a zeroed
	 * rel_query to the application.
	 */
	if ((ioc->diag_buffer_status[buffer_type] &
	    MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
		ioc_info(ioc, "%s: buffer_type(0x%02x) is not registered\n",
		    __func__, buffer_type);
		goto out;
	}
	if ((ioc->diag_buffer_status[buffer_type] &
	    MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
		ioc_err(ioc, "%s: buffer_type(0x%02x) is not released\n",
		    __func__, buffer_type);
		return -EPERM;
	}
	/* Report the recorded cause of the (host trace) buffer release. */
	memcpy(&karg.rel_query, &ioc->htb_rel, sizeof(karg.rel_query));
out:
	if (copy_to_user(arg, &karg, sizeof(struct mpt3_addnl_diag_query))) {
		ioc_err(ioc, "%s: unable to write mpt3_addnl_diag_query data @ %p\n",
		    __func__, arg);
		return -EFAULT;
	}
	return 0;
}
2609
2610 /**
2611 * _ctl_enable_diag_sbr_reload - enable sbr reload bit
2612 * @ioc: per adapter object
2613 * @arg: user space buffer containing ioctl content
2614 *
2615 * Enable the SBR reload bit
2616 */
static int
_ctl_enable_diag_sbr_reload(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
{
	u32 ioc_state, host_diagnostic;

	/* Refuse while the adapter is resetting, loading, or going away. */
	if (ioc->shost_recovery ||
	    ioc->pci_error_recovery || ioc->is_driver_loading ||
	    ioc->remove_host)
		return -EAGAIN;

	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);

	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL)
		return -EFAULT;

	host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);

	/* Bit already set: nothing to do. */
	if (host_diagnostic & MPI2_DIAG_SBR_RELOAD)
		return 0;

	/* Unlock the host diagnostic register under the hostdiag mutex;
	 * another concurrent unlock attempt yields -EAGAIN.
	 */
	if (mutex_trylock(&ioc->hostdiag_unlock_mutex)) {
		if (mpt3sas_base_unlock_and_get_host_diagnostic(ioc, &host_diagnostic)) {
			mutex_unlock(&ioc->hostdiag_unlock_mutex);
			return -EFAULT;
		}
	} else
		return -EAGAIN;

	/* Set the SBR reload bit, read it back to verify, then re-lock the
	 * diagnostic register.
	 */
	host_diagnostic |= MPI2_DIAG_SBR_RELOAD;
	writel(host_diagnostic, &ioc->chip->HostDiagnostic);
	host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
	mpt3sas_base_lock_host_diagnostic(ioc);
	mutex_unlock(&ioc->hostdiag_unlock_mutex);

	if (!(host_diagnostic & MPI2_DIAG_SBR_RELOAD)) {
		ioc_err(ioc, "%s: Failed to set Diag SBR Reload Bit\n", __func__);
		return -EFAULT;
	}

	ioc_info(ioc, "%s: Successfully set the Diag SBR Reload Bit\n", __func__);
	return 0;
}
2659
2660 #ifdef CONFIG_COMPAT
2661 /**
2662 * _ctl_compat_mpt_command - convert 32bit pointers to 64bit.
2663 * @ioc: per adapter object
2664 * @cmd: ioctl opcode
2665 * @arg: (struct mpt3_ioctl_command32)
2666 *
2667 * MPT3COMMAND32 - Handle 32bit applications running on 64bit os.
2668 */
2669 static long
_ctl_compat_mpt_command(struct MPT3SAS_ADAPTER * ioc,unsigned cmd,void __user * arg)2670 _ctl_compat_mpt_command(struct MPT3SAS_ADAPTER *ioc, unsigned cmd,
2671 void __user *arg)
2672 {
2673 struct mpt3_ioctl_command32 karg32;
2674 struct mpt3_ioctl_command32 __user *uarg;
2675 struct mpt3_ioctl_command karg;
2676
2677 if (_IOC_SIZE(cmd) != sizeof(struct mpt3_ioctl_command32))
2678 return -EINVAL;
2679
2680 uarg = (struct mpt3_ioctl_command32 __user *) arg;
2681
2682 if (copy_from_user(&karg32, (char __user *)arg, sizeof(karg32))) {
2683 pr_err("failure at %s:%d/%s()!\n",
2684 __FILE__, __LINE__, __func__);
2685 return -EFAULT;
2686 }
2687
2688 memset(&karg, 0, sizeof(struct mpt3_ioctl_command));
2689 karg.hdr.ioc_number = karg32.hdr.ioc_number;
2690 karg.hdr.port_number = karg32.hdr.port_number;
2691 karg.hdr.max_data_size = karg32.hdr.max_data_size;
2692 karg.timeout = karg32.timeout;
2693 karg.max_reply_bytes = karg32.max_reply_bytes;
2694 karg.data_in_size = karg32.data_in_size;
2695 karg.data_out_size = karg32.data_out_size;
2696 karg.max_sense_bytes = karg32.max_sense_bytes;
2697 karg.data_sge_offset = karg32.data_sge_offset;
2698 karg.reply_frame_buf_ptr = compat_ptr(karg32.reply_frame_buf_ptr);
2699 karg.data_in_buf_ptr = compat_ptr(karg32.data_in_buf_ptr);
2700 karg.data_out_buf_ptr = compat_ptr(karg32.data_out_buf_ptr);
2701 karg.sense_data_ptr = compat_ptr(karg32.sense_data_ptr);
2702 return _ctl_do_mpt_command(ioc, karg, &uarg->mf);
2703 }
2704 #endif
2705
2706 /**
2707 * _ctl_ioctl_main - main ioctl entry point
2708 * @file: (struct file)
2709 * @cmd: ioctl opcode
2710 * @arg: user space data buffer
2711 * @compat: handles 32 bit applications in 64bit os
2712 * @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device &
2713 * MPI25_VERSION | MPI26_VERSION for mpt3ctl ioctl device.
2714 */
static long
_ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg,
	u8 compat, u16 mpi_version)
{
	struct MPT3SAS_ADAPTER *ioc;
	struct mpt3_ioctl_header ioctl_header;
	enum block_state state;
	long ret = -ENOIOCTLCMD;

	/* get IOCTL header */
	if (copy_from_user(&ioctl_header, (char __user *)arg,
	    sizeof(struct mpt3_ioctl_header))) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}

	/* Map the header's ioc_number to an adapter matching mpi_version. */
	if (_ctl_verify_adapter(ioctl_header.ioc_number,
	    &ioc, mpi_version) == -1 || !ioc)
		return -ENODEV;

	/* pci_access_mutex lock acquired by ioctl path */
	mutex_lock(&ioc->pci_access_mutex);

	/* Adapter is resetting, loading, or being removed: bail out. */
	if (ioc->shost_recovery || ioc->pci_error_recovery ||
	    ioc->is_driver_loading || ioc->remove_host) {
		ret = -EAGAIN;
		goto out_unlock_pciaccess;
	}

	/* Serialize ctl commands; honor O_NONBLOCK by not sleeping on the
	 * mutex.
	 */
	state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING : BLOCKING;
	if (state == NON_BLOCKING) {
		if (!mutex_trylock(&ioc->ctl_cmds.mutex)) {
			ret = -EAGAIN;
			goto out_unlock_pciaccess;
		}
	} else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex)) {
		ret = -ERESTARTSYS;
		goto out_unlock_pciaccess;
	}


	/* Dispatch on opcode; each handler is gated on the exact ioctl
	 * payload size encoded in cmd.
	 */
	switch (cmd) {
	case MPT3IOCINFO:
		if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_iocinfo))
			ret = _ctl_getiocinfo(ioc, arg);
		break;
#ifdef CONFIG_COMPAT
	case MPT3COMMAND32:
#endif
	case MPT3COMMAND:
	{
		struct mpt3_ioctl_command __user *uarg;
		struct mpt3_ioctl_command karg;

#ifdef CONFIG_COMPAT
		if (compat) {
			ret = _ctl_compat_mpt_command(ioc, cmd, arg);
			break;
		}
#endif
		if (copy_from_user(&karg, arg, sizeof(karg))) {
			pr_err("failure at %s:%d/%s()!\n",
			    __FILE__, __LINE__, __func__);
			ret = -EFAULT;
			break;
		}

		/* The command's ioc_number must match the header's. */
		if (karg.hdr.ioc_number != ioctl_header.ioc_number) {
			ret = -EINVAL;
			break;
		}
		if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_command)) {
			uarg = arg;
			ret = _ctl_do_mpt_command(ioc, karg, &uarg->mf);
		}
		break;
	}
	case MPT3EVENTQUERY:
		if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_eventquery))
			ret = _ctl_eventquery(ioc, arg);
		break;
	case MPT3EVENTENABLE:
		if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_eventenable))
			ret = _ctl_eventenable(ioc, arg);
		break;
	case MPT3EVENTREPORT:
		ret = _ctl_eventreport(ioc, arg);
		break;
	case MPT3HARDRESET:
		if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_diag_reset))
			ret = _ctl_do_reset(ioc, arg);
		break;
	case MPT3BTDHMAPPING:
		if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_btdh_mapping))
			ret = _ctl_btdh_mapping(ioc, arg);
		break;
	case MPT3DIAGREGISTER:
		if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_register))
			ret = _ctl_diag_register(ioc, arg);
		break;
	case MPT3DIAGUNREGISTER:
		if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_unregister))
			ret = _ctl_diag_unregister(ioc, arg);
		break;
	case MPT3DIAGQUERY:
		if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_query))
			ret = _ctl_diag_query(ioc, arg);
		break;
	case MPT3DIAGRELEASE:
		if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_release))
			ret = _ctl_diag_release(ioc, arg);
		break;
	case MPT3DIAGREADBUFFER:
		if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_read_buffer))
			ret = _ctl_diag_read_buffer(ioc, arg);
		break;
	case MPT3ADDNLDIAGQUERY:
		if (_IOC_SIZE(cmd) == sizeof(struct mpt3_addnl_diag_query))
			ret = _ctl_addnl_diag_query(ioc, arg);
		break;
	case MPT3ENABLEDIAGSBRRELOAD:
		if (_IOC_SIZE(cmd) == sizeof(struct mpt3_enable_diag_sbr_reload))
			ret = _ctl_enable_diag_sbr_reload(ioc, arg);
		break;
	default:
		dctlprintk(ioc,
			   ioc_info(ioc, "unsupported ioctl opcode(0x%08x)\n",
				    cmd));
		break;
	}

	/* Unlock in reverse acquisition order. */
	mutex_unlock(&ioc->ctl_cmds.mutex);
out_unlock_pciaccess:
	mutex_unlock(&ioc->pci_access_mutex);
	return ret;
}
2852
/**
 * _ctl_get_mpt_mctp_passthru_adapter - Traverse the IOC list and return the
 * IOC at dev_index position that supports MCTP passthru
 * @dev_index: position in the mpt3sas_ioc_list to search for
 * Return: pointer to the IOC on success,
 * NULL if device not found
 */
2860 static struct MPT3SAS_ADAPTER *
_ctl_get_mpt_mctp_passthru_adapter(int dev_index)2861 _ctl_get_mpt_mctp_passthru_adapter(int dev_index)
2862 {
2863 struct MPT3SAS_ADAPTER *ioc = NULL;
2864 int count = 0;
2865
2866 spin_lock(&gioc_lock);
2867 /* Traverse ioc list and return number of IOC that support MCTP passthru */
2868 list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
2869 if (ioc->facts.IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_MCTP_PASSTHRU) {
2870 if (count == dev_index) {
2871 spin_unlock(&gioc_lock);
2872 return ioc;
2873 }
2874 count++;
2875 }
2876 }
2877 spin_unlock(&gioc_lock);
2878
2879 return NULL;
2880 }
2881
2882 /**
2883 * mpt3sas_get_device_count - Retrieve the count of MCTP passthrough
2884 * capable devices managed by the driver.
2885 *
2886 * Returns number of devices that support MCTP passthrough.
2887 */
2888 int
mpt3sas_get_device_count(void)2889 mpt3sas_get_device_count(void)
2890 {
2891 int count = 0;
2892 struct MPT3SAS_ADAPTER *ioc = NULL;
2893
2894 spin_lock(&gioc_lock);
2895 /* Traverse ioc list and return number of IOC that support MCTP passthru */
2896 list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
2897 if (ioc->facts.IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_MCTP_PASSTHRU)
2898 count++;
2899
2900 spin_unlock(&gioc_lock);
2901
2902 return count;
2903 }
2904 EXPORT_SYMBOL(mpt3sas_get_device_count);
2905
/**
 * mpt3sas_send_mctp_passthru_req - Send an MPI MCTP passthrough command to
 * firmware
 * @command: The MPI MCTP passthrough command to send to firmware
 *
 * Returns 0 on success, anything else is error.
 */
mpt3sas_send_mctp_passthru_req(struct mpt3_passthru_command * command)2913 int mpt3sas_send_mctp_passthru_req(struct mpt3_passthru_command *command)
2914 {
2915 struct MPT3SAS_ADAPTER *ioc;
2916 MPI2RequestHeader_t *mpi_request = NULL, *request;
2917 MPI2DefaultReply_t *mpi_reply;
2918 Mpi26MctpPassthroughRequest_t *mctp_passthru_req;
2919 u16 smid;
2920 unsigned long timeout;
2921 u8 issue_reset = 0;
2922 u32 sz;
2923 void *psge;
2924 void *data_out = NULL;
2925 dma_addr_t data_out_dma = 0;
2926 size_t data_out_sz = 0;
2927 void *data_in = NULL;
2928 dma_addr_t data_in_dma = 0;
2929 size_t data_in_sz = 0;
2930 long ret;
2931
2932 /* Retrieve ioc from dev_index */
2933 ioc = _ctl_get_mpt_mctp_passthru_adapter(command->dev_index);
2934 if (!ioc)
2935 return -ENODEV;
2936
2937 mutex_lock(&ioc->pci_access_mutex);
2938 if (ioc->shost_recovery ||
2939 ioc->pci_error_recovery || ioc->is_driver_loading ||
2940 ioc->remove_host) {
2941 ret = -EAGAIN;
2942 goto unlock_pci_access;
2943 }
2944
2945 /* Lock the ctl_cmds mutex to ensure a single ctl cmd is pending */
2946 if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex)) {
2947 ret = -ERESTARTSYS;
2948 goto unlock_pci_access;
2949 }
2950
2951 if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
2952 ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
2953 ret = -EAGAIN;
2954 goto unlock_ctl_cmds;
2955 }
2956
2957 ret = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
2958 if (ret)
2959 goto unlock_ctl_cmds;
2960
2961 mpi_request = (MPI2RequestHeader_t *)command->mpi_request;
2962 if (mpi_request->Function != MPI2_FUNCTION_MCTP_PASSTHROUGH) {
2963 ioc_err(ioc, "%s: Invalid request received, Function 0x%x\n",
2964 __func__, mpi_request->Function);
2965 ret = -EINVAL;
2966 goto unlock_ctl_cmds;
2967 }
2968
2969 /* Use first reserved smid for passthrough commands */
2970 smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
2971 ret = 0;
2972 ioc->ctl_cmds.status = MPT3_CMD_PENDING;
2973 memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
2974 request = mpt3sas_base_get_msg_frame(ioc, smid);
2975 memset(request, 0, ioc->request_sz);
2976 memcpy(request, command->mpi_request, sizeof(Mpi26MctpPassthroughRequest_t));
2977 ioc->ctl_cmds.smid = smid;
2978 data_out_sz = command->data_out_size;
2979 data_in_sz = command->data_in_size;
2980
2981 /* obtain dma-able memory for data transfer */
2982 if (data_out_sz) /* WRITE */ {
2983 data_out = dma_alloc_coherent(&ioc->pdev->dev, data_out_sz,
2984 &data_out_dma, GFP_ATOMIC);
2985 if (!data_out) {
2986 ret = -ENOMEM;
2987 mpt3sas_base_free_smid(ioc, smid);
2988 goto out;
2989 }
2990 memcpy(data_out, command->data_out_buf_ptr, data_out_sz);
2991
2992 }
2993
2994 if (data_in_sz) /* READ */ {
2995 data_in = dma_alloc_coherent(&ioc->pdev->dev, data_in_sz,
2996 &data_in_dma, GFP_ATOMIC);
2997 if (!data_in) {
2998 ret = -ENOMEM;
2999 mpt3sas_base_free_smid(ioc, smid);
3000 goto out;
3001 }
3002 }
3003
3004 psge = &((Mpi26MctpPassthroughRequest_t *)request)->H2DSGL;
3005
3006 init_completion(&ioc->ctl_cmds.done);
3007
3008 mctp_passthru_req = (Mpi26MctpPassthroughRequest_t *)request;
3009
3010 _ctl_send_mctp_passthru_req(ioc, mctp_passthru_req, psge, data_out_dma,
3011 data_out_sz, data_in_dma, data_in_sz, smid);
3012
3013 timeout = command->timeout;
3014 if (timeout < MPT3_IOCTL_DEFAULT_TIMEOUT)
3015 timeout = MPT3_IOCTL_DEFAULT_TIMEOUT;
3016
3017 wait_for_completion_timeout(&ioc->ctl_cmds.done, timeout*HZ);
3018 if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
3019 mpt3sas_check_cmd_timeout(ioc,
3020 ioc->ctl_cmds.status, mpi_request,
3021 sizeof(Mpi26MctpPassthroughRequest_t) / 4, issue_reset);
3022 goto issue_host_reset;
3023 }
3024
3025 mpi_reply = ioc->ctl_cmds.reply;
3026
3027 /* copy out xdata to user */
3028 if (data_in_sz)
3029 memcpy(command->data_in_buf_ptr, data_in, data_in_sz);
3030
3031 /* copy out reply message frame to user */
3032 if (command->max_reply_bytes) {
3033 sz = min_t(u32, command->max_reply_bytes, ioc->reply_sz);
3034 memcpy(command->reply_frame_buf_ptr, ioc->ctl_cmds.reply, sz);
3035 }
3036
3037 issue_host_reset:
3038 if (issue_reset) {
3039 ret = -ENODATA;
3040 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3041 }
3042
3043 out:
3044 /* free memory associated with sg buffers */
3045 if (data_in)
3046 dma_free_coherent(&ioc->pdev->dev, data_in_sz, data_in,
3047 data_in_dma);
3048
3049 if (data_out)
3050 dma_free_coherent(&ioc->pdev->dev, data_out_sz, data_out,
3051 data_out_dma);
3052
3053 ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
3054
3055 unlock_ctl_cmds:
3056 mutex_unlock(&ioc->ctl_cmds.mutex);
3057
3058 unlock_pci_access:
3059 mutex_unlock(&ioc->pci_access_mutex);
3060 return ret;
3061
3062 }
3063 EXPORT_SYMBOL(mpt3sas_send_mctp_passthru_req);
3064
3065 /**
3066 * _ctl_ioctl - mpt3ctl main ioctl entry point (unlocked)
3067 * @file: (struct file)
3068 * @cmd: ioctl opcode
3069 * @arg: ?
3070 */
3071 static long
_ctl_ioctl(struct file * file,unsigned int cmd,unsigned long arg)3072 _ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3073 {
3074 long ret;
3075
3076 /* pass MPI25_VERSION | MPI26_VERSION value,
3077 * to indicate that this ioctl cmd
3078 * came from mpt3ctl ioctl device.
3079 */
3080 ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0,
3081 MPI25_VERSION | MPI26_VERSION);
3082 return ret;
3083 }
3084
3085 /**
3086 * _ctl_mpt2_ioctl - mpt2ctl main ioctl entry point (unlocked)
3087 * @file: (struct file)
3088 * @cmd: ioctl opcode
3089 * @arg: ?
3090 */
3091 static long
_ctl_mpt2_ioctl(struct file * file,unsigned int cmd,unsigned long arg)3092 _ctl_mpt2_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3093 {
3094 long ret;
3095
3096 /* pass MPI2_VERSION value, to indicate that this ioctl cmd
3097 * came from mpt2ctl ioctl device.
3098 */
3099 ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0, MPI2_VERSION);
3100 return ret;
3101 }
3102 #ifdef CONFIG_COMPAT
3103 /**
3104 * _ctl_ioctl_compat - main ioctl entry point (compat)
3105 * @file: ?
3106 * @cmd: ?
3107 * @arg: ?
3108 *
3109 * This routine handles 32 bit applications in 64bit os.
3110 */
3111 static long
_ctl_ioctl_compat(struct file * file,unsigned cmd,unsigned long arg)3112 _ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
3113 {
3114 long ret;
3115
3116 ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1,
3117 MPI25_VERSION | MPI26_VERSION);
3118 return ret;
3119 }
3120
3121 /**
3122 * _ctl_mpt2_ioctl_compat - main ioctl entry point (compat)
3123 * @file: ?
3124 * @cmd: ?
3125 * @arg: ?
3126 *
3127 * This routine handles 32 bit applications in 64bit os.
3128 */
3129 static long
_ctl_mpt2_ioctl_compat(struct file * file,unsigned cmd,unsigned long arg)3130 _ctl_mpt2_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
3131 {
3132 long ret;
3133
3134 ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1, MPI2_VERSION);
3135 return ret;
3136 }
3137 #endif
3138
3139 /* scsi host attributes */
3140 /**
3141 * version_fw_show - firmware version
3142 * @cdev: pointer to embedded class device
3143 * @attr: ?
3144 * @buf: the buffer returned
3145 *
3146 * A sysfs 'read-only' shost attribute.
3147 */
3148 static ssize_t
version_fw_show(struct device * cdev,struct device_attribute * attr,char * buf)3149 version_fw_show(struct device *cdev, struct device_attribute *attr,
3150 char *buf)
3151 {
3152 struct Scsi_Host *shost = class_to_shost(cdev);
3153 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3154
3155 return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n",
3156 (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
3157 (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
3158 (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
3159 ioc->facts.FWVersion.Word & 0x000000FF);
3160 }
3161 static DEVICE_ATTR_RO(version_fw);
3162
3163 /**
3164 * version_bios_show - bios version
3165 * @cdev: pointer to embedded class device
3166 * @attr: ?
3167 * @buf: the buffer returned
3168 *
3169 * A sysfs 'read-only' shost attribute.
3170 */
3171 static ssize_t
version_bios_show(struct device * cdev,struct device_attribute * attr,char * buf)3172 version_bios_show(struct device *cdev, struct device_attribute *attr,
3173 char *buf)
3174 {
3175 struct Scsi_Host *shost = class_to_shost(cdev);
3176 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3177
3178 u32 version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
3179
3180 return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n",
3181 (version & 0xFF000000) >> 24,
3182 (version & 0x00FF0000) >> 16,
3183 (version & 0x0000FF00) >> 8,
3184 version & 0x000000FF);
3185 }
3186 static DEVICE_ATTR_RO(version_bios);
3187
3188 /**
3189 * version_mpi_show - MPI (message passing interface) version
3190 * @cdev: pointer to embedded class device
3191 * @attr: ?
3192 * @buf: the buffer returned
3193 *
3194 * A sysfs 'read-only' shost attribute.
3195 */
3196 static ssize_t
version_mpi_show(struct device * cdev,struct device_attribute * attr,char * buf)3197 version_mpi_show(struct device *cdev, struct device_attribute *attr,
3198 char *buf)
3199 {
3200 struct Scsi_Host *shost = class_to_shost(cdev);
3201 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3202
3203 return snprintf(buf, PAGE_SIZE, "%03x.%02x\n",
3204 ioc->facts.MsgVersion, ioc->facts.HeaderVersion >> 8);
3205 }
3206 static DEVICE_ATTR_RO(version_mpi);
3207
3208 /**
3209 * version_product_show - product name
3210 * @cdev: pointer to embedded class device
3211 * @attr: ?
3212 * @buf: the buffer returned
3213 *
3214 * A sysfs 'read-only' shost attribute.
3215 */
3216 static ssize_t
version_product_show(struct device * cdev,struct device_attribute * attr,char * buf)3217 version_product_show(struct device *cdev, struct device_attribute *attr,
3218 char *buf)
3219 {
3220 struct Scsi_Host *shost = class_to_shost(cdev);
3221 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3222
3223 return snprintf(buf, 16, "%s\n", ioc->manu_pg0.ChipName);
3224 }
3225 static DEVICE_ATTR_RO(version_product);
3226
3227 /**
3228 * version_nvdata_persistent_show - ndvata persistent version
3229 * @cdev: pointer to embedded class device
3230 * @attr: ?
3231 * @buf: the buffer returned
3232 *
3233 * A sysfs 'read-only' shost attribute.
3234 */
3235 static ssize_t
version_nvdata_persistent_show(struct device * cdev,struct device_attribute * attr,char * buf)3236 version_nvdata_persistent_show(struct device *cdev,
3237 struct device_attribute *attr, char *buf)
3238 {
3239 struct Scsi_Host *shost = class_to_shost(cdev);
3240 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3241
3242 return snprintf(buf, PAGE_SIZE, "%08xh\n",
3243 le32_to_cpu(ioc->iounit_pg0.NvdataVersionPersistent.Word));
3244 }
3245 static DEVICE_ATTR_RO(version_nvdata_persistent);
3246
3247 /**
3248 * version_nvdata_default_show - nvdata default version
3249 * @cdev: pointer to embedded class device
3250 * @attr: ?
3251 * @buf: the buffer returned
3252 *
3253 * A sysfs 'read-only' shost attribute.
3254 */
3255 static ssize_t
version_nvdata_default_show(struct device * cdev,struct device_attribute * attr,char * buf)3256 version_nvdata_default_show(struct device *cdev, struct device_attribute
3257 *attr, char *buf)
3258 {
3259 struct Scsi_Host *shost = class_to_shost(cdev);
3260 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3261
3262 return snprintf(buf, PAGE_SIZE, "%08xh\n",
3263 le32_to_cpu(ioc->iounit_pg0.NvdataVersionDefault.Word));
3264 }
3265 static DEVICE_ATTR_RO(version_nvdata_default);
3266
3267 /**
3268 * board_name_show - board name
3269 * @cdev: pointer to embedded class device
3270 * @attr: ?
3271 * @buf: the buffer returned
3272 *
3273 * A sysfs 'read-only' shost attribute.
3274 */
3275 static ssize_t
board_name_show(struct device * cdev,struct device_attribute * attr,char * buf)3276 board_name_show(struct device *cdev, struct device_attribute *attr,
3277 char *buf)
3278 {
3279 struct Scsi_Host *shost = class_to_shost(cdev);
3280 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3281
3282 return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardName);
3283 }
3284 static DEVICE_ATTR_RO(board_name);
3285
3286 /**
3287 * board_assembly_show - board assembly name
3288 * @cdev: pointer to embedded class device
3289 * @attr: ?
3290 * @buf: the buffer returned
3291 *
3292 * A sysfs 'read-only' shost attribute.
3293 */
3294 static ssize_t
board_assembly_show(struct device * cdev,struct device_attribute * attr,char * buf)3295 board_assembly_show(struct device *cdev, struct device_attribute *attr,
3296 char *buf)
3297 {
3298 struct Scsi_Host *shost = class_to_shost(cdev);
3299 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3300
3301 return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardAssembly);
3302 }
3303 static DEVICE_ATTR_RO(board_assembly);
3304
3305 /**
3306 * board_tracer_show - board tracer number
3307 * @cdev: pointer to embedded class device
3308 * @attr: ?
3309 * @buf: the buffer returned
3310 *
3311 * A sysfs 'read-only' shost attribute.
3312 */
3313 static ssize_t
board_tracer_show(struct device * cdev,struct device_attribute * attr,char * buf)3314 board_tracer_show(struct device *cdev, struct device_attribute *attr,
3315 char *buf)
3316 {
3317 struct Scsi_Host *shost = class_to_shost(cdev);
3318 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3319
3320 return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardTracerNumber);
3321 }
3322 static DEVICE_ATTR_RO(board_tracer);
3323
3324 /**
3325 * io_delay_show - io missing delay
3326 * @cdev: pointer to embedded class device
3327 * @attr: ?
3328 * @buf: the buffer returned
3329 *
3330 * This is for firmware implemention for deboucing device
3331 * removal events.
3332 *
3333 * A sysfs 'read-only' shost attribute.
3334 */
3335 static ssize_t
io_delay_show(struct device * cdev,struct device_attribute * attr,char * buf)3336 io_delay_show(struct device *cdev, struct device_attribute *attr,
3337 char *buf)
3338 {
3339 struct Scsi_Host *shost = class_to_shost(cdev);
3340 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3341
3342 return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->io_missing_delay);
3343 }
3344 static DEVICE_ATTR_RO(io_delay);
3345
3346 /**
3347 * device_delay_show - device missing delay
3348 * @cdev: pointer to embedded class device
3349 * @attr: ?
3350 * @buf: the buffer returned
3351 *
3352 * This is for firmware implemention for deboucing device
3353 * removal events.
3354 *
3355 * A sysfs 'read-only' shost attribute.
3356 */
3357 static ssize_t
device_delay_show(struct device * cdev,struct device_attribute * attr,char * buf)3358 device_delay_show(struct device *cdev, struct device_attribute *attr,
3359 char *buf)
3360 {
3361 struct Scsi_Host *shost = class_to_shost(cdev);
3362 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3363
3364 return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->device_missing_delay);
3365 }
3366 static DEVICE_ATTR_RO(device_delay);
3367
3368 /**
3369 * fw_queue_depth_show - global credits
3370 * @cdev: pointer to embedded class device
3371 * @attr: ?
3372 * @buf: the buffer returned
3373 *
3374 * This is firmware queue depth limit
3375 *
3376 * A sysfs 'read-only' shost attribute.
3377 */
3378 static ssize_t
fw_queue_depth_show(struct device * cdev,struct device_attribute * attr,char * buf)3379 fw_queue_depth_show(struct device *cdev, struct device_attribute *attr,
3380 char *buf)
3381 {
3382 struct Scsi_Host *shost = class_to_shost(cdev);
3383 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3384
3385 return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->facts.RequestCredit);
3386 }
3387 static DEVICE_ATTR_RO(fw_queue_depth);
3388
3389 /**
3390 * host_sas_address_show - sas address
3391 * @cdev: pointer to embedded class device
3392 * @attr: ?
3393 * @buf: the buffer returned
3394 *
3395 * This is the controller sas address
3396 *
3397 * A sysfs 'read-only' shost attribute.
3398 */
3399 static ssize_t
host_sas_address_show(struct device * cdev,struct device_attribute * attr,char * buf)3400 host_sas_address_show(struct device *cdev, struct device_attribute *attr,
3401 char *buf)
3402
3403 {
3404 struct Scsi_Host *shost = class_to_shost(cdev);
3405 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3406
3407 return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
3408 (unsigned long long)ioc->sas_hba.sas_address);
3409 }
3410 static DEVICE_ATTR_RO(host_sas_address);
3411
3412 /**
3413 * logging_level_show - logging level
3414 * @cdev: pointer to embedded class device
3415 * @attr: ?
3416 * @buf: the buffer returned
3417 *
3418 * A sysfs 'read/write' shost attribute.
3419 */
3420 static ssize_t
logging_level_show(struct device * cdev,struct device_attribute * attr,char * buf)3421 logging_level_show(struct device *cdev, struct device_attribute *attr,
3422 char *buf)
3423 {
3424 struct Scsi_Host *shost = class_to_shost(cdev);
3425 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3426
3427 return snprintf(buf, PAGE_SIZE, "%08xh\n", ioc->logging_level);
3428 }
3429 static ssize_t
logging_level_store(struct device * cdev,struct device_attribute * attr,const char * buf,size_t count)3430 logging_level_store(struct device *cdev, struct device_attribute *attr,
3431 const char *buf, size_t count)
3432 {
3433 struct Scsi_Host *shost = class_to_shost(cdev);
3434 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3435 int val = 0;
3436
3437 if (sscanf(buf, "%x", &val) != 1)
3438 return -EINVAL;
3439
3440 ioc->logging_level = val;
3441 ioc_info(ioc, "logging_level=%08xh\n",
3442 ioc->logging_level);
3443 return strlen(buf);
3444 }
3445 static DEVICE_ATTR_RW(logging_level);
3446
3447 /**
3448 * fwfault_debug_show - show/store fwfault_debug
3449 * @cdev: pointer to embedded class device
3450 * @attr: ?
3451 * @buf: the buffer returned
3452 *
3453 * mpt3sas_fwfault_debug is command line option
3454 * A sysfs 'read/write' shost attribute.
3455 */
3456 static ssize_t
fwfault_debug_show(struct device * cdev,struct device_attribute * attr,char * buf)3457 fwfault_debug_show(struct device *cdev, struct device_attribute *attr,
3458 char *buf)
3459 {
3460 struct Scsi_Host *shost = class_to_shost(cdev);
3461 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3462
3463 return snprintf(buf, PAGE_SIZE, "%d\n", ioc->fwfault_debug);
3464 }
3465 static ssize_t
fwfault_debug_store(struct device * cdev,struct device_attribute * attr,const char * buf,size_t count)3466 fwfault_debug_store(struct device *cdev, struct device_attribute *attr,
3467 const char *buf, size_t count)
3468 {
3469 struct Scsi_Host *shost = class_to_shost(cdev);
3470 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3471 int val = 0;
3472
3473 if (sscanf(buf, "%d", &val) != 1)
3474 return -EINVAL;
3475
3476 ioc->fwfault_debug = val;
3477 ioc_info(ioc, "fwfault_debug=%d\n",
3478 ioc->fwfault_debug);
3479 return strlen(buf);
3480 }
3481 static DEVICE_ATTR_RW(fwfault_debug);
3482
3483 /**
3484 * ioc_reset_count_show - ioc reset count
3485 * @cdev: pointer to embedded class device
3486 * @attr: ?
3487 * @buf: the buffer returned
3488 *
3489 * This is firmware queue depth limit
3490 *
3491 * A sysfs 'read-only' shost attribute.
3492 */
3493 static ssize_t
ioc_reset_count_show(struct device * cdev,struct device_attribute * attr,char * buf)3494 ioc_reset_count_show(struct device *cdev, struct device_attribute *attr,
3495 char *buf)
3496 {
3497 struct Scsi_Host *shost = class_to_shost(cdev);
3498 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3499
3500 return snprintf(buf, PAGE_SIZE, "%d\n", ioc->ioc_reset_count);
3501 }
3502 static DEVICE_ATTR_RO(ioc_reset_count);
3503
3504 /**
3505 * reply_queue_count_show - number of reply queues
3506 * @cdev: pointer to embedded class device
3507 * @attr: ?
3508 * @buf: the buffer returned
3509 *
3510 * This is number of reply queues
3511 *
3512 * A sysfs 'read-only' shost attribute.
3513 */
3514 static ssize_t
reply_queue_count_show(struct device * cdev,struct device_attribute * attr,char * buf)3515 reply_queue_count_show(struct device *cdev,
3516 struct device_attribute *attr, char *buf)
3517 {
3518 u8 reply_queue_count;
3519 struct Scsi_Host *shost = class_to_shost(cdev);
3520 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3521
3522 if ((ioc->facts.IOCCapabilities &
3523 MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable)
3524 reply_queue_count = ioc->reply_queue_count;
3525 else
3526 reply_queue_count = 1;
3527
3528 return snprintf(buf, PAGE_SIZE, "%d\n", reply_queue_count);
3529 }
3530 static DEVICE_ATTR_RO(reply_queue_count);
3531
/**
 * BRM_status_show - Backup Rail Monitor Status
 * @cdev: pointer to embedded class device
 * @attr: device attribute (unused)
 * @buf: the buffer returned
 *
 * Reads IO Unit page 3 from the controller and reports bit 0 of
 * GPIOVal[24], which carries the backup rail monitor status on
 * warpdrive controllers. Returns 0 (empty read) for non-warpdrive
 * controllers or when the adapter is being removed/recovered.
 *
 * A sysfs 'read-only' shost attribute.
 */
static ssize_t
BRM_status_show(struct device *cdev, struct device_attribute *attr,
	char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	Mpi2IOUnitPage3_t io_unit_pg3;
	Mpi2ConfigReply_t mpi_reply;
	u16 backup_rail_monitor_status = 0;
	u16 ioc_status;
	int sz;
	ssize_t rc = 0;

	/* Attribute is meaningful only on warpdrive hardware */
	if (!ioc->is_warpdrive) {
		ioc_err(ioc, "%s: BRM attribute is only for warpdrive\n",
		    __func__);
		return 0;
	}
	/* pci_access_mutex lock acquired by sysfs show path */
	mutex_lock(&ioc->pci_access_mutex);
	if (ioc->pci_error_recovery || ioc->remove_host)
		goto out;

	sz = sizeof(io_unit_pg3);
	memset(&io_unit_pg3, 0, sz);

	/* Fetch IO Unit page 3 from the controller */
	if (mpt3sas_config_get_iounit_pg3(ioc, &mpi_reply, &io_unit_pg3, sz) !=
	    0) {
		ioc_err(ioc, "%s: failed reading iounit_pg3\n",
		    __func__);
		rc = -EINVAL;
		goto out;
	}

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		ioc_err(ioc, "%s: iounit_pg3 failed with ioc_status(0x%04x)\n",
		    __func__, ioc_status);
		rc = -EINVAL;
		goto out;
	}

	/* GPIOVal[24] must exist for the BRM bit to be valid */
	if (io_unit_pg3.GPIOCount < 25) {
		ioc_err(ioc, "%s: iounit_pg3.GPIOCount less than 25 entries, detected (%d) entries\n",
		    __func__, io_unit_pg3.GPIOCount);
		rc = -EINVAL;
		goto out;
	}

	/* BRM status is in bit zero of GPIOVal[24] */
	backup_rail_monitor_status = le16_to_cpu(io_unit_pg3.GPIOVal[24]);
	rc = snprintf(buf, PAGE_SIZE, "%d\n", (backup_rail_monitor_status & 1));

 out:
	mutex_unlock(&ioc->pci_access_mutex);
	return rc;
}
static DEVICE_ATTR_RO(BRM_status);
3600
/*
 * Header layout at the start of a firmware diagnostic (trace) buffer,
 * as validated by host_trace_buffer_size_show(). All multi-byte fields
 * are little-endian.
 */
struct DIAG_BUFFER_START {
	__le32 Size;		/* valid data size reported by firmware */
	__le32 DiagVersion;	/* accepted: 0x00000000, 0x01000000, 0x01010000 */
	u8 BufferType;
	u8 Reserved[3];
	__le32 Reserved1;
	__le32 Reserved2;
	__le32 Reserved3;	/* signature checked against 0x4742444c */
};
3610
3611 /**
3612 * host_trace_buffer_size_show - host buffer size (trace only)
3613 * @cdev: pointer to embedded class device
3614 * @attr: ?
3615 * @buf: the buffer returned
3616 *
3617 * A sysfs 'read-only' shost attribute.
3618 */
3619 static ssize_t
host_trace_buffer_size_show(struct device * cdev,struct device_attribute * attr,char * buf)3620 host_trace_buffer_size_show(struct device *cdev,
3621 struct device_attribute *attr, char *buf)
3622 {
3623 struct Scsi_Host *shost = class_to_shost(cdev);
3624 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3625 u32 size = 0;
3626 struct DIAG_BUFFER_START *request_data;
3627
3628 if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) {
3629 ioc_err(ioc, "%s: host_trace_buffer is not registered\n",
3630 __func__);
3631 return 0;
3632 }
3633
3634 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3635 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
3636 ioc_err(ioc, "%s: host_trace_buffer is not registered\n",
3637 __func__);
3638 return 0;
3639 }
3640
3641 request_data = (struct DIAG_BUFFER_START *)
3642 ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE];
3643 if ((le32_to_cpu(request_data->DiagVersion) == 0x00000000 ||
3644 le32_to_cpu(request_data->DiagVersion) == 0x01000000 ||
3645 le32_to_cpu(request_data->DiagVersion) == 0x01010000) &&
3646 le32_to_cpu(request_data->Reserved3) == 0x4742444c)
3647 size = le32_to_cpu(request_data->Size);
3648
3649 ioc->ring_buffer_sz = size;
3650 return snprintf(buf, PAGE_SIZE, "%d\n", size);
3651 }
3652 static DEVICE_ATTR_RO(host_trace_buffer_size);
3653
3654 /**
3655 * host_trace_buffer_show - firmware ring buffer (trace only)
3656 * @cdev: pointer to embedded class device
3657 * @attr: ?
3658 * @buf: the buffer returned
3659 *
3660 * A sysfs 'read/write' shost attribute.
3661 *
3662 * You will only be able to read 4k bytes of ring buffer at a time.
3663 * In order to read beyond 4k bytes, you will have to write out the
3664 * offset to the same attribute, it will move the pointer.
3665 */
3666 static ssize_t
host_trace_buffer_show(struct device * cdev,struct device_attribute * attr,char * buf)3667 host_trace_buffer_show(struct device *cdev, struct device_attribute *attr,
3668 char *buf)
3669 {
3670 struct Scsi_Host *shost = class_to_shost(cdev);
3671 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3672 void *request_data;
3673 u32 size;
3674
3675 if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) {
3676 ioc_err(ioc, "%s: host_trace_buffer is not registered\n",
3677 __func__);
3678 return 0;
3679 }
3680
3681 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3682 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
3683 ioc_err(ioc, "%s: host_trace_buffer is not registered\n",
3684 __func__);
3685 return 0;
3686 }
3687
3688 if (ioc->ring_buffer_offset > ioc->ring_buffer_sz)
3689 return 0;
3690
3691 size = ioc->ring_buffer_sz - ioc->ring_buffer_offset;
3692 size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
3693 request_data = ioc->diag_buffer[0] + ioc->ring_buffer_offset;
3694 memcpy(buf, request_data, size);
3695 return size;
3696 }
3697
3698 static ssize_t
host_trace_buffer_store(struct device * cdev,struct device_attribute * attr,const char * buf,size_t count)3699 host_trace_buffer_store(struct device *cdev, struct device_attribute *attr,
3700 const char *buf, size_t count)
3701 {
3702 struct Scsi_Host *shost = class_to_shost(cdev);
3703 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3704 int val = 0;
3705
3706 if (sscanf(buf, "%d", &val) != 1)
3707 return -EINVAL;
3708
3709 ioc->ring_buffer_offset = val;
3710 return strlen(buf);
3711 }
3712 static DEVICE_ATTR_RW(host_trace_buffer);
3713
3714
3715 /*****************************************/
3716
3717 /**
3718 * host_trace_buffer_enable_show - firmware ring buffer (trace only)
3719 * @cdev: pointer to embedded class device
3720 * @attr: ?
3721 * @buf: the buffer returned
3722 *
3723 * A sysfs 'read/write' shost attribute.
3724 *
3725 * This is a mechnism to post/release host_trace_buffers
3726 */
3727 static ssize_t
host_trace_buffer_enable_show(struct device * cdev,struct device_attribute * attr,char * buf)3728 host_trace_buffer_enable_show(struct device *cdev,
3729 struct device_attribute *attr, char *buf)
3730 {
3731 struct Scsi_Host *shost = class_to_shost(cdev);
3732 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3733
3734 if ((!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) ||
3735 ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3736 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0))
3737 return snprintf(buf, PAGE_SIZE, "off\n");
3738 else if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3739 MPT3_DIAG_BUFFER_IS_RELEASED))
3740 return snprintf(buf, PAGE_SIZE, "release\n");
3741 else
3742 return snprintf(buf, PAGE_SIZE, "post\n");
3743 }
3744
/*
 * host_trace_buffer_enable_store - post or release the host trace buffer.
 * Accepts the strings "post" and "release"; anything else is ignored
 * (the write still returns success). Rejected with -EBUSY while any
 * recovery/removal/load is in progress.
 */
static ssize_t
host_trace_buffer_enable_store(struct device *cdev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	char str[10] = "";
	struct mpt3_diag_register diag_register;
	u8 issue_reset = 0;

	/* don't allow post/release to occur while recovery is active */
	if (ioc->shost_recovery || ioc->remove_host ||
	    ioc->pci_error_recovery || ioc->is_driver_loading)
		return -EBUSY;

	if (sscanf(buf, "%9s", str) != 1)
		return -EINVAL;

	if (!strcmp(str, "post")) {
		/* exit out if host buffers are already posted */
		if ((ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) &&
		    (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
		    MPT3_DIAG_BUFFER_IS_REGISTERED) &&
		    ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
		    MPT3_DIAG_BUFFER_IS_RELEASED) == 0))
			goto out;
		memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
		ioc_info(ioc, "posting host trace buffers\n");
		diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;

		if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0 &&
		    ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE] != 0) {
			/* post the same buffer allocated previously */
			diag_register.requested_buffer_size =
			    ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE];
		} else {
			/*
			 * Free the diag buffer memory which was previously
			 * allocated by an application.
			 */
			if ((ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE] != 0)
			    &&
			    (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
			    MPT3_DIAG_BUFFER_IS_APP_OWNED)) {
				dma_free_coherent(&ioc->pdev->dev,
						  ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE],
						  ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE],
						  ioc->diag_buffer_dma[MPI2_DIAG_BUF_TYPE_TRACE]);
				ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE] =
				    NULL;
			}

			/* default to a 1 MB driver-allocated buffer */
			diag_register.requested_buffer_size = (1024 * 1024);
		}

		/* unique id differs between MPI2 and MPI25/26 generations */
		diag_register.unique_id =
		    (ioc->hba_mpi_version_belonged == MPI2_VERSION) ?
		    (MPT2DIAGBUFFUNIQUEID):(MPT3DIAGBUFFUNIQUEID);
		ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] = 0;
		_ctl_diag_register_2(ioc, &diag_register);
		if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
		    MPT3_DIAG_BUFFER_IS_REGISTERED) {
			ioc_info(ioc,
			    "Trace buffer %d KB allocated through sysfs\n",
			    diag_register.requested_buffer_size>>10);
			if (ioc->hba_mpi_version_belonged != MPI2_VERSION)
				ioc->diag_buffer_status[
				    MPI2_DIAG_BUF_TYPE_TRACE] |=
				    MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED;
		}
	} else if (!strcmp(str, "release")) {
		/* exit out if host buffers are already released */
		if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE])
			goto out;
		if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
		    MPT3_DIAG_BUFFER_IS_REGISTERED) == 0)
			goto out;
		if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
		    MPT3_DIAG_BUFFER_IS_RELEASED))
			goto out;
		ioc_info(ioc, "releasing host trace buffer\n");
		ioc->htb_rel.buffer_rel_condition = MPT3_DIAG_BUFFER_REL_SYSFS;
		mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE,
		    &issue_reset);
	}

 out:
	return strlen(buf);
}
static DEVICE_ATTR_RW(host_trace_buffer_enable);
3835
/*********** diagnostic trigger support *********************************/
3837
3838 /**
3839 * diag_trigger_master_show - show the diag_trigger_master attribute
3840 * @cdev: pointer to embedded class device
3841 * @attr: ?
3842 * @buf: the buffer returned
3843 *
3844 * A sysfs 'read/write' shost attribute.
3845 */
3846 static ssize_t
diag_trigger_master_show(struct device * cdev,struct device_attribute * attr,char * buf)3847 diag_trigger_master_show(struct device *cdev,
3848 struct device_attribute *attr, char *buf)
3849
3850 {
3851 struct Scsi_Host *shost = class_to_shost(cdev);
3852 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3853 unsigned long flags;
3854 ssize_t rc;
3855
3856 spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3857 rc = sizeof(struct SL_WH_MASTER_TRIGGER_T);
3858 memcpy(buf, &ioc->diag_trigger_master, rc);
3859 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3860 return rc;
3861 }
3862
/**
 * diag_trigger_master_store - store the diag_trigger_master attribute
 * @cdev: pointer to embedded class device
 * @attr: device attribute (unused)
 * @buf: raw SL_WH_MASTER_TRIGGER_T bytes from user space
 * @count: number of bytes written
 *
 * Updates the master diagnostic trigger. When the controller supports
 * trigger pages, the new value is first pushed to driver trigger page 1;
 * then the cached copy is updated under diag_trigger_lock with the
 * FW-fault and adapter-reset trigger bits always forced on.
 *
 * A sysfs 'read/write' shost attribute.
 */
static ssize_t
diag_trigger_master_store(struct device *cdev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct SL_WH_MASTER_TRIGGER_T *master_tg;
	unsigned long flags;
	ssize_t rc;
	bool set = 1;

	/* never copy more than the structure size */
	rc = min(sizeof(struct SL_WH_MASTER_TRIGGER_T), count);

	if (ioc->supports_trigger_pages) {
		master_tg = kzalloc(sizeof(struct SL_WH_MASTER_TRIGGER_T),
		    GFP_KERNEL);
		if (!master_tg)
			return -ENOMEM;

		memcpy(master_tg, buf, rc);
		/* zero MasterData means clear the trigger page entry */
		if (!master_tg->MasterData)
			set = 0;
		if (mpt3sas_config_update_driver_trigger_pg1(ioc, master_tg,
		    set)) {
			kfree(master_tg);
			return -EFAULT;
		}
		kfree(master_tg);
	}

	spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
	memset(&ioc->diag_trigger_master, 0,
	    sizeof(struct SL_WH_MASTER_TRIGGER_T));
	memcpy(&ioc->diag_trigger_master, buf, rc);
	/* these two master triggers are always kept enabled */
	ioc->diag_trigger_master.MasterData |=
	    (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
	return rc;
}
static DEVICE_ATTR_RW(diag_trigger_master);
3913
3914
3915 /**
3916 * diag_trigger_event_show - show the diag_trigger_event attribute
3917 * @cdev: pointer to embedded class device
3918 * @attr: ?
3919 * @buf: the buffer returned
3920 *
3921 * A sysfs 'read/write' shost attribute.
3922 */
3923 static ssize_t
diag_trigger_event_show(struct device * cdev,struct device_attribute * attr,char * buf)3924 diag_trigger_event_show(struct device *cdev,
3925 struct device_attribute *attr, char *buf)
3926 {
3927 struct Scsi_Host *shost = class_to_shost(cdev);
3928 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3929 unsigned long flags;
3930 ssize_t rc;
3931
3932 spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3933 rc = sizeof(struct SL_WH_EVENT_TRIGGERS_T);
3934 memcpy(buf, &ioc->diag_trigger_event, rc);
3935 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3936 return rc;
3937 }
3938
3939 /**
3940 * diag_trigger_event_store - store the diag_trigger_event attribute
3941 * @cdev: pointer to embedded class device
3942 * @attr: ?
3943 * @buf: the buffer returned
3944 * @count: ?
3945 *
3946 * A sysfs 'read/write' shost attribute.
3947 */
3948 static ssize_t
diag_trigger_event_store(struct device * cdev,struct device_attribute * attr,const char * buf,size_t count)3949 diag_trigger_event_store(struct device *cdev,
3950 struct device_attribute *attr, const char *buf, size_t count)
3951
3952 {
3953 struct Scsi_Host *shost = class_to_shost(cdev);
3954 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3955 struct SL_WH_EVENT_TRIGGERS_T *event_tg;
3956 unsigned long flags;
3957 ssize_t sz;
3958 bool set = 1;
3959
3960 sz = min(sizeof(struct SL_WH_EVENT_TRIGGERS_T), count);
3961 if (ioc->supports_trigger_pages) {
3962 event_tg = kzalloc(sizeof(struct SL_WH_EVENT_TRIGGERS_T),
3963 GFP_KERNEL);
3964 if (!event_tg)
3965 return -ENOMEM;
3966
3967 memcpy(event_tg, buf, sz);
3968 if (!event_tg->ValidEntries)
3969 set = 0;
3970 if (mpt3sas_config_update_driver_trigger_pg2(ioc, event_tg,
3971 set)) {
3972 kfree(event_tg);
3973 return -EFAULT;
3974 }
3975 kfree(event_tg);
3976 }
3977
3978 spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3979
3980 memset(&ioc->diag_trigger_event, 0,
3981 sizeof(struct SL_WH_EVENT_TRIGGERS_T));
3982 memcpy(&ioc->diag_trigger_event, buf, sz);
3983 if (ioc->diag_trigger_event.ValidEntries > NUM_VALID_ENTRIES)
3984 ioc->diag_trigger_event.ValidEntries = NUM_VALID_ENTRIES;
3985 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3986 return sz;
3987 }
3988 static DEVICE_ATTR_RW(diag_trigger_event);
3989
3990
3991 /**
3992 * diag_trigger_scsi_show - show the diag_trigger_scsi attribute
3993 * @cdev: pointer to embedded class device
3994 * @attr: ?
3995 * @buf: the buffer returned
3996 *
3997 * A sysfs 'read/write' shost attribute.
3998 */
3999 static ssize_t
diag_trigger_scsi_show(struct device * cdev,struct device_attribute * attr,char * buf)4000 diag_trigger_scsi_show(struct device *cdev,
4001 struct device_attribute *attr, char *buf)
4002 {
4003 struct Scsi_Host *shost = class_to_shost(cdev);
4004 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
4005 unsigned long flags;
4006 ssize_t rc;
4007
4008 spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
4009 rc = sizeof(struct SL_WH_SCSI_TRIGGERS_T);
4010 memcpy(buf, &ioc->diag_trigger_scsi, rc);
4011 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
4012 return rc;
4013 }
4014
4015 /**
4016 * diag_trigger_scsi_store - store the diag_trigger_scsi attribute
4017 * @cdev: pointer to embedded class device
4018 * @attr: ?
4019 * @buf: the buffer returned
4020 * @count: ?
4021 *
4022 * A sysfs 'read/write' shost attribute.
4023 */
4024 static ssize_t
diag_trigger_scsi_store(struct device * cdev,struct device_attribute * attr,const char * buf,size_t count)4025 diag_trigger_scsi_store(struct device *cdev,
4026 struct device_attribute *attr, const char *buf, size_t count)
4027 {
4028 struct Scsi_Host *shost = class_to_shost(cdev);
4029 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
4030 struct SL_WH_SCSI_TRIGGERS_T *scsi_tg;
4031 unsigned long flags;
4032 ssize_t sz;
4033 bool set = 1;
4034
4035 sz = min(sizeof(struct SL_WH_SCSI_TRIGGERS_T), count);
4036 if (ioc->supports_trigger_pages) {
4037 scsi_tg = kzalloc(sizeof(struct SL_WH_SCSI_TRIGGERS_T),
4038 GFP_KERNEL);
4039 if (!scsi_tg)
4040 return -ENOMEM;
4041
4042 memcpy(scsi_tg, buf, sz);
4043 if (!scsi_tg->ValidEntries)
4044 set = 0;
4045 if (mpt3sas_config_update_driver_trigger_pg3(ioc, scsi_tg,
4046 set)) {
4047 kfree(scsi_tg);
4048 return -EFAULT;
4049 }
4050 kfree(scsi_tg);
4051 }
4052
4053 spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
4054
4055 memset(&ioc->diag_trigger_scsi, 0, sizeof(ioc->diag_trigger_scsi));
4056 memcpy(&ioc->diag_trigger_scsi, buf, sz);
4057 if (ioc->diag_trigger_scsi.ValidEntries > NUM_VALID_ENTRIES)
4058 ioc->diag_trigger_scsi.ValidEntries = NUM_VALID_ENTRIES;
4059 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
4060 return sz;
4061 }
4062 static DEVICE_ATTR_RW(diag_trigger_scsi);
4063
4064
4065 /**
4066 * diag_trigger_mpi_show - show the diag_trigger_mpi attribute
4067 * @cdev: pointer to embedded class device
4068 * @attr: ?
4069 * @buf: the buffer returned
4070 *
4071 * A sysfs 'read/write' shost attribute.
4072 */
4073 static ssize_t
diag_trigger_mpi_show(struct device * cdev,struct device_attribute * attr,char * buf)4074 diag_trigger_mpi_show(struct device *cdev,
4075 struct device_attribute *attr, char *buf)
4076 {
4077 struct Scsi_Host *shost = class_to_shost(cdev);
4078 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
4079 unsigned long flags;
4080 ssize_t rc;
4081
4082 spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
4083 rc = sizeof(struct SL_WH_MPI_TRIGGERS_T);
4084 memcpy(buf, &ioc->diag_trigger_mpi, rc);
4085 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
4086 return rc;
4087 }
4088
4089 /**
4090 * diag_trigger_mpi_store - store the diag_trigger_mpi attribute
4091 * @cdev: pointer to embedded class device
4092 * @attr: ?
4093 * @buf: the buffer returned
4094 * @count: ?
4095 *
4096 * A sysfs 'read/write' shost attribute.
4097 */
4098 static ssize_t
diag_trigger_mpi_store(struct device * cdev,struct device_attribute * attr,const char * buf,size_t count)4099 diag_trigger_mpi_store(struct device *cdev,
4100 struct device_attribute *attr, const char *buf, size_t count)
4101 {
4102 struct Scsi_Host *shost = class_to_shost(cdev);
4103 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
4104 struct SL_WH_MPI_TRIGGERS_T *mpi_tg;
4105 unsigned long flags;
4106 ssize_t sz;
4107 bool set = 1;
4108
4109 sz = min(sizeof(struct SL_WH_MPI_TRIGGERS_T), count);
4110 if (ioc->supports_trigger_pages) {
4111 mpi_tg = kzalloc(sizeof(struct SL_WH_MPI_TRIGGERS_T),
4112 GFP_KERNEL);
4113 if (!mpi_tg)
4114 return -ENOMEM;
4115
4116 memcpy(mpi_tg, buf, sz);
4117 if (!mpi_tg->ValidEntries)
4118 set = 0;
4119 if (mpt3sas_config_update_driver_trigger_pg4(ioc, mpi_tg,
4120 set)) {
4121 kfree(mpi_tg);
4122 return -EFAULT;
4123 }
4124 kfree(mpi_tg);
4125 }
4126
4127 spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
4128 memset(&ioc->diag_trigger_mpi, 0,
4129 sizeof(ioc->diag_trigger_mpi));
4130 memcpy(&ioc->diag_trigger_mpi, buf, sz);
4131 if (ioc->diag_trigger_mpi.ValidEntries > NUM_VALID_ENTRIES)
4132 ioc->diag_trigger_mpi.ValidEntries = NUM_VALID_ENTRIES;
4133 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
4134 return sz;
4135 }
4136
4137 static DEVICE_ATTR_RW(diag_trigger_mpi);
4138
/*********** diagnostic trigger support *** END ****************************/
4140
4141 /*****************************************/
4142
4143 /**
4144 * drv_support_bitmap_show - driver supported feature bitmap
4145 * @cdev: pointer to embedded class device
4146 * @attr: unused
4147 * @buf: the buffer returned
4148 *
4149 * A sysfs 'read-only' shost attribute.
4150 */
4151 static ssize_t
drv_support_bitmap_show(struct device * cdev,struct device_attribute * attr,char * buf)4152 drv_support_bitmap_show(struct device *cdev,
4153 struct device_attribute *attr, char *buf)
4154 {
4155 struct Scsi_Host *shost = class_to_shost(cdev);
4156 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
4157
4158 return snprintf(buf, PAGE_SIZE, "0x%08x\n", ioc->drv_support_bitmap);
4159 }
4160 static DEVICE_ATTR_RO(drv_support_bitmap);
4161
4162 /**
4163 * enable_sdev_max_qd_show - display whether sdev max qd is enabled/disabled
4164 * @cdev: pointer to embedded class device
4165 * @attr: unused
4166 * @buf: the buffer returned
4167 *
4168 * A sysfs read/write shost attribute. This attribute is used to set the
4169 * targets queue depth to HBA IO queue depth if this attribute is enabled.
4170 */
4171 static ssize_t
enable_sdev_max_qd_show(struct device * cdev,struct device_attribute * attr,char * buf)4172 enable_sdev_max_qd_show(struct device *cdev,
4173 struct device_attribute *attr, char *buf)
4174 {
4175 struct Scsi_Host *shost = class_to_shost(cdev);
4176 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
4177
4178 return snprintf(buf, PAGE_SIZE, "%d\n", ioc->enable_sdev_max_qd);
4179 }
4180
4181 /**
4182 * enable_sdev_max_qd_store - Enable/disable sdev max qd
4183 * @cdev: pointer to embedded class device
4184 * @attr: unused
4185 * @buf: the buffer returned
4186 * @count: unused
4187 *
4188 * A sysfs read/write shost attribute. This attribute is used to set the
4189 * targets queue depth to HBA IO queue depth if this attribute is enabled.
4190 * If this attribute is disabled then targets will have corresponding default
4191 * queue depth.
4192 */
4193 static ssize_t
enable_sdev_max_qd_store(struct device * cdev,struct device_attribute * attr,const char * buf,size_t count)4194 enable_sdev_max_qd_store(struct device *cdev,
4195 struct device_attribute *attr, const char *buf, size_t count)
4196 {
4197 struct Scsi_Host *shost = class_to_shost(cdev);
4198 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
4199 struct MPT3SAS_DEVICE *sas_device_priv_data;
4200 struct MPT3SAS_TARGET *sas_target_priv_data;
4201 int val = 0;
4202 struct scsi_device *sdev;
4203 struct _raid_device *raid_device;
4204 int qdepth;
4205
4206 if (kstrtoint(buf, 0, &val) != 0)
4207 return -EINVAL;
4208
4209 switch (val) {
4210 case 0:
4211 ioc->enable_sdev_max_qd = 0;
4212 shost_for_each_device(sdev, ioc->shost) {
4213 sas_device_priv_data = sdev->hostdata;
4214 if (!sas_device_priv_data)
4215 continue;
4216 sas_target_priv_data = sas_device_priv_data->sas_target;
4217 if (!sas_target_priv_data)
4218 continue;
4219
4220 if (sas_target_priv_data->flags &
4221 MPT_TARGET_FLAGS_VOLUME) {
4222 raid_device =
4223 mpt3sas_raid_device_find_by_handle(ioc,
4224 sas_target_priv_data->handle);
4225
4226 switch (raid_device->volume_type) {
4227 case MPI2_RAID_VOL_TYPE_RAID0:
4228 if (raid_device->device_info &
4229 MPI2_SAS_DEVICE_INFO_SSP_TARGET)
4230 qdepth =
4231 MPT3SAS_SAS_QUEUE_DEPTH;
4232 else
4233 qdepth =
4234 MPT3SAS_SATA_QUEUE_DEPTH;
4235 break;
4236 case MPI2_RAID_VOL_TYPE_RAID1E:
4237 case MPI2_RAID_VOL_TYPE_RAID1:
4238 case MPI2_RAID_VOL_TYPE_RAID10:
4239 case MPI2_RAID_VOL_TYPE_UNKNOWN:
4240 default:
4241 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
4242 }
4243 } else if (sas_target_priv_data->flags &
4244 MPT_TARGET_FLAGS_PCIE_DEVICE)
4245 qdepth = ioc->max_nvme_qd;
4246 else
4247 qdepth = (sas_target_priv_data->sas_dev->port_type > 1) ?
4248 ioc->max_wideport_qd : ioc->max_narrowport_qd;
4249
4250 mpt3sas_scsih_change_queue_depth(sdev, qdepth);
4251 }
4252 break;
4253 case 1:
4254 ioc->enable_sdev_max_qd = 1;
4255 shost_for_each_device(sdev, ioc->shost)
4256 mpt3sas_scsih_change_queue_depth(sdev,
4257 shost->can_queue);
4258 break;
4259 default:
4260 return -EINVAL;
4261 }
4262
4263 return strlen(buf);
4264 }
4265 static DEVICE_ATTR_RW(enable_sdev_max_qd);
4266
/* shost (per-HBA) sysfs attributes, exported via mpt3sas_host_groups */
static struct attribute *mpt3sas_host_attrs[] = {
	&dev_attr_version_fw.attr,
	&dev_attr_version_bios.attr,
	&dev_attr_version_mpi.attr,
	&dev_attr_version_product.attr,
	&dev_attr_version_nvdata_persistent.attr,
	&dev_attr_version_nvdata_default.attr,
	&dev_attr_board_name.attr,
	&dev_attr_board_assembly.attr,
	&dev_attr_board_tracer.attr,
	&dev_attr_io_delay.attr,
	&dev_attr_device_delay.attr,
	&dev_attr_logging_level.attr,
	&dev_attr_fwfault_debug.attr,
	&dev_attr_fw_queue_depth.attr,
	&dev_attr_host_sas_address.attr,
	&dev_attr_ioc_reset_count.attr,
	&dev_attr_host_trace_buffer_size.attr,
	&dev_attr_host_trace_buffer.attr,
	&dev_attr_host_trace_buffer_enable.attr,
	&dev_attr_reply_queue_count.attr,
	&dev_attr_diag_trigger_master.attr,
	&dev_attr_diag_trigger_event.attr,
	&dev_attr_diag_trigger_scsi.attr,
	&dev_attr_diag_trigger_mpi.attr,
	&dev_attr_drv_support_bitmap.attr,
	&dev_attr_BRM_status.attr,
	&dev_attr_enable_sdev_max_qd.attr,
	NULL,	/* sentinel */
};

static const struct attribute_group mpt3sas_host_attr_group = {
	.attrs = mpt3sas_host_attrs
};

/* NULL-terminated group list hooked into the SCSI host template */
const struct attribute_group *mpt3sas_host_groups[] = {
	&mpt3sas_host_attr_group,
	NULL
};
4306
4307 /* device attributes */
4308
4309 /**
4310 * sas_address_show - sas address
4311 * @dev: pointer to embedded class device
4312 * @attr: ?
4313 * @buf: the buffer returned
4314 *
4315 * This is the sas address for the target
4316 *
4317 * A sysfs 'read-only' shost attribute.
4318 */
4319 static ssize_t
sas_address_show(struct device * dev,struct device_attribute * attr,char * buf)4320 sas_address_show(struct device *dev, struct device_attribute *attr,
4321 char *buf)
4322 {
4323 struct scsi_device *sdev = to_scsi_device(dev);
4324 struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
4325
4326 return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
4327 (unsigned long long)sas_device_priv_data->sas_target->sas_address);
4328 }
4329 static DEVICE_ATTR_RO(sas_address);
4330
4331 /**
4332 * sas_device_handle_show - device handle
4333 * @dev: pointer to embedded class device
4334 * @attr: ?
4335 * @buf: the buffer returned
4336 *
4337 * This is the firmware assigned device handle
4338 *
4339 * A sysfs 'read-only' shost attribute.
4340 */
4341 static ssize_t
sas_device_handle_show(struct device * dev,struct device_attribute * attr,char * buf)4342 sas_device_handle_show(struct device *dev, struct device_attribute *attr,
4343 char *buf)
4344 {
4345 struct scsi_device *sdev = to_scsi_device(dev);
4346 struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
4347
4348 return snprintf(buf, PAGE_SIZE, "0x%04x\n",
4349 sas_device_priv_data->sas_target->handle);
4350 }
4351 static DEVICE_ATTR_RO(sas_device_handle);
4352
4353 /**
4354 * sas_ncq_prio_supported_show - Indicate if device supports NCQ priority
4355 * @dev: pointer to embedded device
4356 * @attr: sas_ncq_prio_supported attribute descriptor
4357 * @buf: the buffer returned
4358 *
4359 * A sysfs 'read-only' sdev attribute, only works with SATA
4360 */
4361 static ssize_t
sas_ncq_prio_supported_show(struct device * dev,struct device_attribute * attr,char * buf)4362 sas_ncq_prio_supported_show(struct device *dev,
4363 struct device_attribute *attr, char *buf)
4364 {
4365 struct scsi_device *sdev = to_scsi_device(dev);
4366
4367 return sysfs_emit(buf, "%d\n", sas_ata_ncq_prio_supported(sdev));
4368 }
4369 static DEVICE_ATTR_RO(sas_ncq_prio_supported);
4370
4371 /**
4372 * sas_ncq_prio_enable_show - send prioritized io commands to device
4373 * @dev: pointer to embedded device
4374 * @attr: ?
4375 * @buf: the buffer returned
4376 *
4377 * A sysfs 'read/write' sdev attribute, only works with SATA
4378 */
4379 static ssize_t
sas_ncq_prio_enable_show(struct device * dev,struct device_attribute * attr,char * buf)4380 sas_ncq_prio_enable_show(struct device *dev,
4381 struct device_attribute *attr, char *buf)
4382 {
4383 struct scsi_device *sdev = to_scsi_device(dev);
4384 struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
4385
4386 return snprintf(buf, PAGE_SIZE, "%d\n",
4387 sas_device_priv_data->ncq_prio_enable);
4388 }
4389
4390 static ssize_t
sas_ncq_prio_enable_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)4391 sas_ncq_prio_enable_store(struct device *dev,
4392 struct device_attribute *attr,
4393 const char *buf, size_t count)
4394 {
4395 struct scsi_device *sdev = to_scsi_device(dev);
4396 struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
4397 bool ncq_prio_enable = 0;
4398
4399 if (kstrtobool(buf, &ncq_prio_enable))
4400 return -EINVAL;
4401
4402 if (!sas_ata_ncq_prio_supported(sdev))
4403 return -EINVAL;
4404
4405 sas_device_priv_data->ncq_prio_enable = ncq_prio_enable;
4406 return strlen(buf);
4407 }
4408 static DEVICE_ATTR_RW(sas_ncq_prio_enable);
4409
/* per-sdev sysfs attributes, exported via mpt3sas_dev_groups */
static struct attribute *mpt3sas_dev_attrs[] = {
	&dev_attr_sas_address.attr,
	&dev_attr_sas_device_handle.attr,
	&dev_attr_sas_ncq_prio_supported.attr,
	&dev_attr_sas_ncq_prio_enable.attr,
	NULL,	/* sentinel */
};

static const struct attribute_group mpt3sas_dev_attr_group = {
	.attrs = mpt3sas_dev_attrs
};

/* NULL-terminated group list hooked into the SCSI host template */
const struct attribute_group *mpt3sas_dev_groups[] = {
	&mpt3sas_dev_attr_group,
	NULL
};
4426
/* file operations table for mpt3ctl device */
static const struct file_operations ctl_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = _ctl_ioctl,
	.poll = _ctl_poll,
	.fasync = _ctl_fasync,
#ifdef CONFIG_COMPAT
	/* 32-bit ioctl entry point on 64-bit kernels */
	.compat_ioctl = _ctl_ioctl_compat,
#endif
};

/* file operations table for mpt2ctl device */
static const struct file_operations ctl_gen2_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = _ctl_mpt2_ioctl,
	.poll = _ctl_poll,
	.fasync = _ctl_fasync,
#ifdef CONFIG_COMPAT
	/* 32-bit ioctl entry point on 64-bit kernels */
	.compat_ioctl = _ctl_mpt2_ioctl_compat,
#endif
};

/* misc char device for gen3 (mpt3ctl) management interface */
static struct miscdevice ctl_dev = {
	.minor = MPT3SAS_MINOR,
	.name = MPT3SAS_DEV_NAME,
	.fops = &ctl_fops,
};

/* misc char device for gen2 (mpt2ctl) management interface */
static struct miscdevice gen2_ctl_dev = {
	.minor = MPT2SAS_MINOR,
	.name = MPT2SAS_DEV_NAME,
	.fops = &ctl_gen2_fops,
};
4460
4461 /**
4462 * mpt3sas_ctl_init - main entry point for ctl.
4463 * @hbas_to_enumerate: ?
4464 */
4465 void
mpt3sas_ctl_init(ushort hbas_to_enumerate)4466 mpt3sas_ctl_init(ushort hbas_to_enumerate)
4467 {
4468 async_queue = NULL;
4469
4470 /* Don't register mpt3ctl ioctl device if
4471 * hbas_to_enumarate is one.
4472 */
4473 if (hbas_to_enumerate != 1)
4474 if (misc_register(&ctl_dev) < 0)
4475 pr_err("%s can't register misc device [minor=%d]\n",
4476 MPT3SAS_DRIVER_NAME, MPT3SAS_MINOR);
4477
4478 /* Don't register mpt3ctl ioctl device if
4479 * hbas_to_enumarate is two.
4480 */
4481 if (hbas_to_enumerate != 2)
4482 if (misc_register(&gen2_ctl_dev) < 0)
4483 pr_err("%s can't register misc device [minor=%d]\n",
4484 MPT2SAS_DRIVER_NAME, MPT2SAS_MINOR);
4485
4486 init_waitqueue_head(&ctl_poll_wait);
4487 }
4488
4489 /**
4490 * mpt3sas_ctl_release - release dma for ctl
4491 * @ioc: per adapter object
4492 */
4493 void
mpt3sas_ctl_release(struct MPT3SAS_ADAPTER * ioc)4494 mpt3sas_ctl_release(struct MPT3SAS_ADAPTER *ioc)
4495 {
4496 int i;
4497
4498 /* free memory associated to diag buffers */
4499 for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
4500 if (!ioc->diag_buffer[i])
4501 continue;
4502 dma_free_coherent(&ioc->pdev->dev,
4503 ioc->diag_buffer_sz[i],
4504 ioc->diag_buffer[i],
4505 ioc->diag_buffer_dma[i]);
4506 ioc->diag_buffer[i] = NULL;
4507 ioc->diag_buffer_status[i] = 0;
4508 }
4509
4510 kfree(ioc->event_log);
4511 }
4512
4513 /**
4514 * mpt3sas_ctl_exit - exit point for ctl
4515 * @hbas_to_enumerate: ?
4516 */
4517 void
mpt3sas_ctl_exit(ushort hbas_to_enumerate)4518 mpt3sas_ctl_exit(ushort hbas_to_enumerate)
4519 {
4520
4521 if (hbas_to_enumerate != 1)
4522 misc_deregister(&ctl_dev);
4523 if (hbas_to_enumerate != 2)
4524 misc_deregister(&gen2_ctl_dev);
4525 }
4526