xref: /linux/drivers/scsi/mpt3sas/mpt3sas_base.c (revision 3932b9ca55b0be314a36d3e84faff3e823c081f5)
1 /*
2  * This is the Fusion MPT base driver providing common API layer interface
3  * for access to MPT (Message Passing Technology) firmware.
4  *
5  * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c
6  * Copyright (C) 2012-2013  LSI Corporation
7  *  (mailto:DL-MPTFusionLinux@lsi.com)
8  *
9  * This program is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU General Public License
11  * as published by the Free Software Foundation; either version 2
12  * of the License, or (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  *
19  * NO WARRANTY
20  * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21  * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22  * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23  * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24  * solely responsible for determining the appropriateness of using and
25  * distributing the Program and assumes all risks associated with its
26  * exercise of rights under this Agreement, including but not limited to
27  * the risks and costs of program errors, damage to or loss of data,
28  * programs or equipment, and unavailability or interruption of operations.
29 
30  * DISCLAIMER OF LIABILITY
31  * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35  * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36  * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37  * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
38 
39  * You should have received a copy of the GNU General Public License
40  * along with this program; if not, write to the Free Software
41  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
42  * USA.
43  */
44 
45 #include <linux/kernel.h>
46 #include <linux/module.h>
47 #include <linux/errno.h>
48 #include <linux/init.h>
49 #include <linux/slab.h>
50 #include <linux/types.h>
51 #include <linux/pci.h>
52 #include <linux/kdev_t.h>
53 #include <linux/blkdev.h>
54 #include <linux/delay.h>
55 #include <linux/interrupt.h>
56 #include <linux/dma-mapping.h>
57 #include <linux/io.h>
58 #include <linux/time.h>
59 #include <linux/kthread.h>
60 #include <linux/aer.h>
61 
62 
63 #include "mpt3sas_base.h"
64 
/* Dispatch table of completion routines, indexed by callback index (cb_idx)
 * as registered via mpt3sas_base_register_callback_handler.
 */
static MPT_CALLBACK	mpt_callbacks[MPT_MAX_CALLBACKS];


/* Watchdog poll period for _base_fault_reset_work. */
#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */

 /* maximum controller queue depth */
#define MAX_HBA_QUEUE_DEPTH	30000
#define MAX_CHAIN_DEPTH		100000
static int max_queue_depth = -1;
module_param(max_queue_depth, int, 0);
MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");

static int max_sgl_entries = -1;
module_param(max_sgl_entries, int, 0);
MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");

static int msix_disable = -1;
module_param(msix_disable, int, 0);
MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");

static int max_msix_vectors = 8;
module_param(max_msix_vectors, int, 0);
MODULE_PARM_DESC(max_msix_vectors,
	" max msix vectors - (default=8)");

/* Writable at runtime through module_param_call below; a write is fanned
 * out to every adapter by _scsih_set_fwfault_debug.
 */
static int mpt3sas_fwfault_debug;
MODULE_PARM_DESC(mpt3sas_fwfault_debug,
	" enable detection of firmware fault and halt firmware - (default=0)");
93 
94 
/**
 * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
 * @val: string value written to the module parameter
 * @kp: kernel parameter descriptor (backs mpt3sas_fwfault_debug)
 *
 * Parses and stores the new value via param_set_int(), then propagates
 * it to every registered adapter on mpt3sas_ioc_list.
 *
 * Return: 0 on success, otherwise the error from param_set_int().
 */
static int
_scsih_set_fwfault_debug(const char *val, struct kernel_param *kp)
{
	int ret = param_set_int(val, kp);
	struct MPT3SAS_ADAPTER *ioc;

	if (ret)
		return ret;

	pr_info("setting fwfault_debug(%d)\n", mpt3sas_fwfault_debug);
	/* NOTE(review): mpt3sas_ioc_list is walked here without holding a
	 * lock — confirm whether adapter add/remove can race this write. */
	list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
		ioc->fwfault_debug = mpt3sas_fwfault_debug;
	return 0;
}
module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
	param_get_int, &mpt3sas_fwfault_debug, 0644);
115 
116 /**
117  *  mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc
118  * @arg: input argument, used to derive ioc
119  *
120  * Return 0 if controller is removed from pci subsystem.
121  * Return -1 for other case.
122  */
123 static int mpt3sas_remove_dead_ioc_func(void *arg)
124 {
125 	struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
126 	struct pci_dev *pdev;
127 
128 	if ((ioc == NULL))
129 		return -1;
130 
131 	pdev = ioc->pdev;
132 	if ((pdev == NULL))
133 		return -1;
134 	pci_stop_and_remove_bus_device_locked(pdev);
135 	return 0;
136 }
137 
/**
 * _base_fault_reset_work - workq handling ioc fault conditions
 * @work: input argument, used to derive ioc
 * Context: sleep.
 *
 * Periodic watchdog body: reads the doorbell state, removes a dead IOC,
 * hard-resets a non-operational one, and otherwise re-arms itself after
 * FAULT_POLLING_INTERVAL milliseconds.
 *
 * Return nothing.
 */
static void
_base_fault_reset_work(struct work_struct *work)
{
	struct MPT3SAS_ADAPTER *ioc =
	    container_of(work, struct MPT3SAS_ADAPTER, fault_reset_work.work);
	unsigned long	 flags;
	u32 doorbell;
	int rc;
	struct task_struct *p;


	/* While a host reset is in progress, just re-arm the timer. */
	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	if (ioc->shost_recovery)
		goto rearm_timer;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);

	doorbell = mpt3sas_base_get_iocstate(ioc, 0);
	/* All state bits set at once is not a valid state: the doorbell is
	 * reading back as all-ones, i.e. the IOC is dead/unreachable. */
	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
		pr_err(MPT3SAS_FMT "SAS host is non-operational !!!!\n",
		    ioc->name);

		/*
		 * Call _scsih_flush_pending_cmds callback so that we flush all
		 * pending commands back to OS. This call is required to avoid
		 * deadlock at block layer. Dead IOC will fail to do diag reset,
		 * and this call is safe since dead ioc will never return any
		 * command back from HW.
		 */
		ioc->schedule_dead_ioc_flush_running_cmds(ioc);
		/*
		 * Set remove_host flag early since kernel thread will
		 * take some time to execute.
		 */
		ioc->remove_host = 1;
		/*Remove the Dead Host */
		p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc,
		    "mpt3sas_dead_ioc_%d", ioc->id);
		if (IS_ERR(p))
			pr_err(MPT3SAS_FMT
			"%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
			ioc->name, __func__);
		else
			pr_err(MPT3SAS_FMT
			"%s: Running mpt3sas_dead_ioc thread success !!!!\n",
			ioc->name, __func__);
		return; /* don't rearm timer */
	}

	/* Alive but not operational (e.g. FAULT): attempt a hard reset. */
	if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
		rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
		    FORCE_BIG_HAMMER);
		pr_warn(MPT3SAS_FMT "%s: hard reset: %s\n", ioc->name,
		    __func__, (rc == 0) ? "success" : "failed");
		doorbell = mpt3sas_base_get_iocstate(ioc, 0);
		if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
			mpt3sas_base_fault_info(ioc, doorbell &
			    MPI2_DOORBELL_DATA_MASK);
		/* Reset failed and still not operational: give up. */
		if (rc && (doorbell & MPI2_IOC_STATE_MASK) !=
		    MPI2_IOC_STATE_OPERATIONAL)
			return; /* don't rearm timer */
	}

	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
 rearm_timer:
	/* fault_reset_work_q may have been torn down by stop_watchdog. */
	if (ioc->fault_reset_work_q)
		queue_delayed_work(ioc->fault_reset_work_q,
		    &ioc->fault_reset_work,
		    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
}
215 
216 /**
217  * mpt3sas_base_start_watchdog - start the fault_reset_work_q
218  * @ioc: per adapter object
219  * Context: sleep.
220  *
221  * Return nothing.
222  */
223 void
224 mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
225 {
226 	unsigned long	 flags;
227 
228 	if (ioc->fault_reset_work_q)
229 		return;
230 
231 	/* initialize fault polling */
232 
233 	INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
234 	snprintf(ioc->fault_reset_work_q_name,
235 	    sizeof(ioc->fault_reset_work_q_name), "poll_%d_status", ioc->id);
236 	ioc->fault_reset_work_q =
237 		create_singlethread_workqueue(ioc->fault_reset_work_q_name);
238 	if (!ioc->fault_reset_work_q) {
239 		pr_err(MPT3SAS_FMT "%s: failed (line=%d)\n",
240 		    ioc->name, __func__, __LINE__);
241 			return;
242 	}
243 	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
244 	if (ioc->fault_reset_work_q)
245 		queue_delayed_work(ioc->fault_reset_work_q,
246 		    &ioc->fault_reset_work,
247 		    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
248 	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
249 }
250 
251 /**
252  * mpt3sas_base_stop_watchdog - stop the fault_reset_work_q
253  * @ioc: per adapter object
254  * Context: sleep.
255  *
256  * Return nothing.
257  */
258 void
259 mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
260 {
261 	unsigned long flags;
262 	struct workqueue_struct *wq;
263 
264 	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
265 	wq = ioc->fault_reset_work_q;
266 	ioc->fault_reset_work_q = NULL;
267 	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
268 	if (wq) {
269 		if (!cancel_delayed_work_sync(&ioc->fault_reset_work))
270 			flush_workqueue(wq);
271 		destroy_workqueue(wq);
272 	}
273 }
274 
275 /**
276  * mpt3sas_base_fault_info - verbose translation of firmware FAULT code
277  * @ioc: per adapter object
278  * @fault_code: fault code
279  *
280  * Return nothing.
281  */
282 void
283 mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code)
284 {
285 	pr_err(MPT3SAS_FMT "fault_state(0x%04x)!\n",
286 	    ioc->name, fault_code);
287 }
288 
289 /**
290  * mpt3sas_halt_firmware - halt's mpt controller firmware
291  * @ioc: per adapter object
292  *
 * For debugging timeout related issues.  Writing 0xC0FFEE00
294  * to the doorbell register will halt controller firmware. With
295  * the purpose to stop both driver and firmware, the enduser can
296  * obtain a ring buffer from controller UART.
297  */
void
mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
{
	u32 doorbell;

	/* Only active when the fwfault_debug module parameter is set. */
	if (!ioc->fwfault_debug)
		return;

	dump_stack();

	doorbell = readl(&ioc->chip->Doorbell);
	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
		/* Firmware is already faulted; just report its fault code. */
		mpt3sas_base_fault_info(ioc , doorbell);
	else {
		/* Magic doorbell value that halts controller firmware. */
		writel(0xC0FFEE00, &ioc->chip->Doorbell);
		pr_err(MPT3SAS_FMT "Firmware is halted due to command timeout\n",
			ioc->name);
	}

	/* fwfault_debug == 2: spin forever so the halted state can be
	 * inspected; any other non-zero setting panics the host. */
	if (ioc->fwfault_debug == 2)
		for (;;)
			;
	else
		panic("panic in %s\n", __func__);
}
323 
324 #ifdef CONFIG_SCSI_MPT3SAS_LOGGING
/**
 * _base_sas_ioc_info - verbose translation of the ioc status
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 * @request_hdr: request mf
 *
 * Maps the IOCStatus field to a human-readable string, then logs it
 * together with a hex dump of the originating request frame.
 *
 * Return nothing.
 */
static void
_base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
	MPI2RequestHeader_t *request_hdr)
{
	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	char *desc = NULL;
	u16 frame_sz;
	char *func_str = NULL;

	/* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
	if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
	    request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
	    request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
		return;

	/* "invalid page" is a routine config-probe outcome; don't log it. */
	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
		return;

	switch (ioc_status) {

/****************************************************************************
*  Common IOCStatus values for all replies
****************************************************************************/

	case MPI2_IOCSTATUS_INVALID_FUNCTION:
		desc = "invalid function";
		break;
	case MPI2_IOCSTATUS_BUSY:
		desc = "busy";
		break;
	case MPI2_IOCSTATUS_INVALID_SGL:
		desc = "invalid sgl";
		break;
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
		desc = "internal error";
		break;
	case MPI2_IOCSTATUS_INVALID_VPID:
		desc = "invalid vpid";
		break;
	case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
		desc = "insufficient resources";
		break;
	case MPI2_IOCSTATUS_INVALID_FIELD:
		desc = "invalid field";
		break;
	case MPI2_IOCSTATUS_INVALID_STATE:
		desc = "invalid state";
		break;
	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
		desc = "op state not supported";
		break;

/****************************************************************************
*  Config IOCStatus values
****************************************************************************/

	case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
		desc = "config invalid action";
		break;
	case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
		desc = "config invalid type";
		break;
	case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
		desc = "config invalid page";
		break;
	case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
		desc = "config invalid data";
		break;
	case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
		desc = "config no defaults";
		break;
	case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
		desc = "config cant commit";
		break;

/****************************************************************************
*  SCSI IO Reply
****************************************************************************/

	/* SCSI I/O statuses are decoded by scsih; leave desc NULL here so
	 * the function returns without logging below. */
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		break;

/****************************************************************************
*  For use by SCSI Initiator and SCSI Target end-to-end data protection
****************************************************************************/

	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
		desc = "eedp guard error";
		break;
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
		desc = "eedp ref tag error";
		break;
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		desc = "eedp app tag error";
		break;

/****************************************************************************
*  SCSI Target values
****************************************************************************/

	case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
		desc = "target invalid io index";
		break;
	case MPI2_IOCSTATUS_TARGET_ABORTED:
		desc = "target aborted";
		break;
	case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
		desc = "target no conn retryable";
		break;
	case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
		desc = "target no connection";
		break;
	case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
		desc = "target xfer count mismatch";
		break;
	case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
		desc = "target data offset error";
		break;
	case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
		desc = "target too much write data";
		break;
	case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
		desc = "target iu too short";
		break;
	case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
		desc = "target ack nak timeout";
		break;
	case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
		desc = "target nak received";
		break;

/****************************************************************************
*  Serial Attached SCSI values
****************************************************************************/

	case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
		desc = "smp request failed";
		break;
	case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
		desc = "smp data overrun";
		break;

/****************************************************************************
*  Diagnostic Buffer Post / Diagnostic Release values
****************************************************************************/

	case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
		desc = "diagnostic released";
		break;
	default:
		break;
	}

	if (!desc)
		return;

	/* Pick the frame size and a short name for the request function so
	 * the dump below covers the whole request frame. */
	switch (request_hdr->Function) {
	case MPI2_FUNCTION_CONFIG:
		frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
		func_str = "config_page";
		break;
	case MPI2_FUNCTION_SCSI_TASK_MGMT:
		frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
		func_str = "task_mgmt";
		break;
	case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
		frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
		func_str = "sas_iounit_ctl";
		break;
	case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
		frame_sz = sizeof(Mpi2SepRequest_t);
		func_str = "enclosure";
		break;
	case MPI2_FUNCTION_IOC_INIT:
		frame_sz = sizeof(Mpi2IOCInitRequest_t);
		func_str = "ioc_init";
		break;
	case MPI2_FUNCTION_PORT_ENABLE:
		frame_sz = sizeof(Mpi2PortEnableRequest_t);
		func_str = "port_enable";
		break;
	case MPI2_FUNCTION_SMP_PASSTHROUGH:
		frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
		func_str = "smp_passthru";
		break;
	default:
		frame_sz = 32;
		func_str = "unknown";
		break;
	}

	pr_warn(MPT3SAS_FMT "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
		ioc->name, desc, ioc_status, request_hdr, func_str);

	/* _debug_dump_mf takes the length in 32-bit words. */
	_debug_dump_mf(request_hdr, frame_sz/4);
}
541 
542 /**
543  * _base_display_event_data - verbose translation of firmware asyn events
544  * @ioc: per adapter object
545  * @mpi_reply: reply mf payload returned from firmware
546  *
547  * Return nothing.
548  */
549 static void
550 _base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
551 	Mpi2EventNotificationReply_t *mpi_reply)
552 {
553 	char *desc = NULL;
554 	u16 event;
555 
556 	if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
557 		return;
558 
559 	event = le16_to_cpu(mpi_reply->Event);
560 
561 	switch (event) {
562 	case MPI2_EVENT_LOG_DATA:
563 		desc = "Log Data";
564 		break;
565 	case MPI2_EVENT_STATE_CHANGE:
566 		desc = "Status Change";
567 		break;
568 	case MPI2_EVENT_HARD_RESET_RECEIVED:
569 		desc = "Hard Reset Received";
570 		break;
571 	case MPI2_EVENT_EVENT_CHANGE:
572 		desc = "Event Change";
573 		break;
574 	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
575 		desc = "Device Status Change";
576 		break;
577 	case MPI2_EVENT_IR_OPERATION_STATUS:
578 		desc = "IR Operation Status";
579 		break;
580 	case MPI2_EVENT_SAS_DISCOVERY:
581 	{
582 		Mpi2EventDataSasDiscovery_t *event_data =
583 		    (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
584 		pr_info(MPT3SAS_FMT "Discovery: (%s)", ioc->name,
585 		    (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
586 		    "start" : "stop");
587 		if (event_data->DiscoveryStatus)
588 			pr_info("discovery_status(0x%08x)",
589 			    le32_to_cpu(event_data->DiscoveryStatus));
590 			pr_info("\n");
591 		return;
592 	}
593 	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
594 		desc = "SAS Broadcast Primitive";
595 		break;
596 	case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
597 		desc = "SAS Init Device Status Change";
598 		break;
599 	case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
600 		desc = "SAS Init Table Overflow";
601 		break;
602 	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
603 		desc = "SAS Topology Change List";
604 		break;
605 	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
606 		desc = "SAS Enclosure Device Status Change";
607 		break;
608 	case MPI2_EVENT_IR_VOLUME:
609 		desc = "IR Volume";
610 		break;
611 	case MPI2_EVENT_IR_PHYSICAL_DISK:
612 		desc = "IR Physical Disk";
613 		break;
614 	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
615 		desc = "IR Configuration Change List";
616 		break;
617 	case MPI2_EVENT_LOG_ENTRY_ADDED:
618 		desc = "Log Entry Added";
619 		break;
620 	}
621 
622 	if (!desc)
623 		return;
624 
625 	pr_info(MPT3SAS_FMT "%s\n", ioc->name, desc);
626 }
627 #endif
628 
629 /**
630  * _base_sas_log_info - verbose translation of firmware log info
631  * @ioc: per adapter object
632  * @log_info: log info
633  *
634  * Return nothing.
635  */
636 static void
637 _base_sas_log_info(struct MPT3SAS_ADAPTER *ioc , u32 log_info)
638 {
639 	union loginfo_type {
640 		u32	loginfo;
641 		struct {
642 			u32	subcode:16;
643 			u32	code:8;
644 			u32	originator:4;
645 			u32	bus_type:4;
646 		} dw;
647 	};
648 	union loginfo_type sas_loginfo;
649 	char *originator_str = NULL;
650 
651 	sas_loginfo.loginfo = log_info;
652 	if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
653 		return;
654 
655 	/* each nexus loss loginfo */
656 	if (log_info == 0x31170000)
657 		return;
658 
659 	/* eat the loginfos associated with task aborts */
660 	if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
661 	    0x31140000 || log_info == 0x31130000))
662 		return;
663 
664 	switch (sas_loginfo.dw.originator) {
665 	case 0:
666 		originator_str = "IOP";
667 		break;
668 	case 1:
669 		originator_str = "PL";
670 		break;
671 	case 2:
672 		originator_str = "IR";
673 		break;
674 	}
675 
676 	pr_warn(MPT3SAS_FMT
677 		"log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
678 		ioc->name, log_info,
679 	     originator_str, sas_loginfo.dw.code,
680 	     sas_loginfo.dw.subcode);
681 }
682 
/**
 * _base_display_reply_info - decode IOCStatus/LogInfo of a reply frame
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 *
 * Return nothing.
 */
static void
_base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
	u32 reply)
{
	MPI2DefaultReply_t *mpi_reply;
	u16 ioc_status;
	u32 loginfo = 0;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (unlikely(!mpi_reply)) {
		pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
		return;
	}
	ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
	/* Verbose decode of non-success statuses when reply debugging on. */
	if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
	    (ioc->logging_level & MPT_DEBUG_REPLY)) {
		_base_sas_ioc_info(ioc , mpi_reply,
		   mpt3sas_base_get_msg_frame(ioc, smid));
	}
#endif
	/* IOCLogInfo is only valid when the flag bit is set in IOCStatus. */
	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
		loginfo = le32_to_cpu(mpi_reply->IOCLogInfo);
		_base_sas_log_info(ioc, loginfo);
	}

	/* Feed non-zero status/loginfo to the diagnostic trigger engine. */
	if (ioc_status || loginfo) {
		ioc_status &= MPI2_IOCSTATUS_MASK;
		mpt3sas_trigger_mpi(ioc, ioc_status, loginfo);
	}
}
724 
725 /**
726  * mpt3sas_base_done - base internal command completion routine
727  * @ioc: per adapter object
728  * @smid: system request message index
729  * @msix_index: MSIX table index supplied by the OS
730  * @reply: reply message frame(lower 32bit addr)
731  *
732  * Return 1 meaning mf should be freed from _base_interrupt
733  *        0 means the mf is freed from this function.
734  */
735 u8
736 mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
737 	u32 reply)
738 {
739 	MPI2DefaultReply_t *mpi_reply;
740 
741 	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
742 	if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
743 		return 1;
744 
745 	if (ioc->base_cmds.status == MPT3_CMD_NOT_USED)
746 		return 1;
747 
748 	ioc->base_cmds.status |= MPT3_CMD_COMPLETE;
749 	if (mpi_reply) {
750 		ioc->base_cmds.status |= MPT3_CMD_REPLY_VALID;
751 		memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
752 	}
753 	ioc->base_cmds.status &= ~MPT3_CMD_PENDING;
754 
755 	complete(&ioc->base_cmds.done);
756 	return 1;
757 }
758 
/**
 * _base_async_event - main callback handler for firmware asyn events
 * @ioc: per adapter object
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 *
 * Sends an EventAck back to firmware when the event requires one, then
 * forwards the event to the scsih and ctl callback handlers.
 *
 * Return 1 meaning mf should be freed from _base_interrupt
 *        0 means the mf is freed from this function.
 */
static u8
_base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
{
	Mpi2EventNotificationReply_t *mpi_reply;
	Mpi2EventAckRequest_t *ack_request;
	u16 smid;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (!mpi_reply)
		return 1;
	if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
		return 1;
#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
	_base_display_event_data(ioc, mpi_reply);
#endif
	if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
		goto out;
	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
	if (!smid) {
		/* No free smid: skip the ack but still run callbacks. */
		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
		    ioc->name, __func__);
		goto out;
	}

	/* Build and fire the EventAck request frame. */
	ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
	ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
	ack_request->Event = mpi_reply->Event;
	ack_request->EventContext = mpi_reply->EventContext;
	ack_request->VF_ID = 0;  /* TODO */
	ack_request->VP_ID = 0;
	mpt3sas_base_put_smid_default(ioc, smid);

 out:

	/* scsih callback handler */
	mpt3sas_scsih_event_callback(ioc, msix_index, reply);

	/* ctl callback handler */
	mpt3sas_ctl_event_callback(ioc, msix_index, reply);

	return 1;
}
811 
812 /**
813  * _base_get_cb_idx - obtain the callback index
814  * @ioc: per adapter object
815  * @smid: system request message index
816  *
817  * Return callback index.
818  */
819 static u8
820 _base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
821 {
822 	int i;
823 	u8 cb_idx;
824 
825 	if (smid < ioc->hi_priority_smid) {
826 		i = smid - 1;
827 		cb_idx = ioc->scsi_lookup[i].cb_idx;
828 	} else if (smid < ioc->internal_smid) {
829 		i = smid - ioc->hi_priority_smid;
830 		cb_idx = ioc->hpr_lookup[i].cb_idx;
831 	} else if (smid <= ioc->hba_queue_depth) {
832 		i = smid - ioc->internal_smid;
833 		cb_idx = ioc->internal_lookup[i].cb_idx;
834 	} else
835 		cb_idx = 0xFF;
836 	return cb_idx;
837 }
838 
839 /**
840  * _base_mask_interrupts - disable interrupts
841  * @ioc: per adapter object
842  *
843  * Disabling ResetIRQ, Reply and Doorbell Interrupts
844  *
845  * Return nothing.
846  */
847 static void
848 _base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
849 {
850 	u32 him_register;
851 
852 	ioc->mask_interrupts = 1;
853 	him_register = readl(&ioc->chip->HostInterruptMask);
854 	him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK;
855 	writel(him_register, &ioc->chip->HostInterruptMask);
856 	readl(&ioc->chip->HostInterruptMask);
857 }
858 
859 /**
860  * _base_unmask_interrupts - enable interrupts
861  * @ioc: per adapter object
862  *
863  * Enabling only Reply Interrupts
864  *
865  * Return nothing.
866  */
867 static void
868 _base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
869 {
870 	u32 him_register;
871 
872 	him_register = readl(&ioc->chip->HostInterruptMask);
873 	him_register &= ~MPI2_HIM_RIM;
874 	writel(him_register, &ioc->chip->HostInterruptMask);
875 	ioc->mask_interrupts = 0;
876 }
877 
/* 64-bit reply post descriptor, viewable either as a single word or as
 * the low/high 32-bit halves used for the "unused entry" (all-ones)
 * check in _base_interrupt.
 */
union reply_descriptor {
	u64 word;
	struct {
		u32 low;
		u32 high;
	} u;
};
885 
/**
 * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
 * @irq: irq number (not used)
 * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
 *
 * Walks the reply post queue: dispatches each descriptor to the
 * registered callback (or the async event handler), recycles reply
 * frames back to the reply free queue, and finally publishes the new
 * reply post host index to the controller.
 *
 * Return IRQ_HANDLED if processed, else IRQ_NONE.
 */
static irqreturn_t
_base_interrupt(int irq, void *bus_id)
{
	struct adapter_reply_queue *reply_q = bus_id;
	union reply_descriptor rd;
	u32 completed_cmds;
	u8 request_desript_type;
	u16 smid;
	u8 cb_idx;
	u32 reply;
	u8 msix_index = reply_q->msix_index;
	struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
	Mpi2ReplyDescriptorsUnion_t *rpf;
	u8 rc;

	if (ioc->mask_interrupts)
		return IRQ_NONE;

	/* Single-entry gate: bail if this queue is already being serviced
	 * (also entered from mpt3sas_base_flush_reply_queues). */
	if (!atomic_add_unless(&reply_q->busy, 1, 1))
		return IRQ_NONE;

	rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
	request_desript_type = rpf->Default.ReplyFlags
	     & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
	if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
		atomic_dec(&reply_q->busy);
		return IRQ_NONE;
	}

	completed_cmds = 0;
	cb_idx = 0xFF;
	do {
		rd.word = le64_to_cpu(rpf->Words);
		/* An all-ones half means this entry has not been posted. */
		if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
			goto out;
		reply = 0;
		smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
		if (request_desript_type ==
		    MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS ||
		    request_desript_type ==
		    MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
			/* Success descriptors carry no reply frame (reply=0). */
			cb_idx = _base_get_cb_idx(ioc, smid);
			if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
			    (likely(mpt_callbacks[cb_idx] != NULL))) {
				rc = mpt_callbacks[cb_idx](ioc, smid,
				    msix_index, 0);
				if (rc)
					mpt3sas_base_free_smid(ioc, smid);
			}
		} else if (request_desript_type ==
		    MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
			reply = le32_to_cpu(
			    rpf->AddressReply.ReplyFrameAddress);
			/* Drop addresses outside the reply DMA pool. */
			if (reply > ioc->reply_dma_max_address ||
			    reply < ioc->reply_dma_min_address)
				reply = 0;
			if (smid) {
				cb_idx = _base_get_cb_idx(ioc, smid);
				if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
				    (likely(mpt_callbacks[cb_idx] != NULL))) {
					rc = mpt_callbacks[cb_idx](ioc, smid,
					    msix_index, reply);
					if (reply)
						_base_display_reply_info(ioc,
						    smid, msix_index, reply);
					if (rc)
						mpt3sas_base_free_smid(ioc,
						    smid);
				}
			} else {
				/* smid == 0: unsolicited async event. */
				_base_async_event(ioc, msix_index, reply);
			}

			/* reply free queue handling */
			if (reply) {
				ioc->reply_free_host_index =
				    (ioc->reply_free_host_index ==
				    (ioc->reply_free_queue_depth - 1)) ?
				    0 : ioc->reply_free_host_index + 1;
				ioc->reply_free[ioc->reply_free_host_index] =
				    cpu_to_le32(reply);
				/* Order the queue-entry write before the
				 * register write handing it to hardware. */
				wmb();
				writel(ioc->reply_free_host_index,
				    &ioc->chip->ReplyFreeHostIndex);
			}
		}

		/* Mark the descriptor consumed and advance with wrap. */
		rpf->Words = cpu_to_le64(ULLONG_MAX);
		reply_q->reply_post_host_index =
		    (reply_q->reply_post_host_index ==
		    (ioc->reply_post_queue_depth - 1)) ? 0 :
		    reply_q->reply_post_host_index + 1;
		request_desript_type =
		    reply_q->reply_post_free[reply_q->reply_post_host_index].
		    Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
		completed_cmds++;
		if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
			goto out;
		if (!reply_q->reply_post_host_index)
			rpf = reply_q->reply_post_free;
		else
			rpf++;
	} while (1);

 out:

	if (!completed_cmds) {
		atomic_dec(&reply_q->busy);
		return IRQ_NONE;
	}

	/* Publish the new reply post host index to the controller. */
	wmb();
	writel(reply_q->reply_post_host_index | (msix_index <<
	    MPI2_RPHI_MSIX_INDEX_SHIFT), &ioc->chip->ReplyPostHostIndex);
	atomic_dec(&reply_q->busy);
	return IRQ_HANDLED;
}
1011 
1012 /**
 * _base_is_controller_msix_enabled - does the controller support multi-reply queues
1014  * @ioc: per adapter object
1015  *
1016  */
1017 static inline int
1018 _base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
1019 {
1020 	return (ioc->facts.IOCCapabilities &
1021 	    MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
1022 }
1023 
1024 /**
1025  * mpt3sas_base_flush_reply_queues - flushing the MSIX reply queues
1026  * @ioc: per adapter object
 * Context: ISR context
1028  *
1029  * Called when a Task Management request has completed. We want
1030  * to flush the other reply queues so all the outstanding IO has been
 * completed back to OS before we process the TM completion.
1032  *
1033  * Return nothing.
1034  */
void
mpt3sas_base_flush_reply_queues(struct MPT3SAS_ADAPTER *ioc)
{
	struct adapter_reply_queue *reply_q;

	/* If MSIX capability is turned off
	 * then multi-queues are not enabled
	 */
	if (!_base_is_controller_msix_enabled(ioc))
		return;

	/* Drain every reply queue other than the TM queue by invoking the
	 * interrupt handler directly; abandon the flush if a host reset
	 * has started in the meantime.
	 */
	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
		if (ioc->shost_recovery)
			return;
		/* TMs are on msix_index == 0 */
		if (reply_q->msix_index == 0)
			continue;
		_base_interrupt(reply_q->vector, (void *)reply_q);
	}
}
1055 
1056 /**
1057  * mpt3sas_base_release_callback_handler - clear interrupt callback handler
1058  * @cb_idx: callback index
1059  *
1060  * Return nothing.
1061  */
void
mpt3sas_base_release_callback_handler(u8 cb_idx)
{
	/* Clearing the slot makes this index available for a future
	 * mpt3sas_base_register_callback_handler() call.
	 */
	mpt_callbacks[cb_idx] = NULL;
}
1067 
1068 /**
1069  * mpt3sas_base_register_callback_handler - obtain index for the interrupt callback handler
1070  * @cb_func: callback function
1071  *
 * Returns the callback index (cb_idx) assigned to cb_func.
1073  */
u8
mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func)
{
	u8 cb_idx;

	/* Scan from the top of the table down to (but not including) slot 0
	 * looking for a free entry.  NOTE(review): if every slot in
	 * 1..MPT_MAX_CALLBACKS-1 is taken, the loop exits with cb_idx == 0
	 * and slot 0 is silently overwritten -- presumably the table is
	 * sized so this never happens; confirm against the registration
	 * sites.
	 */
	for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--)
		if (mpt_callbacks[cb_idx] == NULL)
			break;

	mpt_callbacks[cb_idx] = cb_func;
	return cb_idx;
}
1086 
1087 /**
1088  * mpt3sas_base_initialize_callback_handler - initialize the interrupt callback handler
1089  *
1090  * Return nothing.
1091  */
1092 void
1093 mpt3sas_base_initialize_callback_handler(void)
1094 {
1095 	u8 cb_idx;
1096 
1097 	for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++)
1098 		mpt3sas_base_release_callback_handler(cb_idx);
1099 }
1100 
1101 
1102 /**
1103  * _base_build_zero_len_sge - build zero length sg entry
1104  * @ioc: per adapter object
1105  * @paddr: virtual address for SGE
1106  *
 * Create a zero length scatter gather entry to ensure the IOC's hardware has
1108  * something to use if the target device goes brain dead and tries
1109  * to send data even when none is asked for.
1110  *
1111  * Return nothing.
1112  */
static void
_base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr)
{
	/* Simple element, zero length, flagged as the last element, end of
	 * buffer and end of list; address -1 is a sentinel since no data
	 * transfer is expected.
	 */
	u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
	    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
	    MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
	    MPI2_SGE_FLAGS_SHIFT);
	ioc->base_add_sg_single(paddr, flags_length, -1);
}
1122 
1123 /**
1124  * _base_add_sg_single_32 - Place a simple 32 bit SGE at address pAddr.
1125  * @paddr: virtual address for SGE
1126  * @flags_length: SGE flags and data transfer length
1127  * @dma_addr: Physical address
1128  *
1129  * Return nothing.
1130  */
1131 static void
1132 _base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
1133 {
1134 	Mpi2SGESimple32_t *sgel = paddr;
1135 
1136 	flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING |
1137 	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
1138 	sgel->FlagsLength = cpu_to_le32(flags_length);
1139 	sgel->Address = cpu_to_le32(dma_addr);
1140 }
1141 
1142 
1143 /**
1144  * _base_add_sg_single_64 - Place a simple 64 bit SGE at address pAddr.
1145  * @paddr: virtual address for SGE
1146  * @flags_length: SGE flags and data transfer length
1147  * @dma_addr: Physical address
1148  *
1149  * Return nothing.
1150  */
1151 static void
1152 _base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
1153 {
1154 	Mpi2SGESimple64_t *sgel = paddr;
1155 
1156 	flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
1157 	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
1158 	sgel->FlagsLength = cpu_to_le32(flags_length);
1159 	sgel->Address = cpu_to_le64(dma_addr);
1160 }
1161 
1162 /**
1163  * _base_get_chain_buffer_tracker - obtain chain tracker
1164  * @ioc: per adapter object
1165  * @smid: smid associated to an IO request
1166  *
1167  * Returns chain tracker(from ioc->free_chain_list)
1168  */
1169 static struct chain_tracker *
1170 _base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1171 {
1172 	struct chain_tracker *chain_req;
1173 	unsigned long flags;
1174 
1175 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1176 	if (list_empty(&ioc->free_chain_list)) {
1177 		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1178 		dfailprintk(ioc, pr_warn(MPT3SAS_FMT
1179 			"chain buffers not available\n", ioc->name));
1180 		return NULL;
1181 	}
1182 	chain_req = list_entry(ioc->free_chain_list.next,
1183 	    struct chain_tracker, tracker_list);
1184 	list_del_init(&chain_req->tracker_list);
1185 	list_add_tail(&chain_req->tracker_list,
1186 	    &ioc->scsi_lookup[smid - 1].chain_list);
1187 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1188 	return chain_req;
1189 }
1190 
1191 
1192 /**
1193  * _base_build_sg - build generic sg
1194  * @ioc: per adapter object
1195  * @psge: virtual address for SGE
1196  * @data_out_dma: physical address for WRITES
1197  * @data_out_sz: data xfer size for WRITES
1198  * @data_in_dma: physical address for READS
1199  * @data_in_sz: data xfer size for READS
1200  *
1201  * Return nothing.
1202  */
static void
_base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
	dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
	size_t data_in_sz)
{
	u32 sgl_flags;

	/* No data either way: emit a zero length SGE so the IOC still has
	 * a valid list to walk.
	 */
	if (!data_out_sz && !data_in_sz) {
		_base_build_zero_len_sge(ioc, psge);
		return;
	}

	/* Bidirectional transfer: WRITE element followed by the closing
	 * READ element.
	 */
	if (data_out_sz && data_in_sz) {
		/* WRITE sgel first */
		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
		ioc->base_add_sg_single(psge, sgl_flags |
		    data_out_sz, data_out_dma);

		/* incr sgel */
		psge += ioc->sge_size;

		/* READ sgel last */
		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_END_OF_LIST);
		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
		ioc->base_add_sg_single(psge, sgl_flags |
		    data_in_sz, data_in_dma);
	} else if (data_out_sz) /* WRITE */ {
		/* Single WRITE element carrying all terminator flags. */
		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC);
		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
		ioc->base_add_sg_single(psge, sgl_flags |
		    data_out_sz, data_out_dma);
	} else if (data_in_sz) /* READ */ {
		/* Single READ element carrying all terminator flags. */
		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_END_OF_LIST);
		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
		ioc->base_add_sg_single(psge, sgl_flags |
		    data_in_sz, data_in_dma);
	}
}
1249 
1250 /* IEEE format sgls */
1251 
1252 /**
1253  * _base_add_sg_single_ieee - add sg element for IEEE format
1254  * @paddr: virtual address for SGE
1255  * @flags: SGE flags
1256  * @chain_offset: number of 128 byte elements from start of segment
1257  * @length: data transfer length
1258  * @dma_addr: Physical address
1259  *
1260  * Return nothing.
1261  */
static void
_base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length,
	dma_addr_t dma_addr)
{
	Mpi25IeeeSgeChain64_t *sgel = paddr;

	/* IEEE SGEs keep flags, length and address in separate fields
	 * (unlike MPI SGEs which merge flags and length into one word).
	 */
	sgel->Flags = flags;
	sgel->NextChainOffset = chain_offset;
	sgel->Length = cpu_to_le32(length);
	sgel->Address = cpu_to_le64(dma_addr);
}
1273 
1274 /**
1275  * _base_build_zero_len_sge_ieee - build zero length sg entry for IEEE format
1276  * @ioc: per adapter object
1277  * @paddr: virtual address for SGE
1278  *
 * Create a zero length scatter gather entry to ensure the IOC's hardware has
1280  * something to use if the target device goes brain dead and tries
1281  * to send data even when none is asked for.
1282  *
1283  * Return nothing.
1284  */
static void
_base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
{
	/* Zero length simple element terminating the list; address -1 is a
	 * sentinel since no data transfer is expected.
	 */
	u8 sgl_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
		MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
		MPI25_IEEE_SGE_FLAGS_END_OF_LIST);
	_base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1);
}
1293 
1294 /**
1295  * _base_build_sg_scmd_ieee - main sg creation routine for IEEE format
1296  * @ioc: per adapter object
1297  * @scmd: scsi command
1298  * @smid: system request message index
1299  * Context: none.
1300  *
1301  * The main routine that builds scatter gather table from a given
1302  * scsi request sent via the .queuecommand main handler.
1303  *
1304  * Returns 0 success, anything else error
1305  */
1306 static int
1307 _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
1308 	struct scsi_cmnd *scmd, u16 smid)
1309 {
1310 	Mpi2SCSIIORequest_t *mpi_request;
1311 	dma_addr_t chain_dma;
1312 	struct scatterlist *sg_scmd;
1313 	void *sg_local, *chain;
1314 	u32 chain_offset;
1315 	u32 chain_length;
1316 	int sges_left;
1317 	u32 sges_in_segment;
1318 	u8 simple_sgl_flags;
1319 	u8 simple_sgl_flags_last;
1320 	u8 chain_sgl_flags;
1321 	struct chain_tracker *chain_req;
1322 
1323 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
1324 
1325 	/* init scatter gather flags */
1326 	simple_sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
1327 	    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
1328 	simple_sgl_flags_last = simple_sgl_flags |
1329 	    MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
1330 	chain_sgl_flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
1331 	    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
1332 
1333 	sg_scmd = scsi_sglist(scmd);
1334 	sges_left = scsi_dma_map(scmd);
1335 	if (!sges_left) {
1336 		sdev_printk(KERN_ERR, scmd->device,
1337 			"pci_map_sg failed: request for %d bytes!\n",
1338 			scsi_bufflen(scmd));
1339 		return -ENOMEM;
1340 	}
1341 
1342 	sg_local = &mpi_request->SGL;
1343 	sges_in_segment = (ioc->request_sz -
1344 	    offsetof(Mpi2SCSIIORequest_t, SGL))/ioc->sge_size_ieee;
1345 	if (sges_left <= sges_in_segment)
1346 		goto fill_in_last_segment;
1347 
1348 	mpi_request->ChainOffset = (sges_in_segment - 1 /* chain element */) +
1349 	    (offsetof(Mpi2SCSIIORequest_t, SGL)/ioc->sge_size_ieee);
1350 
1351 	/* fill in main message segment when there is a chain following */
1352 	while (sges_in_segment > 1) {
1353 		_base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
1354 		    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
1355 		sg_scmd = sg_next(sg_scmd);
1356 		sg_local += ioc->sge_size_ieee;
1357 		sges_left--;
1358 		sges_in_segment--;
1359 	}
1360 
1361 	/* initializing the pointers */
1362 	chain_req = _base_get_chain_buffer_tracker(ioc, smid);
1363 	if (!chain_req)
1364 		return -1;
1365 	chain = chain_req->chain_buffer;
1366 	chain_dma = chain_req->chain_buffer_dma;
1367 	do {
1368 		sges_in_segment = (sges_left <=
1369 		    ioc->max_sges_in_chain_message) ? sges_left :
1370 		    ioc->max_sges_in_chain_message;
1371 		chain_offset = (sges_left == sges_in_segment) ?
1372 		    0 : sges_in_segment;
1373 		chain_length = sges_in_segment * ioc->sge_size_ieee;
1374 		if (chain_offset)
1375 			chain_length += ioc->sge_size_ieee;
1376 		_base_add_sg_single_ieee(sg_local, chain_sgl_flags,
1377 		    chain_offset, chain_length, chain_dma);
1378 
1379 		sg_local = chain;
1380 		if (!chain_offset)
1381 			goto fill_in_last_segment;
1382 
1383 		/* fill in chain segments */
1384 		while (sges_in_segment) {
1385 			_base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
1386 			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
1387 			sg_scmd = sg_next(sg_scmd);
1388 			sg_local += ioc->sge_size_ieee;
1389 			sges_left--;
1390 			sges_in_segment--;
1391 		}
1392 
1393 		chain_req = _base_get_chain_buffer_tracker(ioc, smid);
1394 		if (!chain_req)
1395 			return -1;
1396 		chain = chain_req->chain_buffer;
1397 		chain_dma = chain_req->chain_buffer_dma;
1398 	} while (1);
1399 
1400 
1401  fill_in_last_segment:
1402 
1403 	/* fill the last segment */
1404 	while (sges_left) {
1405 		if (sges_left == 1)
1406 			_base_add_sg_single_ieee(sg_local,
1407 			    simple_sgl_flags_last, 0, sg_dma_len(sg_scmd),
1408 			    sg_dma_address(sg_scmd));
1409 		else
1410 			_base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
1411 			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
1412 		sg_scmd = sg_next(sg_scmd);
1413 		sg_local += ioc->sge_size_ieee;
1414 		sges_left--;
1415 	}
1416 
1417 	return 0;
1418 }
1419 
1420 /**
1421  * _base_build_sg_ieee - build generic sg for IEEE format
1422  * @ioc: per adapter object
1423  * @psge: virtual address for SGE
1424  * @data_out_dma: physical address for WRITES
1425  * @data_out_sz: data xfer size for WRITES
1426  * @data_in_dma: physical address for READS
1427  * @data_in_sz: data xfer size for READS
1428  *
1429  * Return nothing.
1430  */
static void
_base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
	dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
	size_t data_in_sz)
{
	u8 sgl_flags;

	/* No data either way: emit a zero length SGE so the IOC still has
	 * a valid list to walk.
	 */
	if (!data_out_sz && !data_in_sz) {
		_base_build_zero_len_sge_ieee(ioc, psge);
		return;
	}

	if (data_out_sz && data_in_sz) {
		/* WRITE sgel first */
		sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
		_base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
		    data_out_dma);

		/* incr sgel */
		psge += ioc->sge_size_ieee;

		/* READ sgel last */
		sgl_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
		_base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
		    data_in_dma);
	} else if (data_out_sz) /* WRITE */ {
		/* single element, also terminates the list */
		sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
		    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
		_base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
		    data_out_dma);
	} else if (data_in_sz) /* READ */ {
		/* single element, also terminates the list */
		sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
		    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
		_base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
		    data_in_dma);
	}
}
1471 
1472 #define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))
1473 
1474 /**
1475  * _base_config_dma_addressing - set dma addressing
1476  * @ioc: per adapter object
1477  * @pdev: PCI device struct
1478  *
1479  * Returns 0 for success, non-zero for failure.
1480  */
static int
_base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
{
	struct sysinfo s;
	char *desc = NULL;

	/* Prefer 64 bit DMA, but only when dma_addr_t is wide enough and
	 * the platform actually needs addressing above 4 GB.  Selecting a
	 * mask also picks the matching SGE builder and SGE size.
	 */
	if (sizeof(dma_addr_t) > 4) {
		const uint64_t required_mask =
		    dma_get_required_mask(&pdev->dev);
		if ((required_mask > DMA_BIT_MASK(32)) &&
		    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
		    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
			ioc->base_add_sg_single = &_base_add_sg_single_64;
			ioc->sge_size = sizeof(Mpi2SGESimple64_t);
			desc = "64";
			goto out;
		}
	}

	/* Fall back to 32 bit DMA addressing. */
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
	    && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
		ioc->base_add_sg_single = &_base_add_sg_single_32;
		ioc->sge_size = sizeof(Mpi2SGESimple32_t);
		desc = "32";
	} else
		return -ENODEV;

 out:
	si_meminfo(&s);
	pr_info(MPT3SAS_FMT
		"%s BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
		ioc->name, desc, convert_to_kb(s.totalram));

	return 0;
}
1516 
1517 /**
 * _base_check_enable_msix - checks whether the controller is MSI-X capable.
1519  * @ioc: per adapter object
1520  *
1521  * Check to see if card is capable of MSIX, and set number
1522  * of available msix vectors
1523  */
static int
_base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
{
	int base;
	u16 message_control;

	base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
	if (!base) {
		dfailprintk(ioc, pr_info(MPT3SAS_FMT "msix not supported\n",
			ioc->name));
		return -EINVAL;
	}

	/* get msix vector count */

	/* base + 2 is the MSI-X Message Control register; its low bits
	 * encode (table size - 1).  NOTE(review): the PCI spec defines the
	 * table size field as 11 bits (mask 0x7FF) while 0x3FF is used
	 * here -- harmless given the cap to 8 below, but worth confirming.
	 */
	pci_read_config_word(ioc->pdev, base + 2, &message_control);
	ioc->msix_vector_count = (message_control & 0x3FF) + 1;
	/* driver limits itself to at most 8 MSI-X vectors */
	if (ioc->msix_vector_count > 8)
		ioc->msix_vector_count = 8;
	dinitprintk(ioc, pr_info(MPT3SAS_FMT
		"msix is supported, vector_count(%d)\n",
		ioc->name, ioc->msix_vector_count));
	return 0;
}
1548 
1549 /**
1550  * _base_free_irq - free irq
1551  * @ioc: per adapter object
1552  *
1553  * Freeing respective reply_queue from the list.
1554  */
1555 static void
1556 _base_free_irq(struct MPT3SAS_ADAPTER *ioc)
1557 {
1558 	struct adapter_reply_queue *reply_q, *next;
1559 
1560 	if (list_empty(&ioc->reply_queue_list))
1561 		return;
1562 
1563 	list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
1564 		list_del(&reply_q->list);
1565 		synchronize_irq(reply_q->vector);
1566 		free_irq(reply_q->vector, reply_q);
1567 		kfree(reply_q);
1568 	}
1569 }
1570 
1571 /**
1572  * _base_request_irq - request irq
1573  * @ioc: per adapter object
1574  * @index: msix index into vector table
1575  * @vector: irq vector
1576  *
1577  * Inserting respective reply_queue into the list.
1578  */
static int
_base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index, u32 vector)
{
	struct adapter_reply_queue *reply_q;
	int r;

	reply_q =  kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
	if (!reply_q) {
		pr_err(MPT3SAS_FMT "unable to allocate memory %d!\n",
		    ioc->name, (int)sizeof(struct adapter_reply_queue));
		return -ENOMEM;
	}
	reply_q->ioc = ioc;
	reply_q->msix_index = index;
	reply_q->vector = vector;
	atomic_set(&reply_q->busy, 0);
	/* IRQ name carries the msix index only in MSI-X mode */
	if (ioc->msix_enable)
		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
		    MPT3SAS_DRIVER_NAME, ioc->id, index);
	else
		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
		    MPT3SAS_DRIVER_NAME, ioc->id);
	/* reply_q is the dev_id cookie handed back to _base_interrupt() */
	r = request_irq(vector, _base_interrupt, IRQF_SHARED, reply_q->name,
	    reply_q);
	if (r) {
		pr_err(MPT3SAS_FMT "unable to allocate interrupt %d!\n",
		    reply_q->name, vector);
		kfree(reply_q);
		return -EBUSY;
	}

	INIT_LIST_HEAD(&reply_q->list);
	list_add_tail(&reply_q->list, &ioc->reply_queue_list);
	return 0;
}
1614 
1615 /**
1616  * _base_assign_reply_queues - assigning msix index for each cpu
1617  * @ioc: per adapter object
1618  *
1619  * The enduser would need to set the affinity via /proc/irq/#/smp_affinity
1620  *
1621  * It would nice if we could call irq_set_affinity, however it is not
1622  * an exported symbol
1623  */
static void
_base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
{
	unsigned int cpu, nr_cpus, nr_msix, index = 0;

	if (!_base_is_controller_msix_enabled(ioc))
		return;

	memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);

	nr_cpus = num_online_cpus();
	/* clamp the queue count to what the firmware supports */
	nr_msix = ioc->reply_queue_count = min(ioc->reply_queue_count,
					       ioc->facts.MaxMSIxVectors);
	if (!nr_msix)
		return;

	cpu = cpumask_first(cpu_online_mask);

	/* Spread online CPUs evenly across msix indices: each index gets
	 * nr_cpus / nr_msix CPUs, and the first (nr_cpus % nr_msix)
	 * indices get one extra to absorb the remainder.
	 */
	do {
		unsigned int i, group = nr_cpus / nr_msix;

		if (index < nr_cpus % nr_msix)
			group++;

		for (i = 0 ; i < group ; i++) {
			ioc->cpu_msix_table[cpu] = index;
			cpu = cpumask_next(cpu, cpu_online_mask);
		}

		index++;

	} while (cpu < nr_cpus);
}
1657 
1658 /**
1659  * _base_disable_msix - disables msix
1660  * @ioc: per adapter object
1661  *
1662  */
1663 static void
1664 _base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
1665 {
1666 	if (!ioc->msix_enable)
1667 		return;
1668 	pci_disable_msix(ioc->pdev);
1669 	ioc->msix_enable = 0;
1670 }
1671 
1672 /**
 * _base_enable_msix - enables msix, failing back to io_apic
1674  * @ioc: per adapter object
1675  *
1676  */
static int
_base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
{
	struct msix_entry *entries, *a;
	int r;
	int i;
	u8 try_msix = 0;

	/* module parameter: -1 (default) and 0 both mean "try MSI-X" */
	if (msix_disable == -1 || msix_disable == 0)
		try_msix = 1;

	if (!try_msix)
		goto try_ioapic;

	if (_base_check_enable_msix(ioc) != 0)
		goto try_ioapic;

	/* one reply queue per CPU, bounded by available vectors */
	ioc->reply_queue_count = min_t(int, ioc->cpu_count,
	    ioc->msix_vector_count);

	printk(MPT3SAS_FMT "MSI-X vectors supported: %d, no of cores"
	  ": %d, max_msix_vectors: %d\n", ioc->name, ioc->msix_vector_count,
	  ioc->cpu_count, max_msix_vectors);

	/* honor the user-supplied max_msix_vectors cap, if any */
	if (max_msix_vectors > 0) {
		ioc->reply_queue_count = min_t(int, max_msix_vectors,
			ioc->reply_queue_count);
		ioc->msix_vector_count = ioc->reply_queue_count;
	}

	entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry),
	    GFP_KERNEL);
	if (!entries) {
		dfailprintk(ioc, pr_info(MPT3SAS_FMT
			"kcalloc failed @ at %s:%d/%s() !!!\n",
			ioc->name, __FILE__, __LINE__, __func__));
		goto try_ioapic;
	}

	for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++)
		a->entry = i;

	r = pci_enable_msix(ioc->pdev, entries, ioc->reply_queue_count);
	if (r) {
		dfailprintk(ioc, pr_info(MPT3SAS_FMT
			"pci_enable_msix failed (r=%d) !!!\n",
			ioc->name, r));
		kfree(entries);
		goto try_ioapic;
	}

	ioc->msix_enable = 1;
	/* hook an IRQ to each vector; unwind everything on any failure */
	for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++) {
		r = _base_request_irq(ioc, i, a->vector);
		if (r) {
			_base_free_irq(ioc);
			_base_disable_msix(ioc);
			kfree(entries);
			goto try_ioapic;
		}
	}

	kfree(entries);
	return 0;

/* failback to io_apic interrupt routing */
 try_ioapic:

	r = _base_request_irq(ioc, 0, ioc->pdev->irq);

	return r;
}
1749 
1750 /**
1751  * mpt3sas_base_map_resources - map in controller resources (io/irq/memap)
1752  * @ioc: per adapter object
1753  *
1754  * Returns 0 for success, non-zero for failure.
1755  */
int
mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
{
	struct pci_dev *pdev = ioc->pdev;
	u32 memap_sz;
	u32 pio_sz;
	int i, r = 0;
	u64 pio_chip = 0;
	u64 chip_phys = 0;
	struct adapter_reply_queue *reply_q;

	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n",
	    ioc->name, __func__));

	ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
	if (pci_enable_device_mem(pdev)) {
		pr_warn(MPT3SAS_FMT "pci_enable_device_mem: failed\n",
			ioc->name);
		ioc->bars = 0;
		return -ENODEV;
	}


	if (pci_request_selected_regions(pdev, ioc->bars,
	    MPT3SAS_DRIVER_NAME)) {
		pr_warn(MPT3SAS_FMT "pci_request_selected_regions: failed\n",
			ioc->name);
		ioc->bars = 0;
		r = -ENODEV;
		goto out_fail;
	}

/* AER (Advanced Error Reporting) hooks */
	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);


	if (_base_config_dma_addressing(ioc, pdev) != 0) {
		pr_warn(MPT3SAS_FMT "no suitable DMA mask for %s\n",
		    ioc->name, pci_name(pdev));
		r = -ENODEV;
		goto out_fail;
	}

	/* Map the first I/O-port BAR and the first memory BAR; only the
	 * memory BAR is ioremapped for register access via ioc->chip.
	 */
	for (i = 0, memap_sz = 0, pio_sz = 0 ; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
			if (pio_sz)
				continue;
			pio_chip = (u64)pci_resource_start(pdev, i);
			pio_sz = pci_resource_len(pdev, i);
		} else if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
			if (memap_sz)
				continue;
			ioc->chip_phys = pci_resource_start(pdev, i);
			chip_phys = (u64)ioc->chip_phys;
			memap_sz = pci_resource_len(pdev, i);
			ioc->chip = ioremap(ioc->chip_phys, memap_sz);
			if (ioc->chip == NULL) {
				pr_err(MPT3SAS_FMT "unable to map adapter memory!\n",
					ioc->name);
				r = -EINVAL;
				goto out_fail;
			}
		}
	}

	/* quiesce interrupts before installing handlers */
	_base_mask_interrupts(ioc);
	r = _base_enable_msix(ioc);
	if (r)
		goto out_fail;

	list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
		pr_info(MPT3SAS_FMT "%s: IRQ %d\n",
		    reply_q->name,  ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
		    "IO-APIC enabled"), reply_q->vector);

	pr_info(MPT3SAS_FMT "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
	    ioc->name, (unsigned long long)chip_phys, ioc->chip, memap_sz);
	pr_info(MPT3SAS_FMT "ioport(0x%016llx), size(%d)\n",
	    ioc->name, (unsigned long long)pio_chip, pio_sz);

	/* Save PCI configuration state for recovery from PCI AER/EEH errors */
	pci_save_state(pdev);
	return 0;

 out_fail:
	/* NOTE(review): if ioremap() failed above, chip_phys is non-zero
	 * while ioc->chip is NULL, so this calls iounmap(NULL) -- verify
	 * that is tolerated on all supported architectures.
	 */
	if (ioc->chip_phys)
		iounmap(ioc->chip);
	ioc->chip_phys = 0;
	pci_release_selected_regions(ioc->pdev, ioc->bars);
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
	return r;
}
1851 
1852 /**
1853  * mpt3sas_base_get_msg_frame - obtain request mf pointer
1854  * @ioc: per adapter object
1855  * @smid: system request message index(smid zero is invalid)
1856  *
1857  * Returns virt pointer to message frame.
1858  */
void *
mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	/* request frames are laid out contiguously, request_sz bytes each */
	return (void *)(ioc->request + (smid * ioc->request_sz));
}
1864 
1865 /**
1866  * mpt3sas_base_get_sense_buffer - obtain a sense buffer virt addr
1867  * @ioc: per adapter object
1868  * @smid: system request message index
1869  *
1870  * Returns virt pointer to sense buffer.
1871  */
void *
mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	/* smid is 1-based, hence the (smid - 1) slot index */
	return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
}
1877 
1878 /**
1879  * mpt3sas_base_get_sense_buffer_dma - obtain a sense buffer dma addr
1880  * @ioc: per adapter object
1881  * @smid: system request message index
1882  *
1883  * Returns phys pointer to the low 32bit address of the sense buffer.
1884  */
__le32
mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	/* smid is 1-based; returns the low 32 bits, little-endian, as the
	 * firmware expects in the request frame
	 */
	return cpu_to_le32(ioc->sense_dma + ((smid - 1) *
	    SCSI_SENSE_BUFFERSIZE));
}
1891 
1892 /**
1893  * mpt3sas_base_get_reply_virt_addr - obtain reply frames virt address
1894  * @ioc: per adapter object
1895  * @phys_addr: lower 32 physical addr of the reply
1896  *
1897  * Converts 32bit lower physical addr into a virt address.
1898  */
void *
mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr)
{
	/* zero is the "no reply frame" sentinel */
	if (!phys_addr)
		return NULL;
	/* offset from the reply pool base maps 1:1 onto the virtual pool */
	return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
}
1906 
1907 /**
1908  * mpt3sas_base_get_smid - obtain a free smid from internal queue
1909  * @ioc: per adapter object
1910  * @cb_idx: callback index
1911  *
1912  * Returns smid (zero is invalid)
1913  */
1914 u16
1915 mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
1916 {
1917 	unsigned long flags;
1918 	struct request_tracker *request;
1919 	u16 smid;
1920 
1921 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1922 	if (list_empty(&ioc->internal_free_list)) {
1923 		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1924 		pr_err(MPT3SAS_FMT "%s: smid not available\n",
1925 		    ioc->name, __func__);
1926 		return 0;
1927 	}
1928 
1929 	request = list_entry(ioc->internal_free_list.next,
1930 	    struct request_tracker, tracker_list);
1931 	request->cb_idx = cb_idx;
1932 	smid = request->smid;
1933 	list_del(&request->tracker_list);
1934 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1935 	return smid;
1936 }
1937 
1938 /**
1939  * mpt3sas_base_get_smid_scsiio - obtain a free smid from scsiio queue
1940  * @ioc: per adapter object
1941  * @cb_idx: callback index
1942  * @scmd: pointer to scsi command object
1943  *
1944  * Returns smid (zero is invalid)
1945  */
1946 u16
1947 mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
1948 	struct scsi_cmnd *scmd)
1949 {
1950 	unsigned long flags;
1951 	struct scsiio_tracker *request;
1952 	u16 smid;
1953 
1954 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1955 	if (list_empty(&ioc->free_list)) {
1956 		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1957 		pr_err(MPT3SAS_FMT "%s: smid not available\n",
1958 		    ioc->name, __func__);
1959 		return 0;
1960 	}
1961 
1962 	request = list_entry(ioc->free_list.next,
1963 	    struct scsiio_tracker, tracker_list);
1964 	request->scmd = scmd;
1965 	request->cb_idx = cb_idx;
1966 	smid = request->smid;
1967 	list_del(&request->tracker_list);
1968 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1969 	return smid;
1970 }
1971 
1972 /**
1973  * mpt3sas_base_get_smid_hpr - obtain a free smid from hi-priority queue
1974  * @ioc: per adapter object
1975  * @cb_idx: callback index
1976  *
1977  * Returns smid (zero is invalid)
1978  */
1979 u16
1980 mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
1981 {
1982 	unsigned long flags;
1983 	struct request_tracker *request;
1984 	u16 smid;
1985 
1986 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1987 	if (list_empty(&ioc->hpr_free_list)) {
1988 		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1989 		return 0;
1990 	}
1991 
1992 	request = list_entry(ioc->hpr_free_list.next,
1993 	    struct request_tracker, tracker_list);
1994 	request->cb_idx = cb_idx;
1995 	smid = request->smid;
1996 	list_del(&request->tracker_list);
1997 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1998 	return smid;
1999 }
2000 
2001 /**
2002  * mpt3sas_base_free_smid - put smid back on free_list
2003  * @ioc: per adapter object
2004  * @smid: system request message index
2005  *
2006  * Return nothing.
2007  */
void
mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	unsigned long flags;
	int i;
	struct chain_tracker *chain_req, *next;

	/* The smid ranges partition the pool: [1, hi_priority_smid) is
	 * scsiio, [hi_priority_smid, internal_smid) is hi-priority, and
	 * [internal_smid, hba_queue_depth] is the internal queue.
	 */
	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	if (smid < ioc->hi_priority_smid) {
		/* scsiio queue */
		i = smid - 1;
		/* return any chain buffers borrowed by this request */
		if (!list_empty(&ioc->scsi_lookup[i].chain_list)) {
			list_for_each_entry_safe(chain_req, next,
			    &ioc->scsi_lookup[i].chain_list, tracker_list) {
				list_del_init(&chain_req->tracker_list);
				list_add(&chain_req->tracker_list,
				    &ioc->free_chain_list);
			}
		}
		ioc->scsi_lookup[i].cb_idx = 0xFF;
		ioc->scsi_lookup[i].scmd = NULL;
		list_add(&ioc->scsi_lookup[i].tracker_list, &ioc->free_list);
		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

		/*
		 * See _wait_for_commands_to_complete() call with regards
		 * to this code.
		 */
		if (ioc->shost_recovery && ioc->pending_io_count) {
			/* last outstanding IO: wake the reset waiter */
			if (ioc->pending_io_count == 1)
				wake_up(&ioc->reset_wq);
			ioc->pending_io_count--;
		}
		/* lock already dropped on this path */
		return;
	} else if (smid < ioc->internal_smid) {
		/* hi-priority */
		i = smid - ioc->hi_priority_smid;
		ioc->hpr_lookup[i].cb_idx = 0xFF;
		list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list);
	} else if (smid <= ioc->hba_queue_depth) {
		/* internal queue */
		i = smid - ioc->internal_smid;
		ioc->internal_lookup[i].cb_idx = 0xFF;
		list_add(&ioc->internal_lookup[i].tracker_list,
		    &ioc->internal_free_list);
	}
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
}
2056 
2057 /**
2058  * _base_writeq - 64 bit write to MMIO
2059  * @ioc: per adapter object
2060  * @b: data payload
2061  * @addr: address in MMIO space
2062  * @writeq_lock: spin lock
2063  *
2064  * Glue for handling an atomic 64 bit word to MMIO. This special handling takes
2065  * care of 32 bit environment where its not quarenteed to send the entire word
2066  * in one transfer.
2067  */
2068 #if defined(writeq) && defined(CONFIG_64BIT)
2069 static inline void
2070 _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
2071 {
2072 	writeq(cpu_to_le64(b), addr);
2073 }
2074 #else
2075 static inline void
2076 _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
2077 {
2078 	unsigned long flags;
2079 	__u64 data_out = cpu_to_le64(b);
2080 
2081 	spin_lock_irqsave(writeq_lock, flags);
2082 	writel((u32)(data_out), addr);
2083 	writel((u32)(data_out >> 32), (addr + 4));
2084 	spin_unlock_irqrestore(writeq_lock, flags);
2085 }
2086 #endif
2087 
2088 static inline u8
2089 _base_get_msix_index(struct MPT3SAS_ADAPTER *ioc)
2090 {
2091 	return ioc->cpu_msix_table[raw_smp_processor_id()];
2092 }
2093 
2094 /**
2095  * mpt3sas_base_put_smid_scsi_io - send SCSI_IO request to firmware
2096  * @ioc: per adapter object
2097  * @smid: system request message index
2098  * @handle: device handle
2099  *
2100  * Return nothing.
2101  */
2102 void
2103 mpt3sas_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
2104 {
2105 	Mpi2RequestDescriptorUnion_t descriptor;
2106 	u64 *request = (u64 *)&descriptor;
2107 
2108 
2109 	descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
2110 	descriptor.SCSIIO.MSIxIndex =  _base_get_msix_index(ioc);
2111 	descriptor.SCSIIO.SMID = cpu_to_le16(smid);
2112 	descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
2113 	descriptor.SCSIIO.LMID = 0;
2114 	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
2115 	    &ioc->scsi_lookup_lock);
2116 }
2117 
2118 /**
2119  * mpt3sas_base_put_smid_fast_path - send fast path request to firmware
2120  * @ioc: per adapter object
2121  * @smid: system request message index
2122  * @handle: device handle
2123  *
2124  * Return nothing.
2125  */
2126 void
2127 mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
2128 	u16 handle)
2129 {
2130 	Mpi2RequestDescriptorUnion_t descriptor;
2131 	u64 *request = (u64 *)&descriptor;
2132 
2133 	descriptor.SCSIIO.RequestFlags =
2134 	    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
2135 	descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc);
2136 	descriptor.SCSIIO.SMID = cpu_to_le16(smid);
2137 	descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
2138 	descriptor.SCSIIO.LMID = 0;
2139 	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
2140 	    &ioc->scsi_lookup_lock);
2141 }
2142 
2143 /**
2144  * mpt3sas_base_put_smid_hi_priority - send Task Managment request to firmware
2145  * @ioc: per adapter object
2146  * @smid: system request message index
2147  *
2148  * Return nothing.
2149  */
2150 void
2151 mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid)
2152 {
2153 	Mpi2RequestDescriptorUnion_t descriptor;
2154 	u64 *request = (u64 *)&descriptor;
2155 
2156 	descriptor.HighPriority.RequestFlags =
2157 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
2158 	descriptor.HighPriority.MSIxIndex =  0;
2159 	descriptor.HighPriority.SMID = cpu_to_le16(smid);
2160 	descriptor.HighPriority.LMID = 0;
2161 	descriptor.HighPriority.Reserved1 = 0;
2162 	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
2163 	    &ioc->scsi_lookup_lock);
2164 }
2165 
2166 /**
2167  * mpt3sas_base_put_smid_default - Default, primarily used for config pages
2168  * @ioc: per adapter object
2169  * @smid: system request message index
2170  *
2171  * Return nothing.
2172  */
2173 void
2174 mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
2175 {
2176 	Mpi2RequestDescriptorUnion_t descriptor;
2177 	u64 *request = (u64 *)&descriptor;
2178 
2179 	descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2180 	descriptor.Default.MSIxIndex =  _base_get_msix_index(ioc);
2181 	descriptor.Default.SMID = cpu_to_le16(smid);
2182 	descriptor.Default.LMID = 0;
2183 	descriptor.Default.DescriptorTypeDependent = 0;
2184 	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
2185 	    &ioc->scsi_lookup_lock);
2186 }
2187 
2188 
2189 
2190 /**
2191  * _base_display_ioc_capabilities - Disply IOC's capabilities.
2192  * @ioc: per adapter object
2193  *
2194  * Return nothing.
2195  */
2196 static void
2197 _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
2198 {
2199 	int i = 0;
2200 	char desc[16];
2201 	u32 iounit_pg1_flags;
2202 	u32 bios_version;
2203 
2204 	bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
2205 	strncpy(desc, ioc->manu_pg0.ChipName, 16);
2206 	pr_info(MPT3SAS_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), "\
2207 	   "ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
2208 	    ioc->name, desc,
2209 	   (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
2210 	   (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
2211 	   (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
2212 	   ioc->facts.FWVersion.Word & 0x000000FF,
2213 	   ioc->pdev->revision,
2214 	   (bios_version & 0xFF000000) >> 24,
2215 	   (bios_version & 0x00FF0000) >> 16,
2216 	   (bios_version & 0x0000FF00) >> 8,
2217 	    bios_version & 0x000000FF);
2218 
2219 	pr_info(MPT3SAS_FMT "Protocol=(", ioc->name);
2220 
2221 	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
2222 		pr_info("Initiator");
2223 		i++;
2224 	}
2225 
2226 	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
2227 		pr_info("%sTarget", i ? "," : "");
2228 		i++;
2229 	}
2230 
2231 	i = 0;
2232 	pr_info("), ");
2233 	pr_info("Capabilities=(");
2234 
2235 	if (ioc->facts.IOCCapabilities &
2236 		    MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
2237 			pr_info("Raid");
2238 			i++;
2239 	}
2240 
2241 	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
2242 		pr_info("%sTLR", i ? "," : "");
2243 		i++;
2244 	}
2245 
2246 	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
2247 		pr_info("%sMulticast", i ? "," : "");
2248 		i++;
2249 	}
2250 
2251 	if (ioc->facts.IOCCapabilities &
2252 	    MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
2253 		pr_info("%sBIDI Target", i ? "," : "");
2254 		i++;
2255 	}
2256 
2257 	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
2258 		pr_info("%sEEDP", i ? "," : "");
2259 		i++;
2260 	}
2261 
2262 	if (ioc->facts.IOCCapabilities &
2263 	    MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
2264 		pr_info("%sSnapshot Buffer", i ? "," : "");
2265 		i++;
2266 	}
2267 
2268 	if (ioc->facts.IOCCapabilities &
2269 	    MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
2270 		pr_info("%sDiag Trace Buffer", i ? "," : "");
2271 		i++;
2272 	}
2273 
2274 	if (ioc->facts.IOCCapabilities &
2275 	    MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
2276 		pr_info("%sDiag Extended Buffer", i ? "," : "");
2277 		i++;
2278 	}
2279 
2280 	if (ioc->facts.IOCCapabilities &
2281 	    MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
2282 		pr_info("%sTask Set Full", i ? "," : "");
2283 		i++;
2284 	}
2285 
2286 	iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
2287 	if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
2288 		pr_info("%sNCQ", i ? "," : "");
2289 		i++;
2290 	}
2291 
2292 	pr_info(")\n");
2293 }
2294 
2295 /**
2296  * mpt3sas_base_update_missing_delay - change the missing delay timers
2297  * @ioc: per adapter object
2298  * @device_missing_delay: amount of time till device is reported missing
2299  * @io_missing_delay: interval IO is returned when there is a missing device
2300  *
2301  * Return nothing.
2302  *
2303  * Passed on the command line, this function will modify the device missing
2304  * delay, as well as the io missing delay. This should be called at driver
2305  * load time.
2306  */
2307 void
2308 mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
2309 	u16 device_missing_delay, u8 io_missing_delay)
2310 {
2311 	u16 dmd, dmd_new, dmd_orignal;
2312 	u8 io_missing_delay_original;
2313 	u16 sz;
2314 	Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
2315 	Mpi2ConfigReply_t mpi_reply;
2316 	u8 num_phys = 0;
2317 	u16 ioc_status;
2318 
2319 	mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
2320 	if (!num_phys)
2321 		return;
2322 
2323 	sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys *
2324 	    sizeof(Mpi2SasIOUnit1PhyData_t));
2325 	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
2326 	if (!sas_iounit_pg1) {
2327 		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
2328 		    ioc->name, __FILE__, __LINE__, __func__);
2329 		goto out;
2330 	}
2331 	if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
2332 	    sas_iounit_pg1, sz))) {
2333 		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
2334 		    ioc->name, __FILE__, __LINE__, __func__);
2335 		goto out;
2336 	}
2337 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
2338 	    MPI2_IOCSTATUS_MASK;
2339 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
2340 		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
2341 		    ioc->name, __FILE__, __LINE__, __func__);
2342 		goto out;
2343 	}
2344 
2345 	/* device missing delay */
2346 	dmd = sas_iounit_pg1->ReportDeviceMissingDelay;
2347 	if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
2348 		dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
2349 	else
2350 		dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
2351 	dmd_orignal = dmd;
2352 	if (device_missing_delay > 0x7F) {
2353 		dmd = (device_missing_delay > 0x7F0) ? 0x7F0 :
2354 		    device_missing_delay;
2355 		dmd = dmd / 16;
2356 		dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16;
2357 	} else
2358 		dmd = device_missing_delay;
2359 	sas_iounit_pg1->ReportDeviceMissingDelay = dmd;
2360 
2361 	/* io missing delay */
2362 	io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay;
2363 	sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay;
2364 
2365 	if (!mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
2366 	    sz)) {
2367 		if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
2368 			dmd_new = (dmd &
2369 			    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
2370 		else
2371 			dmd_new =
2372 		    dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
2373 		pr_info(MPT3SAS_FMT "device_missing_delay: old(%d), new(%d)\n",
2374 			ioc->name, dmd_orignal, dmd_new);
2375 		pr_info(MPT3SAS_FMT "ioc_missing_delay: old(%d), new(%d)\n",
2376 			ioc->name, io_missing_delay_original,
2377 		    io_missing_delay);
2378 		ioc->device_missing_delay = dmd_new;
2379 		ioc->io_missing_delay = io_missing_delay;
2380 	}
2381 
2382 out:
2383 	kfree(sas_iounit_pg1);
2384 }
2385 /**
2386  * _base_static_config_pages - static start of day config pages
2387  * @ioc: per adapter object
2388  *
2389  * Return nothing.
2390  */
2391 static void
2392 _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
2393 {
2394 	Mpi2ConfigReply_t mpi_reply;
2395 	u32 iounit_pg1_flags;
2396 
2397 	mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
2398 	if (ioc->ir_firmware)
2399 		mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
2400 		    &ioc->manu_pg10);
2401 
2402 	/*
2403 	 * Ensure correct T10 PI operation if vendor left EEDPTagMode
2404 	 * flag unset in NVDATA.
2405 	 */
2406 	mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply, &ioc->manu_pg11);
2407 	if (ioc->manu_pg11.EEDPTagMode == 0) {
2408 		pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
2409 		    ioc->name);
2410 		ioc->manu_pg11.EEDPTagMode &= ~0x3;
2411 		ioc->manu_pg11.EEDPTagMode |= 0x1;
2412 		mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply,
2413 		    &ioc->manu_pg11);
2414 	}
2415 
2416 	mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
2417 	mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
2418 	mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
2419 	mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
2420 	mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
2421 	_base_display_ioc_capabilities(ioc);
2422 
2423 	/*
2424 	 * Enable task_set_full handling in iounit_pg1 when the
2425 	 * facts capabilities indicate that its supported.
2426 	 */
2427 	iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
2428 	if ((ioc->facts.IOCCapabilities &
2429 	    MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
2430 		iounit_pg1_flags &=
2431 		    ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
2432 	else
2433 		iounit_pg1_flags |=
2434 		    MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
2435 	ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
2436 	mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
2437 }
2438 
2439 /**
2440  * _base_release_memory_pools - release memory
2441  * @ioc: per adapter object
2442  *
2443  * Free memory allocated from _base_allocate_memory_pools.
2444  *
2445  * Return nothing.
2446  */
2447 static void
2448 _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
2449 {
2450 	int i;
2451 
2452 	dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2453 	    __func__));
2454 
2455 	if (ioc->request) {
2456 		pci_free_consistent(ioc->pdev, ioc->request_dma_sz,
2457 		    ioc->request,  ioc->request_dma);
2458 		dexitprintk(ioc, pr_info(MPT3SAS_FMT
2459 			"request_pool(0x%p): free\n",
2460 			ioc->name, ioc->request));
2461 		ioc->request = NULL;
2462 	}
2463 
2464 	if (ioc->sense) {
2465 		pci_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
2466 		if (ioc->sense_dma_pool)
2467 			pci_pool_destroy(ioc->sense_dma_pool);
2468 		dexitprintk(ioc, pr_info(MPT3SAS_FMT
2469 			"sense_pool(0x%p): free\n",
2470 			ioc->name, ioc->sense));
2471 		ioc->sense = NULL;
2472 	}
2473 
2474 	if (ioc->reply) {
2475 		pci_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
2476 		if (ioc->reply_dma_pool)
2477 			pci_pool_destroy(ioc->reply_dma_pool);
2478 		dexitprintk(ioc, pr_info(MPT3SAS_FMT
2479 			"reply_pool(0x%p): free\n",
2480 			ioc->name, ioc->reply));
2481 		ioc->reply = NULL;
2482 	}
2483 
2484 	if (ioc->reply_free) {
2485 		pci_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
2486 		    ioc->reply_free_dma);
2487 		if (ioc->reply_free_dma_pool)
2488 			pci_pool_destroy(ioc->reply_free_dma_pool);
2489 		dexitprintk(ioc, pr_info(MPT3SAS_FMT
2490 			"reply_free_pool(0x%p): free\n",
2491 			ioc->name, ioc->reply_free));
2492 		ioc->reply_free = NULL;
2493 	}
2494 
2495 	if (ioc->reply_post_free) {
2496 		pci_pool_free(ioc->reply_post_free_dma_pool,
2497 		    ioc->reply_post_free, ioc->reply_post_free_dma);
2498 		if (ioc->reply_post_free_dma_pool)
2499 			pci_pool_destroy(ioc->reply_post_free_dma_pool);
2500 		dexitprintk(ioc, pr_info(MPT3SAS_FMT
2501 		    "reply_post_free_pool(0x%p): free\n", ioc->name,
2502 		    ioc->reply_post_free));
2503 		ioc->reply_post_free = NULL;
2504 	}
2505 
2506 	if (ioc->config_page) {
2507 		dexitprintk(ioc, pr_info(MPT3SAS_FMT
2508 		    "config_page(0x%p): free\n", ioc->name,
2509 		    ioc->config_page));
2510 		pci_free_consistent(ioc->pdev, ioc->config_page_sz,
2511 		    ioc->config_page, ioc->config_page_dma);
2512 	}
2513 
2514 	if (ioc->scsi_lookup) {
2515 		free_pages((ulong)ioc->scsi_lookup, ioc->scsi_lookup_pages);
2516 		ioc->scsi_lookup = NULL;
2517 	}
2518 	kfree(ioc->hpr_lookup);
2519 	kfree(ioc->internal_lookup);
2520 	if (ioc->chain_lookup) {
2521 		for (i = 0; i < ioc->chain_depth; i++) {
2522 			if (ioc->chain_lookup[i].chain_buffer)
2523 				pci_pool_free(ioc->chain_dma_pool,
2524 				    ioc->chain_lookup[i].chain_buffer,
2525 				    ioc->chain_lookup[i].chain_buffer_dma);
2526 		}
2527 		if (ioc->chain_dma_pool)
2528 			pci_pool_destroy(ioc->chain_dma_pool);
2529 		free_pages((ulong)ioc->chain_lookup, ioc->chain_pages);
2530 		ioc->chain_lookup = NULL;
2531 	}
2532 }
2533 
2534 /**
2535  * _base_allocate_memory_pools - allocate start of day memory pools
2536  * @ioc: per adapter object
2537  * @sleep_flag: CAN_SLEEP or NO_SLEEP
2538  *
2539  * Returns 0 success, anything else error
2540  */
2541 static int
2542 _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc,  int sleep_flag)
2543 {
2544 	struct mpt3sas_facts *facts;
2545 	u16 max_sge_elements;
2546 	u16 chains_needed_per_io;
2547 	u32 sz, total_sz, reply_post_free_sz;
2548 	u32 retry_sz;
2549 	u16 max_request_credit;
2550 	unsigned short sg_tablesize;
2551 	u16 sge_size;
2552 	int i;
2553 
2554 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2555 	    __func__));
2556 
2557 
2558 	retry_sz = 0;
2559 	facts = &ioc->facts;
2560 
2561 	/* command line tunables for max sgl entries */
2562 	if (max_sgl_entries != -1)
2563 		sg_tablesize = max_sgl_entries;
2564 	else
2565 		sg_tablesize = MPT3SAS_SG_DEPTH;
2566 
2567 	if (sg_tablesize < MPT3SAS_MIN_PHYS_SEGMENTS)
2568 		sg_tablesize = MPT3SAS_MIN_PHYS_SEGMENTS;
2569 	else if (sg_tablesize > MPT3SAS_MAX_PHYS_SEGMENTS)
2570 		sg_tablesize = MPT3SAS_MAX_PHYS_SEGMENTS;
2571 	ioc->shost->sg_tablesize = sg_tablesize;
2572 
2573 	ioc->hi_priority_depth = facts->HighPriorityCredit;
2574 	ioc->internal_depth = ioc->hi_priority_depth + (5);
2575 	/* command line tunables  for max controller queue depth */
2576 	if (max_queue_depth != -1 && max_queue_depth != 0) {
2577 		max_request_credit = min_t(u16, max_queue_depth +
2578 		    ioc->hi_priority_depth + ioc->internal_depth,
2579 		    facts->RequestCredit);
2580 		if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
2581 			max_request_credit =  MAX_HBA_QUEUE_DEPTH;
2582 	} else
2583 		max_request_credit = min_t(u16, facts->RequestCredit,
2584 		    MAX_HBA_QUEUE_DEPTH);
2585 
2586 	ioc->hba_queue_depth = max_request_credit;
2587 
2588 	/* request frame size */
2589 	ioc->request_sz = facts->IOCRequestFrameSize * 4;
2590 
2591 	/* reply frame size */
2592 	ioc->reply_sz = facts->ReplyFrameSize * 4;
2593 
2594 	/* calculate the max scatter element size */
2595 	sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee);
2596 
2597  retry_allocation:
2598 	total_sz = 0;
2599 	/* calculate number of sg elements left over in the 1st frame */
2600 	max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
2601 	    sizeof(Mpi2SGEIOUnion_t)) + sge_size);
2602 	ioc->max_sges_in_main_message = max_sge_elements/sge_size;
2603 
2604 	/* now do the same for a chain buffer */
2605 	max_sge_elements = ioc->request_sz - sge_size;
2606 	ioc->max_sges_in_chain_message = max_sge_elements/sge_size;
2607 
2608 	/*
2609 	 *  MPT3SAS_SG_DEPTH = CONFIG_FUSION_MAX_SGE
2610 	 */
2611 	chains_needed_per_io = ((ioc->shost->sg_tablesize -
2612 	   ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message)
2613 	    + 1;
2614 	if (chains_needed_per_io > facts->MaxChainDepth) {
2615 		chains_needed_per_io = facts->MaxChainDepth;
2616 		ioc->shost->sg_tablesize = min_t(u16,
2617 		ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message
2618 		* chains_needed_per_io), ioc->shost->sg_tablesize);
2619 	}
2620 	ioc->chains_needed_per_io = chains_needed_per_io;
2621 
2622 	/* reply free queue sizing - taking into account for 64 FW events */
2623 	ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
2624 
2625 	/* calculate reply descriptor post queue depth */
2626 	ioc->reply_post_queue_depth = ioc->hba_queue_depth +
2627 				ioc->reply_free_queue_depth +  1 ;
2628 	/* align the reply post queue on the next 16 count boundary */
2629 	if (ioc->reply_post_queue_depth % 16)
2630 		ioc->reply_post_queue_depth += 16 -
2631 		(ioc->reply_post_queue_depth % 16);
2632 
2633 
2634 	if (ioc->reply_post_queue_depth >
2635 	    facts->MaxReplyDescriptorPostQueueDepth) {
2636 		ioc->reply_post_queue_depth =
2637 				facts->MaxReplyDescriptorPostQueueDepth -
2638 		    (facts->MaxReplyDescriptorPostQueueDepth % 16);
2639 		ioc->hba_queue_depth =
2640 				((ioc->reply_post_queue_depth - 64) / 2) - 1;
2641 		ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
2642 	}
2643 
2644 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "scatter gather: " \
2645 	    "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
2646 	    "chains_per_io(%d)\n", ioc->name, ioc->max_sges_in_main_message,
2647 	    ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize,
2648 	    ioc->chains_needed_per_io));
2649 
2650 	ioc->scsiio_depth = ioc->hba_queue_depth -
2651 	    ioc->hi_priority_depth - ioc->internal_depth;
2652 
2653 	/* set the scsi host can_queue depth
2654 	 * with some internal commands that could be outstanding
2655 	 */
2656 	ioc->shost->can_queue = ioc->scsiio_depth;
2657 	dinitprintk(ioc, pr_info(MPT3SAS_FMT
2658 		"scsi host: can_queue depth (%d)\n",
2659 		ioc->name, ioc->shost->can_queue));
2660 
2661 
2662 	/* contiguous pool for request and chains, 16 byte align, one extra "
2663 	 * "frame for smid=0
2664 	 */
2665 	ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
2666 	sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);
2667 
2668 	/* hi-priority queue */
2669 	sz += (ioc->hi_priority_depth * ioc->request_sz);
2670 
2671 	/* internal queue */
2672 	sz += (ioc->internal_depth * ioc->request_sz);
2673 
2674 	ioc->request_dma_sz = sz;
2675 	ioc->request = pci_alloc_consistent(ioc->pdev, sz, &ioc->request_dma);
2676 	if (!ioc->request) {
2677 		pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
2678 		    "failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
2679 		    "total(%d kB)\n", ioc->name, ioc->hba_queue_depth,
2680 		    ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
2681 		if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH)
2682 			goto out;
2683 		retry_sz += 64;
2684 		ioc->hba_queue_depth = max_request_credit - retry_sz;
2685 		goto retry_allocation;
2686 	}
2687 
2688 	if (retry_sz)
2689 		pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
2690 		    "succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
2691 		    "total(%d kb)\n", ioc->name, ioc->hba_queue_depth,
2692 		    ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
2693 
2694 	/* hi-priority queue */
2695 	ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
2696 	    ioc->request_sz);
2697 	ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) *
2698 	    ioc->request_sz);
2699 
2700 	/* internal queue */
2701 	ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth *
2702 	    ioc->request_sz);
2703 	ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
2704 	    ioc->request_sz);
2705 
2706 	dinitprintk(ioc, pr_info(MPT3SAS_FMT
2707 		"request pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
2708 		ioc->name, ioc->request, ioc->hba_queue_depth, ioc->request_sz,
2709 	    (ioc->hba_queue_depth * ioc->request_sz)/1024));
2710 
2711 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "request pool: dma(0x%llx)\n",
2712 	    ioc->name, (unsigned long long) ioc->request_dma));
2713 	total_sz += sz;
2714 
2715 	sz = ioc->scsiio_depth * sizeof(struct scsiio_tracker);
2716 	ioc->scsi_lookup_pages = get_order(sz);
2717 	ioc->scsi_lookup = (struct scsiio_tracker *)__get_free_pages(
2718 	    GFP_KERNEL, ioc->scsi_lookup_pages);
2719 	if (!ioc->scsi_lookup) {
2720 		pr_err(MPT3SAS_FMT "scsi_lookup: get_free_pages failed, sz(%d)\n",
2721 			ioc->name, (int)sz);
2722 		goto out;
2723 	}
2724 
2725 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "scsiio(0x%p): depth(%d)\n",
2726 		ioc->name, ioc->request, ioc->scsiio_depth));
2727 
2728 	ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
2729 	sz = ioc->chain_depth * sizeof(struct chain_tracker);
2730 	ioc->chain_pages = get_order(sz);
2731 	ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
2732 	    GFP_KERNEL, ioc->chain_pages);
2733 	if (!ioc->chain_lookup) {
2734 		pr_err(MPT3SAS_FMT "chain_lookup: __get_free_pages failed\n",
2735 			ioc->name);
2736 		goto out;
2737 	}
2738 	ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev,
2739 	    ioc->request_sz, 16, 0);
2740 	if (!ioc->chain_dma_pool) {
2741 		pr_err(MPT3SAS_FMT "chain_dma_pool: pci_pool_create failed\n",
2742 			ioc->name);
2743 		goto out;
2744 	}
2745 	for (i = 0; i < ioc->chain_depth; i++) {
2746 		ioc->chain_lookup[i].chain_buffer = pci_pool_alloc(
2747 		    ioc->chain_dma_pool , GFP_KERNEL,
2748 		    &ioc->chain_lookup[i].chain_buffer_dma);
2749 		if (!ioc->chain_lookup[i].chain_buffer) {
2750 			ioc->chain_depth = i;
2751 			goto chain_done;
2752 		}
2753 		total_sz += ioc->request_sz;
2754 	}
2755  chain_done:
2756 	dinitprintk(ioc, pr_info(MPT3SAS_FMT
2757 		"chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
2758 		ioc->name, ioc->chain_depth, ioc->request_sz,
2759 		((ioc->chain_depth *  ioc->request_sz))/1024));
2760 
2761 	/* initialize hi-priority queue smid's */
2762 	ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
2763 	    sizeof(struct request_tracker), GFP_KERNEL);
2764 	if (!ioc->hpr_lookup) {
2765 		pr_err(MPT3SAS_FMT "hpr_lookup: kcalloc failed\n",
2766 		    ioc->name);
2767 		goto out;
2768 	}
2769 	ioc->hi_priority_smid = ioc->scsiio_depth + 1;
2770 	dinitprintk(ioc, pr_info(MPT3SAS_FMT
2771 		"hi_priority(0x%p): depth(%d), start smid(%d)\n",
2772 		ioc->name, ioc->hi_priority,
2773 	    ioc->hi_priority_depth, ioc->hi_priority_smid));
2774 
2775 	/* initialize internal queue smid's */
2776 	ioc->internal_lookup = kcalloc(ioc->internal_depth,
2777 	    sizeof(struct request_tracker), GFP_KERNEL);
2778 	if (!ioc->internal_lookup) {
2779 		pr_err(MPT3SAS_FMT "internal_lookup: kcalloc failed\n",
2780 		    ioc->name);
2781 		goto out;
2782 	}
2783 	ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
2784 	dinitprintk(ioc, pr_info(MPT3SAS_FMT
2785 		"internal(0x%p): depth(%d), start smid(%d)\n",
2786 		ioc->name, ioc->internal,
2787 	    ioc->internal_depth, ioc->internal_smid));
2788 
2789 	/* sense buffers, 4 byte align */
2790 	sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
2791 	ioc->sense_dma_pool = pci_pool_create("sense pool", ioc->pdev, sz, 4,
2792 	    0);
2793 	if (!ioc->sense_dma_pool) {
2794 		pr_err(MPT3SAS_FMT "sense pool: pci_pool_create failed\n",
2795 		    ioc->name);
2796 		goto out;
2797 	}
2798 	ioc->sense = pci_pool_alloc(ioc->sense_dma_pool , GFP_KERNEL,
2799 	    &ioc->sense_dma);
2800 	if (!ioc->sense) {
2801 		pr_err(MPT3SAS_FMT "sense pool: pci_pool_alloc failed\n",
2802 		    ioc->name);
2803 		goto out;
2804 	}
2805 	dinitprintk(ioc, pr_info(MPT3SAS_FMT
2806 	    "sense pool(0x%p): depth(%d), element_size(%d), pool_size"
2807 	    "(%d kB)\n", ioc->name, ioc->sense, ioc->scsiio_depth,
2808 	    SCSI_SENSE_BUFFERSIZE, sz/1024));
2809 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "sense_dma(0x%llx)\n",
2810 	    ioc->name, (unsigned long long)ioc->sense_dma));
2811 	total_sz += sz;
2812 
2813 	/* reply pool, 4 byte align */
2814 	sz = ioc->reply_free_queue_depth * ioc->reply_sz;
2815 	ioc->reply_dma_pool = pci_pool_create("reply pool", ioc->pdev, sz, 4,
2816 	    0);
2817 	if (!ioc->reply_dma_pool) {
2818 		pr_err(MPT3SAS_FMT "reply pool: pci_pool_create failed\n",
2819 		    ioc->name);
2820 		goto out;
2821 	}
2822 	ioc->reply = pci_pool_alloc(ioc->reply_dma_pool , GFP_KERNEL,
2823 	    &ioc->reply_dma);
2824 	if (!ioc->reply) {
2825 		pr_err(MPT3SAS_FMT "reply pool: pci_pool_alloc failed\n",
2826 		    ioc->name);
2827 		goto out;
2828 	}
2829 	ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
2830 	ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
2831 	dinitprintk(ioc, pr_info(MPT3SAS_FMT
2832 		"reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
2833 		ioc->name, ioc->reply,
2834 	    ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024));
2835 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_dma(0x%llx)\n",
2836 	    ioc->name, (unsigned long long)ioc->reply_dma));
2837 	total_sz += sz;
2838 
2839 	/* reply free queue, 16 byte align */
2840 	sz = ioc->reply_free_queue_depth * 4;
2841 	ioc->reply_free_dma_pool = pci_pool_create("reply_free pool",
2842 	    ioc->pdev, sz, 16, 0);
2843 	if (!ioc->reply_free_dma_pool) {
2844 		pr_err(MPT3SAS_FMT "reply_free pool: pci_pool_create failed\n",
2845 			ioc->name);
2846 		goto out;
2847 	}
2848 	ioc->reply_free = pci_pool_alloc(ioc->reply_free_dma_pool , GFP_KERNEL,
2849 	    &ioc->reply_free_dma);
2850 	if (!ioc->reply_free) {
2851 		pr_err(MPT3SAS_FMT "reply_free pool: pci_pool_alloc failed\n",
2852 			ioc->name);
2853 		goto out;
2854 	}
2855 	memset(ioc->reply_free, 0, sz);
2856 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_free pool(0x%p): " \
2857 	    "depth(%d), element_size(%d), pool_size(%d kB)\n", ioc->name,
2858 	    ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024));
2859 	dinitprintk(ioc, pr_info(MPT3SAS_FMT
2860 		"reply_free_dma (0x%llx)\n",
2861 		ioc->name, (unsigned long long)ioc->reply_free_dma));
2862 	total_sz += sz;
2863 
2864 	/* reply post queue, 16 byte align */
2865 	reply_post_free_sz = ioc->reply_post_queue_depth *
2866 	    sizeof(Mpi2DefaultReplyDescriptor_t);
2867 	if (_base_is_controller_msix_enabled(ioc))
2868 		sz = reply_post_free_sz * ioc->reply_queue_count;
2869 	else
2870 		sz = reply_post_free_sz;
2871 	ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool",
2872 	    ioc->pdev, sz, 16, 0);
2873 	if (!ioc->reply_post_free_dma_pool) {
2874 		pr_err(MPT3SAS_FMT
2875 			"reply_post_free pool: pci_pool_create failed\n",
2876 			ioc->name);
2877 		goto out;
2878 	}
2879 	ioc->reply_post_free = pci_pool_alloc(ioc->reply_post_free_dma_pool ,
2880 	    GFP_KERNEL, &ioc->reply_post_free_dma);
2881 	if (!ioc->reply_post_free) {
2882 		pr_err(MPT3SAS_FMT
2883 			"reply_post_free pool: pci_pool_alloc failed\n",
2884 			ioc->name);
2885 		goto out;
2886 	}
2887 	memset(ioc->reply_post_free, 0, sz);
2888 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply post free pool" \
2889 	    "(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
2890 	    ioc->name, ioc->reply_post_free, ioc->reply_post_queue_depth, 8,
2891 	    sz/1024));
2892 	dinitprintk(ioc, pr_info(MPT3SAS_FMT
2893 		"reply_post_free_dma = (0x%llx)\n",
2894 		ioc->name, (unsigned long long)
2895 	    ioc->reply_post_free_dma));
2896 	total_sz += sz;
2897 
2898 	ioc->config_page_sz = 512;
2899 	ioc->config_page = pci_alloc_consistent(ioc->pdev,
2900 	    ioc->config_page_sz, &ioc->config_page_dma);
2901 	if (!ioc->config_page) {
2902 		pr_err(MPT3SAS_FMT
2903 			"config page: pci_pool_alloc failed\n",
2904 			ioc->name);
2905 		goto out;
2906 	}
2907 	dinitprintk(ioc, pr_info(MPT3SAS_FMT
2908 		"config page(0x%p): size(%d)\n",
2909 		ioc->name, ioc->config_page, ioc->config_page_sz));
2910 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "config_page_dma(0x%llx)\n",
2911 		ioc->name, (unsigned long long)ioc->config_page_dma));
2912 	total_sz += ioc->config_page_sz;
2913 
2914 	pr_info(MPT3SAS_FMT "Allocated physical memory: size(%d kB)\n",
2915 	    ioc->name, total_sz/1024);
2916 	pr_info(MPT3SAS_FMT
2917 		"Current Controller Queue Depth(%d),Max Controller Queue Depth(%d)\n",
2918 	    ioc->name, ioc->shost->can_queue, facts->RequestCredit);
2919 	pr_info(MPT3SAS_FMT "Scatter Gather Elements per IO(%d)\n",
2920 	    ioc->name, ioc->shost->sg_tablesize);
2921 	return 0;
2922 
2923  out:
2924 	return -ENOMEM;
2925 }
2926 
2927 /**
2928  * mpt3sas_base_get_iocstate - Get the current state of a MPT adapter.
2929  * @ioc: Pointer to MPT_ADAPTER structure
2930  * @cooked: Request raw or cooked IOC state
2931  *
2932  * Returns all IOC Doorbell register bits if cooked==0, else just the
2933  * Doorbell bits in MPI_IOC_STATE_MASK.
2934  */
2935 u32
2936 mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
2937 {
2938 	u32 s, sc;
2939 
2940 	s = readl(&ioc->chip->Doorbell);
2941 	sc = s & MPI2_IOC_STATE_MASK;
2942 	return cooked ? sc : s;
2943 }
2944 
2945 /**
2946  * _base_wait_on_iocstate - waiting on a particular ioc state
2947  * @ioc_state: controller state { READY, OPERATIONAL, or RESET }
2948  * @timeout: timeout in second
2949  * @sleep_flag: CAN_SLEEP or NO_SLEEP
2950  *
2951  * Returns 0 for success, non-zero for failure.
2952  */
2953 static int
2954 _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout,
2955 	int sleep_flag)
2956 {
2957 	u32 count, cntdn;
2958 	u32 current_state;
2959 
2960 	count = 0;
2961 	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
2962 	do {
2963 		current_state = mpt3sas_base_get_iocstate(ioc, 1);
2964 		if (current_state == ioc_state)
2965 			return 0;
2966 		if (count && current_state == MPI2_IOC_STATE_FAULT)
2967 			break;
2968 		if (sleep_flag == CAN_SLEEP)
2969 			usleep_range(1000, 1500);
2970 		else
2971 			udelay(500);
2972 		count++;
2973 	} while (--cntdn);
2974 
2975 	return current_state;
2976 }
2977 
2978 /**
2979  * _base_wait_for_doorbell_int - waiting for controller interrupt(generated by
2980  * a write to the doorbell)
2981  * @ioc: per adapter object
2982  * @timeout: timeout in second
2983  * @sleep_flag: CAN_SLEEP or NO_SLEEP
2984  *
2985  * Returns 0 for success, non-zero for failure.
2986  *
2987  * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
2988  */
2989 static int
2990 _base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout,
2991 	int sleep_flag)
2992 {
2993 	u32 cntdn, count;
2994 	u32 int_status;
2995 
2996 	count = 0;
2997 	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
2998 	do {
2999 		int_status = readl(&ioc->chip->HostInterruptStatus);
3000 		if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
3001 			dhsprintk(ioc, pr_info(MPT3SAS_FMT
3002 				"%s: successful count(%d), timeout(%d)\n",
3003 				ioc->name, __func__, count, timeout));
3004 			return 0;
3005 		}
3006 		if (sleep_flag == CAN_SLEEP)
3007 			usleep_range(1000, 1500);
3008 		else
3009 			udelay(500);
3010 		count++;
3011 	} while (--cntdn);
3012 
3013 	pr_err(MPT3SAS_FMT
3014 		"%s: failed due to timeout count(%d), int_status(%x)!\n",
3015 		ioc->name, __func__, count, int_status);
3016 	return -EFAULT;
3017 }
3018 
3019 /**
3020  * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
3021  * @ioc: per adapter object
3022  * @timeout: timeout in second
3023  * @sleep_flag: CAN_SLEEP or NO_SLEEP
3024  *
3025  * Returns 0 for success, non-zero for failure.
3026  *
3027  * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to
3028  * doorbell.
3029  */
3030 static int
3031 _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout,
3032 	int sleep_flag)
3033 {
3034 	u32 cntdn, count;
3035 	u32 int_status;
3036 	u32 doorbell;
3037 
3038 	count = 0;
3039 	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
3040 	do {
3041 		int_status = readl(&ioc->chip->HostInterruptStatus);
3042 		if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
3043 			dhsprintk(ioc, pr_info(MPT3SAS_FMT
3044 				"%s: successful count(%d), timeout(%d)\n",
3045 				ioc->name, __func__, count, timeout));
3046 			return 0;
3047 		} else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
3048 			doorbell = readl(&ioc->chip->Doorbell);
3049 			if ((doorbell & MPI2_IOC_STATE_MASK) ==
3050 			    MPI2_IOC_STATE_FAULT) {
3051 				mpt3sas_base_fault_info(ioc , doorbell);
3052 				return -EFAULT;
3053 			}
3054 		} else if (int_status == 0xFFFFFFFF)
3055 			goto out;
3056 
3057 		if (sleep_flag == CAN_SLEEP)
3058 			usleep_range(1000, 1500);
3059 		else
3060 			udelay(500);
3061 		count++;
3062 	} while (--cntdn);
3063 
3064  out:
3065 	pr_err(MPT3SAS_FMT
3066 	 "%s: failed due to timeout count(%d), int_status(%x)!\n",
3067 	 ioc->name, __func__, count, int_status);
3068 	return -EFAULT;
3069 }
3070 
3071 /**
3072  * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
3073  * @ioc: per adapter object
3074  * @timeout: timeout in second
3075  * @sleep_flag: CAN_SLEEP or NO_SLEEP
3076  *
3077  * Returns 0 for success, non-zero for failure.
3078  *
3079  */
3080 static int
3081 _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout,
3082 	int sleep_flag)
3083 {
3084 	u32 cntdn, count;
3085 	u32 doorbell_reg;
3086 
3087 	count = 0;
3088 	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
3089 	do {
3090 		doorbell_reg = readl(&ioc->chip->Doorbell);
3091 		if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
3092 			dhsprintk(ioc, pr_info(MPT3SAS_FMT
3093 				"%s: successful count(%d), timeout(%d)\n",
3094 				ioc->name, __func__, count, timeout));
3095 			return 0;
3096 		}
3097 		if (sleep_flag == CAN_SLEEP)
3098 			usleep_range(1000, 1500);
3099 		else
3100 			udelay(500);
3101 		count++;
3102 	} while (--cntdn);
3103 
3104 	pr_err(MPT3SAS_FMT
3105 		"%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
3106 		ioc->name, __func__, count, doorbell_reg);
3107 	return -EFAULT;
3108 }
3109 
3110 /**
3111  * _base_send_ioc_reset - send doorbell reset
3112  * @ioc: per adapter object
3113  * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
3114  * @timeout: timeout in second
3115  * @sleep_flag: CAN_SLEEP or NO_SLEEP
3116  *
3117  * Returns 0 for success, non-zero for failure.
3118  */
3119 static int
3120 _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout,
3121 	int sleep_flag)
3122 {
3123 	u32 ioc_state;
3124 	int r = 0;
3125 
3126 	if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
3127 		pr_err(MPT3SAS_FMT "%s: unknown reset_type\n",
3128 		    ioc->name, __func__);
3129 		return -EFAULT;
3130 	}
3131 
3132 	if (!(ioc->facts.IOCCapabilities &
3133 	   MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
3134 		return -EFAULT;
3135 
3136 	pr_info(MPT3SAS_FMT "sending message unit reset !!\n", ioc->name);
3137 
3138 	writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
3139 	    &ioc->chip->Doorbell);
3140 	if ((_base_wait_for_doorbell_ack(ioc, 15, sleep_flag))) {
3141 		r = -EFAULT;
3142 		goto out;
3143 	}
3144 	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY,
3145 	    timeout, sleep_flag);
3146 	if (ioc_state) {
3147 		pr_err(MPT3SAS_FMT
3148 			"%s: failed going to ready state (ioc_state=0x%x)\n",
3149 			ioc->name, __func__, ioc_state);
3150 		r = -EFAULT;
3151 		goto out;
3152 	}
3153  out:
3154 	pr_info(MPT3SAS_FMT "message unit reset: %s\n",
3155 	    ioc->name, ((r == 0) ? "SUCCESS" : "FAILED"));
3156 	return r;
3157 }
3158 
3159 /**
3160  * _base_handshake_req_reply_wait - send request thru doorbell interface
3161  * @ioc: per adapter object
3162  * @request_bytes: request length
3163  * @request: pointer having request payload
3164  * @reply_bytes: reply length
3165  * @reply: pointer to reply payload
3166  * @timeout: timeout in second
3167  * @sleep_flag: CAN_SLEEP or NO_SLEEP
3168  *
3169  * Returns 0 for success, non-zero for failure.
3170  */
3171 static int
3172 _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
3173 	u32 *request, int reply_bytes, u16 *reply, int timeout, int sleep_flag)
3174 {
3175 	MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply;
3176 	int i;
3177 	u8 failed;
3178 	u16 dummy;
3179 	__le32 *mfp;
3180 
3181 	/* make sure doorbell is not in use */
3182 	if ((readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
3183 		pr_err(MPT3SAS_FMT
3184 			"doorbell is in use (line=%d)\n",
3185 			ioc->name, __LINE__);
3186 		return -EFAULT;
3187 	}
3188 
3189 	/* clear pending doorbell interrupts from previous state changes */
3190 	if (readl(&ioc->chip->HostInterruptStatus) &
3191 	    MPI2_HIS_IOC2SYS_DB_STATUS)
3192 		writel(0, &ioc->chip->HostInterruptStatus);
3193 
3194 	/* send message to ioc */
3195 	writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) |
3196 	    ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)),
3197 	    &ioc->chip->Doorbell);
3198 
3199 	if ((_base_wait_for_doorbell_int(ioc, 5, NO_SLEEP))) {
3200 		pr_err(MPT3SAS_FMT
3201 			"doorbell handshake int failed (line=%d)\n",
3202 			ioc->name, __LINE__);
3203 		return -EFAULT;
3204 	}
3205 	writel(0, &ioc->chip->HostInterruptStatus);
3206 
3207 	if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag))) {
3208 		pr_err(MPT3SAS_FMT
3209 			"doorbell handshake ack failed (line=%d)\n",
3210 			ioc->name, __LINE__);
3211 		return -EFAULT;
3212 	}
3213 
3214 	/* send message 32-bits at a time */
3215 	for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
3216 		writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
3217 		if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag)))
3218 			failed = 1;
3219 	}
3220 
3221 	if (failed) {
3222 		pr_err(MPT3SAS_FMT
3223 			"doorbell handshake sending request failed (line=%d)\n",
3224 			ioc->name, __LINE__);
3225 		return -EFAULT;
3226 	}
3227 
3228 	/* now wait for the reply */
3229 	if ((_base_wait_for_doorbell_int(ioc, timeout, sleep_flag))) {
3230 		pr_err(MPT3SAS_FMT
3231 			"doorbell handshake int failed (line=%d)\n",
3232 			ioc->name, __LINE__);
3233 		return -EFAULT;
3234 	}
3235 
3236 	/* read the first two 16-bits, it gives the total length of the reply */
3237 	reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell)
3238 	    & MPI2_DOORBELL_DATA_MASK);
3239 	writel(0, &ioc->chip->HostInterruptStatus);
3240 	if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) {
3241 		pr_err(MPT3SAS_FMT
3242 			"doorbell handshake int failed (line=%d)\n",
3243 			ioc->name, __LINE__);
3244 		return -EFAULT;
3245 	}
3246 	reply[1] = le16_to_cpu(readl(&ioc->chip->Doorbell)
3247 	    & MPI2_DOORBELL_DATA_MASK);
3248 	writel(0, &ioc->chip->HostInterruptStatus);
3249 
3250 	for (i = 2; i < default_reply->MsgLength * 2; i++)  {
3251 		if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) {
3252 			pr_err(MPT3SAS_FMT
3253 				"doorbell handshake int failed (line=%d)\n",
3254 				ioc->name, __LINE__);
3255 			return -EFAULT;
3256 		}
3257 		if (i >=  reply_bytes/2) /* overflow case */
3258 			dummy = readl(&ioc->chip->Doorbell);
3259 		else
3260 			reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell)
3261 			    & MPI2_DOORBELL_DATA_MASK);
3262 		writel(0, &ioc->chip->HostInterruptStatus);
3263 	}
3264 
3265 	_base_wait_for_doorbell_int(ioc, 5, sleep_flag);
3266 	if (_base_wait_for_doorbell_not_used(ioc, 5, sleep_flag) != 0) {
3267 		dhsprintk(ioc, pr_info(MPT3SAS_FMT
3268 			"doorbell is in use (line=%d)\n", ioc->name, __LINE__));
3269 	}
3270 	writel(0, &ioc->chip->HostInterruptStatus);
3271 
3272 	if (ioc->logging_level & MPT_DEBUG_INIT) {
3273 		mfp = (__le32 *)reply;
3274 		pr_info("\toffset:data\n");
3275 		for (i = 0; i < reply_bytes/4; i++)
3276 			pr_info("\t[0x%02x]:%08x\n", i*4,
3277 			    le32_to_cpu(mfp[i]));
3278 	}
3279 	return 0;
3280 }
3281 
3282 /**
3283  * mpt3sas_base_sas_iounit_control - send sas iounit control to FW
3284  * @ioc: per adapter object
3285  * @mpi_reply: the reply payload from FW
3286  * @mpi_request: the request payload sent to FW
3287  *
3288  * The SAS IO Unit Control Request message allows the host to perform low-level
3289  * operations, such as resets on the PHYs of the IO Unit, also allows the host
3290  * to obtain the IOC assigned device handles for a device if it has other
3291  * identifying information about the device, in addition allows the host to
3292  * remove IOC resources associated with the device.
3293  *
3294  * Returns 0 for success, non-zero for failure.
3295  */
3296 int
3297 mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
3298 	Mpi2SasIoUnitControlReply_t *mpi_reply,
3299 	Mpi2SasIoUnitControlRequest_t *mpi_request)
3300 {
3301 	u16 smid;
3302 	u32 ioc_state;
3303 	unsigned long timeleft;
3304 	u8 issue_reset;
3305 	int rc;
3306 	void *request;
3307 	u16 wait_state_count;
3308 
3309 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3310 	    __func__));
3311 
3312 	mutex_lock(&ioc->base_cmds.mutex);
3313 
3314 	if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
3315 		pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
3316 		    ioc->name, __func__);
3317 		rc = -EAGAIN;
3318 		goto out;
3319 	}
3320 
3321 	wait_state_count = 0;
3322 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
3323 	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
3324 		if (wait_state_count++ == 10) {
3325 			pr_err(MPT3SAS_FMT
3326 			    "%s: failed due to ioc not operational\n",
3327 			    ioc->name, __func__);
3328 			rc = -EFAULT;
3329 			goto out;
3330 		}
3331 		ssleep(1);
3332 		ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
3333 		pr_info(MPT3SAS_FMT
3334 			"%s: waiting for operational state(count=%d)\n",
3335 			ioc->name, __func__, wait_state_count);
3336 	}
3337 
3338 	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
3339 	if (!smid) {
3340 		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
3341 		    ioc->name, __func__);
3342 		rc = -EAGAIN;
3343 		goto out;
3344 	}
3345 
3346 	rc = 0;
3347 	ioc->base_cmds.status = MPT3_CMD_PENDING;
3348 	request = mpt3sas_base_get_msg_frame(ioc, smid);
3349 	ioc->base_cmds.smid = smid;
3350 	memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t));
3351 	if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
3352 	    mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
3353 		ioc->ioc_link_reset_in_progress = 1;
3354 	init_completion(&ioc->base_cmds.done);
3355 	mpt3sas_base_put_smid_default(ioc, smid);
3356 	timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
3357 	    msecs_to_jiffies(10000));
3358 	if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
3359 	    mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) &&
3360 	    ioc->ioc_link_reset_in_progress)
3361 		ioc->ioc_link_reset_in_progress = 0;
3362 	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
3363 		pr_err(MPT3SAS_FMT "%s: timeout\n",
3364 		    ioc->name, __func__);
3365 		_debug_dump_mf(mpi_request,
3366 		    sizeof(Mpi2SasIoUnitControlRequest_t)/4);
3367 		if (!(ioc->base_cmds.status & MPT3_CMD_RESET))
3368 			issue_reset = 1;
3369 		goto issue_host_reset;
3370 	}
3371 	if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
3372 		memcpy(mpi_reply, ioc->base_cmds.reply,
3373 		    sizeof(Mpi2SasIoUnitControlReply_t));
3374 	else
3375 		memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t));
3376 	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
3377 	goto out;
3378 
3379  issue_host_reset:
3380 	if (issue_reset)
3381 		mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
3382 		    FORCE_BIG_HAMMER);
3383 	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
3384 	rc = -EFAULT;
3385  out:
3386 	mutex_unlock(&ioc->base_cmds.mutex);
3387 	return rc;
3388 }
3389 
3390 /**
3391  * mpt3sas_base_scsi_enclosure_processor - sending request to sep device
3392  * @ioc: per adapter object
3393  * @mpi_reply: the reply payload from FW
3394  * @mpi_request: the request payload sent to FW
3395  *
3396  * The SCSI Enclosure Processor request message causes the IOC to
3397  * communicate with SES devices to control LED status signals.
3398  *
3399  * Returns 0 for success, non-zero for failure.
3400  */
3401 int
3402 mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
3403 	Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request)
3404 {
3405 	u16 smid;
3406 	u32 ioc_state;
3407 	unsigned long timeleft;
3408 	u8 issue_reset;
3409 	int rc;
3410 	void *request;
3411 	u16 wait_state_count;
3412 
3413 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3414 	    __func__));
3415 
3416 	mutex_lock(&ioc->base_cmds.mutex);
3417 
3418 	if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
3419 		pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
3420 		    ioc->name, __func__);
3421 		rc = -EAGAIN;
3422 		goto out;
3423 	}
3424 
3425 	wait_state_count = 0;
3426 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
3427 	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
3428 		if (wait_state_count++ == 10) {
3429 			pr_err(MPT3SAS_FMT
3430 			    "%s: failed due to ioc not operational\n",
3431 			    ioc->name, __func__);
3432 			rc = -EFAULT;
3433 			goto out;
3434 		}
3435 		ssleep(1);
3436 		ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
3437 		pr_info(MPT3SAS_FMT
3438 			"%s: waiting for operational state(count=%d)\n",
3439 			ioc->name,
3440 		    __func__, wait_state_count);
3441 	}
3442 
3443 	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
3444 	if (!smid) {
3445 		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
3446 		    ioc->name, __func__);
3447 		rc = -EAGAIN;
3448 		goto out;
3449 	}
3450 
3451 	rc = 0;
3452 	ioc->base_cmds.status = MPT3_CMD_PENDING;
3453 	request = mpt3sas_base_get_msg_frame(ioc, smid);
3454 	ioc->base_cmds.smid = smid;
3455 	memcpy(request, mpi_request, sizeof(Mpi2SepReply_t));
3456 	init_completion(&ioc->base_cmds.done);
3457 	mpt3sas_base_put_smid_default(ioc, smid);
3458 	timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
3459 	    msecs_to_jiffies(10000));
3460 	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
3461 		pr_err(MPT3SAS_FMT "%s: timeout\n",
3462 		    ioc->name, __func__);
3463 		_debug_dump_mf(mpi_request,
3464 		    sizeof(Mpi2SepRequest_t)/4);
3465 		if (!(ioc->base_cmds.status & MPT3_CMD_RESET))
3466 			issue_reset = 1;
3467 		goto issue_host_reset;
3468 	}
3469 	if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
3470 		memcpy(mpi_reply, ioc->base_cmds.reply,
3471 		    sizeof(Mpi2SepReply_t));
3472 	else
3473 		memset(mpi_reply, 0, sizeof(Mpi2SepReply_t));
3474 	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
3475 	goto out;
3476 
3477  issue_host_reset:
3478 	if (issue_reset)
3479 		mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
3480 		    FORCE_BIG_HAMMER);
3481 	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
3482 	rc = -EFAULT;
3483  out:
3484 	mutex_unlock(&ioc->base_cmds.mutex);
3485 	return rc;
3486 }
3487 
3488 /**
3489  * _base_get_port_facts - obtain port facts reply and save in ioc
3490  * @ioc: per adapter object
3491  * @sleep_flag: CAN_SLEEP or NO_SLEEP
3492  *
3493  * Returns 0 for success, non-zero for failure.
3494  */
3495 static int
3496 _base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port, int sleep_flag)
3497 {
3498 	Mpi2PortFactsRequest_t mpi_request;
3499 	Mpi2PortFactsReply_t mpi_reply;
3500 	struct mpt3sas_port_facts *pfacts;
3501 	int mpi_reply_sz, mpi_request_sz, r;
3502 
3503 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3504 	    __func__));
3505 
3506 	mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
3507 	mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
3508 	memset(&mpi_request, 0, mpi_request_sz);
3509 	mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
3510 	mpi_request.PortNumber = port;
3511 	r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
3512 	    (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP);
3513 
3514 	if (r != 0) {
3515 		pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
3516 		    ioc->name, __func__, r);
3517 		return r;
3518 	}
3519 
3520 	pfacts = &ioc->pfacts[port];
3521 	memset(pfacts, 0, sizeof(struct mpt3sas_port_facts));
3522 	pfacts->PortNumber = mpi_reply.PortNumber;
3523 	pfacts->VP_ID = mpi_reply.VP_ID;
3524 	pfacts->VF_ID = mpi_reply.VF_ID;
3525 	pfacts->MaxPostedCmdBuffers =
3526 	    le16_to_cpu(mpi_reply.MaxPostedCmdBuffers);
3527 
3528 	return 0;
3529 }
3530 
3531 /**
3532  * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
3533  * @ioc: per adapter object
3534  * @sleep_flag: CAN_SLEEP or NO_SLEEP
3535  *
3536  * Returns 0 for success, non-zero for failure.
3537  */
3538 static int
3539 _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
3540 {
3541 	Mpi2IOCFactsRequest_t mpi_request;
3542 	Mpi2IOCFactsReply_t mpi_reply;
3543 	struct mpt3sas_facts *facts;
3544 	int mpi_reply_sz, mpi_request_sz, r;
3545 
3546 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3547 	    __func__));
3548 
3549 	mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
3550 	mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t);
3551 	memset(&mpi_request, 0, mpi_request_sz);
3552 	mpi_request.Function = MPI2_FUNCTION_IOC_FACTS;
3553 	r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
3554 	    (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP);
3555 
3556 	if (r != 0) {
3557 		pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
3558 		    ioc->name, __func__, r);
3559 		return r;
3560 	}
3561 
3562 	facts = &ioc->facts;
3563 	memset(facts, 0, sizeof(struct mpt3sas_facts));
3564 	facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
3565 	facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
3566 	facts->VP_ID = mpi_reply.VP_ID;
3567 	facts->VF_ID = mpi_reply.VF_ID;
3568 	facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions);
3569 	facts->MaxChainDepth = mpi_reply.MaxChainDepth;
3570 	facts->WhoInit = mpi_reply.WhoInit;
3571 	facts->NumberOfPorts = mpi_reply.NumberOfPorts;
3572 	facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors;
3573 	facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
3574 	facts->MaxReplyDescriptorPostQueueDepth =
3575 	    le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
3576 	facts->ProductID = le16_to_cpu(mpi_reply.ProductID);
3577 	facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
3578 	if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
3579 		ioc->ir_firmware = 1;
3580 	facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
3581 	facts->IOCRequestFrameSize =
3582 	    le16_to_cpu(mpi_reply.IOCRequestFrameSize);
3583 	facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators);
3584 	facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets);
3585 	ioc->shost->max_id = -1;
3586 	facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders);
3587 	facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures);
3588 	facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags);
3589 	facts->HighPriorityCredit =
3590 	    le16_to_cpu(mpi_reply.HighPriorityCredit);
3591 	facts->ReplyFrameSize = mpi_reply.ReplyFrameSize;
3592 	facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle);
3593 
3594 	dinitprintk(ioc, pr_info(MPT3SAS_FMT
3595 		"hba queue depth(%d), max chains per io(%d)\n",
3596 		ioc->name, facts->RequestCredit,
3597 	    facts->MaxChainDepth));
3598 	dinitprintk(ioc, pr_info(MPT3SAS_FMT
3599 		"request frame size(%d), reply frame size(%d)\n", ioc->name,
3600 	    facts->IOCRequestFrameSize * 4, facts->ReplyFrameSize * 4));
3601 	return 0;
3602 }
3603 
3604 /**
3605  * _base_send_ioc_init - send ioc_init to firmware
3606  * @ioc: per adapter object
3607  * @sleep_flag: CAN_SLEEP or NO_SLEEP
3608  *
3609  * Returns 0 for success, non-zero for failure.
3610  */
3611 static int
3612 _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
3613 {
3614 	Mpi2IOCInitRequest_t mpi_request;
3615 	Mpi2IOCInitReply_t mpi_reply;
3616 	int r;
3617 	struct timeval current_time;
3618 	u16 ioc_status;
3619 
3620 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3621 	    __func__));
3622 
3623 	memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
3624 	mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
3625 	mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
3626 	mpi_request.VF_ID = 0; /* TODO */
3627 	mpi_request.VP_ID = 0;
3628 	mpi_request.MsgVersion = cpu_to_le16(MPI2_VERSION);
3629 	mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
3630 
3631 	if (_base_is_controller_msix_enabled(ioc))
3632 		mpi_request.HostMSIxVectors = ioc->reply_queue_count;
3633 	mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
3634 	mpi_request.ReplyDescriptorPostQueueDepth =
3635 	    cpu_to_le16(ioc->reply_post_queue_depth);
3636 	mpi_request.ReplyFreeQueueDepth =
3637 	    cpu_to_le16(ioc->reply_free_queue_depth);
3638 
3639 	mpi_request.SenseBufferAddressHigh =
3640 	    cpu_to_le32((u64)ioc->sense_dma >> 32);
3641 	mpi_request.SystemReplyAddressHigh =
3642 	    cpu_to_le32((u64)ioc->reply_dma >> 32);
3643 	mpi_request.SystemRequestFrameBaseAddress =
3644 	    cpu_to_le64((u64)ioc->request_dma);
3645 	mpi_request.ReplyFreeQueueAddress =
3646 	    cpu_to_le64((u64)ioc->reply_free_dma);
3647 	mpi_request.ReplyDescriptorPostQueueAddress =
3648 	    cpu_to_le64((u64)ioc->reply_post_free_dma);
3649 
3650 
3651 	/* This time stamp specifies number of milliseconds
3652 	 * since epoch ~ midnight January 1, 1970.
3653 	 */
3654 	do_gettimeofday(&current_time);
3655 	mpi_request.TimeStamp = cpu_to_le64((u64)current_time.tv_sec * 1000 +
3656 	    (current_time.tv_usec / 1000));
3657 
3658 	if (ioc->logging_level & MPT_DEBUG_INIT) {
3659 		__le32 *mfp;
3660 		int i;
3661 
3662 		mfp = (__le32 *)&mpi_request;
3663 		pr_info("\toffset:data\n");
3664 		for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
3665 			pr_info("\t[0x%02x]:%08x\n", i*4,
3666 			    le32_to_cpu(mfp[i]));
3667 	}
3668 
3669 	r = _base_handshake_req_reply_wait(ioc,
3670 	    sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
3671 	    sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10,
3672 	    sleep_flag);
3673 
3674 	if (r != 0) {
3675 		pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
3676 		    ioc->name, __func__, r);
3677 		return r;
3678 	}
3679 
3680 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
3681 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
3682 	    mpi_reply.IOCLogInfo) {
3683 		pr_err(MPT3SAS_FMT "%s: failed\n", ioc->name, __func__);
3684 		r = -EIO;
3685 	}
3686 
3687 	return 0;
3688 }
3689 
3690 /**
3691  * mpt3sas_port_enable_done - command completion routine for port enable
3692  * @ioc: per adapter object
3693  * @smid: system request message index
3694  * @msix_index: MSIX table index supplied by the OS
3695  * @reply: reply message frame(lower 32bit addr)
3696  *
3697  * Return 1 meaning mf should be freed from _base_interrupt
3698  *        0 means the mf is freed from this function.
3699  */
3700 u8
3701 mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
3702 	u32 reply)
3703 {
3704 	MPI2DefaultReply_t *mpi_reply;
3705 	u16 ioc_status;
3706 
3707 	if (ioc->port_enable_cmds.status == MPT3_CMD_NOT_USED)
3708 		return 1;
3709 
3710 	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
3711 	if (!mpi_reply)
3712 		return 1;
3713 
3714 	if (mpi_reply->Function != MPI2_FUNCTION_PORT_ENABLE)
3715 		return 1;
3716 
3717 	ioc->port_enable_cmds.status &= ~MPT3_CMD_PENDING;
3718 	ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE;
3719 	ioc->port_enable_cmds.status |= MPT3_CMD_REPLY_VALID;
3720 	memcpy(ioc->port_enable_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
3721 	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
3722 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
3723 		ioc->port_enable_failed = 1;
3724 
3725 	if (ioc->is_driver_loading) {
3726 		if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
3727 			mpt3sas_port_enable_complete(ioc);
3728 			return 1;
3729 		} else {
3730 			ioc->start_scan_failed = ioc_status;
3731 			ioc->start_scan = 0;
3732 			return 1;
3733 		}
3734 	}
3735 	complete(&ioc->port_enable_cmds.done);
3736 	return 1;
3737 }
3738 
3739 /**
3740  * _base_send_port_enable - send port_enable(discovery stuff) to firmware
3741  * @ioc: per adapter object
3742  * @sleep_flag: CAN_SLEEP or NO_SLEEP
3743  *
3744  * Returns 0 for success, non-zero for failure.
3745  */
3746 static int
3747 _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
3748 {
3749 	Mpi2PortEnableRequest_t *mpi_request;
3750 	Mpi2PortEnableReply_t *mpi_reply;
3751 	unsigned long timeleft;
3752 	int r = 0;
3753 	u16 smid;
3754 	u16 ioc_status;
3755 
3756 	pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);
3757 
3758 	if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
3759 		pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
3760 		    ioc->name, __func__);
3761 		return -EAGAIN;
3762 	}
3763 
3764 	smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
3765 	if (!smid) {
3766 		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
3767 		    ioc->name, __func__);
3768 		return -EAGAIN;
3769 	}
3770 
3771 	ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
3772 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
3773 	ioc->port_enable_cmds.smid = smid;
3774 	memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
3775 	mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
3776 
3777 	init_completion(&ioc->port_enable_cmds.done);
3778 	mpt3sas_base_put_smid_default(ioc, smid);
3779 	timeleft = wait_for_completion_timeout(&ioc->port_enable_cmds.done,
3780 	    300*HZ);
3781 	if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
3782 		pr_err(MPT3SAS_FMT "%s: timeout\n",
3783 		    ioc->name, __func__);
3784 		_debug_dump_mf(mpi_request,
3785 		    sizeof(Mpi2PortEnableRequest_t)/4);
3786 		if (ioc->port_enable_cmds.status & MPT3_CMD_RESET)
3787 			r = -EFAULT;
3788 		else
3789 			r = -ETIME;
3790 		goto out;
3791 	}
3792 
3793 	mpi_reply = ioc->port_enable_cmds.reply;
3794 	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
3795 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
3796 		pr_err(MPT3SAS_FMT "%s: failed with (ioc_status=0x%08x)\n",
3797 		    ioc->name, __func__, ioc_status);
3798 		r = -EFAULT;
3799 		goto out;
3800 	}
3801 
3802  out:
3803 	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
3804 	pr_info(MPT3SAS_FMT "port enable: %s\n", ioc->name, ((r == 0) ?
3805 	    "SUCCESS" : "FAILED"));
3806 	return r;
3807 }
3808 
3809 /**
3810  * mpt3sas_port_enable - initiate firmware discovery (don't wait for reply)
3811  * @ioc: per adapter object
3812  *
3813  * Returns 0 for success, non-zero for failure.
3814  */
3815 int
3816 mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
3817 {
3818 	Mpi2PortEnableRequest_t *mpi_request;
3819 	u16 smid;
3820 
3821 	pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);
3822 
3823 	if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
3824 		pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
3825 		    ioc->name, __func__);
3826 		return -EAGAIN;
3827 	}
3828 
3829 	smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
3830 	if (!smid) {
3831 		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
3832 		    ioc->name, __func__);
3833 		return -EAGAIN;
3834 	}
3835 
3836 	ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
3837 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
3838 	ioc->port_enable_cmds.smid = smid;
3839 	memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
3840 	mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
3841 
3842 	mpt3sas_base_put_smid_default(ioc, smid);
3843 	return 0;
3844 }
3845 
3846 /**
3847  * _base_determine_wait_on_discovery - desposition
3848  * @ioc: per adapter object
3849  *
3850  * Decide whether to wait on discovery to complete. Used to either
3851  * locate boot device, or report volumes ahead of physical devices.
3852  *
3853  * Returns 1 for wait, 0 for don't wait
3854  */
3855 static int
3856 _base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc)
3857 {
3858 	/* We wait for discovery to complete if IR firmware is loaded.
3859 	 * The sas topology events arrive before PD events, so we need time to
3860 	 * turn on the bit in ioc->pd_handles to indicate PD
3861 	 * Also, it maybe required to report Volumes ahead of physical
3862 	 * devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set.
3863 	 */
3864 	if (ioc->ir_firmware)
3865 		return 1;
3866 
3867 	/* if no Bios, then we don't need to wait */
3868 	if (!ioc->bios_pg3.BiosVersion)
3869 		return 0;
3870 
3871 	/* Bios is present, then we drop down here.
3872 	 *
3873 	 * If there any entries in the Bios Page 2, then we wait
3874 	 * for discovery to complete.
3875 	 */
3876 
3877 	/* Current Boot Device */
3878 	if ((ioc->bios_pg2.CurrentBootDeviceForm &
3879 	    MPI2_BIOSPAGE2_FORM_MASK) ==
3880 	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
3881 	/* Request Boot Device */
3882 	   (ioc->bios_pg2.ReqBootDeviceForm &
3883 	    MPI2_BIOSPAGE2_FORM_MASK) ==
3884 	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
3885 	/* Alternate Request Boot Device */
3886 	   (ioc->bios_pg2.ReqAltBootDeviceForm &
3887 	    MPI2_BIOSPAGE2_FORM_MASK) ==
3888 	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED)
3889 		return 0;
3890 
3891 	return 1;
3892 }
3893 
3894 /**
3895  * _base_unmask_events - turn on notification for this event
3896  * @ioc: per adapter object
3897  * @event: firmware event
3898  *
3899  * The mask is stored in ioc->event_masks.
3900  */
3901 static void
3902 _base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event)
3903 {
3904 	u32 desired_event;
3905 
3906 	if (event >= 128)
3907 		return;
3908 
3909 	desired_event = (1 << (event % 32));
3910 
3911 	if (event < 32)
3912 		ioc->event_masks[0] &= ~desired_event;
3913 	else if (event < 64)
3914 		ioc->event_masks[1] &= ~desired_event;
3915 	else if (event < 96)
3916 		ioc->event_masks[2] &= ~desired_event;
3917 	else if (event < 128)
3918 		ioc->event_masks[3] &= ~desired_event;
3919 }
3920 
3921 /**
3922  * _base_event_notification - send event notification
3923  * @ioc: per adapter object
3924  * @sleep_flag: CAN_SLEEP or NO_SLEEP
3925  *
3926  * Returns 0 for success, non-zero for failure.
3927  */
3928 static int
3929 _base_event_notification(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
3930 {
3931 	Mpi2EventNotificationRequest_t *mpi_request;
3932 	unsigned long timeleft;
3933 	u16 smid;
3934 	int r = 0;
3935 	int i;
3936 
3937 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3938 	    __func__));
3939 
3940 	if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
3941 		pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
3942 		    ioc->name, __func__);
3943 		return -EAGAIN;
3944 	}
3945 
3946 	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
3947 	if (!smid) {
3948 		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
3949 		    ioc->name, __func__);
3950 		return -EAGAIN;
3951 	}
3952 	ioc->base_cmds.status = MPT3_CMD_PENDING;
3953 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
3954 	ioc->base_cmds.smid = smid;
3955 	memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t));
3956 	mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
3957 	mpi_request->VF_ID = 0; /* TODO */
3958 	mpi_request->VP_ID = 0;
3959 	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
3960 		mpi_request->EventMasks[i] =
3961 		    cpu_to_le32(ioc->event_masks[i]);
3962 	init_completion(&ioc->base_cmds.done);
3963 	mpt3sas_base_put_smid_default(ioc, smid);
3964 	timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
3965 	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
3966 		pr_err(MPT3SAS_FMT "%s: timeout\n",
3967 		    ioc->name, __func__);
3968 		_debug_dump_mf(mpi_request,
3969 		    sizeof(Mpi2EventNotificationRequest_t)/4);
3970 		if (ioc->base_cmds.status & MPT3_CMD_RESET)
3971 			r = -EFAULT;
3972 		else
3973 			r = -ETIME;
3974 	} else
3975 		dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s: complete\n",
3976 		    ioc->name, __func__));
3977 	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
3978 	return r;
3979 }
3980 
3981 /**
3982  * mpt3sas_base_validate_event_type - validating event types
3983  * @ioc: per adapter object
3984  * @event: firmware event
3985  *
3986  * This will turn on firmware event notification when application
3987  * ask for that event. We don't mask events that are already enabled.
3988  */
3989 void
3990 mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type)
3991 {
3992 	int i, j;
3993 	u32 event_mask, desired_event;
3994 	u8 send_update_to_fw;
3995 
3996 	for (i = 0, send_update_to_fw = 0; i <
3997 	    MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) {
3998 		event_mask = ~event_type[i];
3999 		desired_event = 1;
4000 		for (j = 0; j < 32; j++) {
4001 			if (!(event_mask & desired_event) &&
4002 			    (ioc->event_masks[i] & desired_event)) {
4003 				ioc->event_masks[i] &= ~desired_event;
4004 				send_update_to_fw = 1;
4005 			}
4006 			desired_event = (desired_event << 1);
4007 		}
4008 	}
4009 
4010 	if (!send_update_to_fw)
4011 		return;
4012 
4013 	mutex_lock(&ioc->base_cmds.mutex);
4014 	_base_event_notification(ioc, CAN_SLEEP);
4015 	mutex_unlock(&ioc->base_cmds.mutex);
4016 }
4017 
4018 /**
4019  * _base_diag_reset - the "big hammer" start of day reset
4020  * @ioc: per adapter object
4021  * @sleep_flag: CAN_SLEEP or NO_SLEEP
4022  *
4023  * Returns 0 for success, non-zero for failure.
4024  */
4025 static int
4026 _base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
4027 {
4028 	u32 host_diagnostic;
4029 	u32 ioc_state;
4030 	u32 count;
4031 	u32 hcb_size;
4032 
4033 	pr_info(MPT3SAS_FMT "sending diag reset !!\n", ioc->name);
4034 
4035 	drsprintk(ioc, pr_info(MPT3SAS_FMT "clear interrupts\n",
4036 	    ioc->name));
4037 
4038 	count = 0;
4039 	do {
4040 		/* Write magic sequence to WriteSequence register
4041 		 * Loop until in diagnostic mode
4042 		 */
4043 		drsprintk(ioc, pr_info(MPT3SAS_FMT
4044 			"write magic sequence\n", ioc->name));
4045 		writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
4046 		writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
4047 		writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
4048 		writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence);
4049 		writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence);
4050 		writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence);
4051 		writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);
4052 
4053 		/* wait 100 msec */
4054 		if (sleep_flag == CAN_SLEEP)
4055 			msleep(100);
4056 		else
4057 			mdelay(100);
4058 
4059 		if (count++ > 20)
4060 			goto out;
4061 
4062 		host_diagnostic = readl(&ioc->chip->HostDiagnostic);
4063 		drsprintk(ioc, pr_info(MPT3SAS_FMT
4064 			"wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
4065 		    ioc->name, count, host_diagnostic));
4066 
4067 	} while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
4068 
4069 	hcb_size = readl(&ioc->chip->HCBSize);
4070 
4071 	drsprintk(ioc, pr_info(MPT3SAS_FMT "diag reset: issued\n",
4072 	    ioc->name));
4073 	writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
4074 	     &ioc->chip->HostDiagnostic);
4075 
4076 	/*This delay allows the chip PCIe hardware time to finish reset tasks*/
4077 	if (sleep_flag == CAN_SLEEP)
4078 		msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
4079 	else
4080 		mdelay(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
4081 
4082 	/* Approximately 300 second max wait */
4083 	for (count = 0; count < (300000000 /
4084 		MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {
4085 
4086 		host_diagnostic = readl(&ioc->chip->HostDiagnostic);
4087 
4088 		if (host_diagnostic == 0xFFFFFFFF)
4089 			goto out;
4090 		if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
4091 			break;
4092 
4093 		/* Wait to pass the second read delay window */
4094 		if (sleep_flag == CAN_SLEEP)
4095 			msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC
4096 								/ 1000);
4097 		else
4098 			mdelay(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC
4099 								/ 1000);
4100 	}
4101 
4102 	if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
4103 
4104 		drsprintk(ioc, pr_info(MPT3SAS_FMT
4105 		"restart the adapter assuming the HCB Address points to good F/W\n",
4106 		    ioc->name));
4107 		host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
4108 		host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
4109 		writel(host_diagnostic, &ioc->chip->HostDiagnostic);
4110 
4111 		drsprintk(ioc, pr_info(MPT3SAS_FMT
4112 		    "re-enable the HCDW\n", ioc->name));
4113 		writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
4114 		    &ioc->chip->HCBSize);
4115 	}
4116 
4117 	drsprintk(ioc, pr_info(MPT3SAS_FMT "restart the adapter\n",
4118 	    ioc->name));
4119 	writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
4120 	    &ioc->chip->HostDiagnostic);
4121 
4122 	drsprintk(ioc, pr_info(MPT3SAS_FMT
4123 		"disable writes to the diagnostic register\n", ioc->name));
4124 	writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
4125 
4126 	drsprintk(ioc, pr_info(MPT3SAS_FMT
4127 		"Wait for FW to go to the READY state\n", ioc->name));
4128 	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20,
4129 	    sleep_flag);
4130 	if (ioc_state) {
4131 		pr_err(MPT3SAS_FMT
4132 			"%s: failed going to ready state (ioc_state=0x%x)\n",
4133 			ioc->name, __func__, ioc_state);
4134 		goto out;
4135 	}
4136 
4137 	pr_info(MPT3SAS_FMT "diag reset: SUCCESS\n", ioc->name);
4138 	return 0;
4139 
4140  out:
4141 	pr_err(MPT3SAS_FMT "diag reset: FAILED\n", ioc->name);
4142 	return -EFAULT;
4143 }
4144 
4145 /**
4146  * _base_make_ioc_ready - put controller in READY state
4147  * @ioc: per adapter object
4148  * @sleep_flag: CAN_SLEEP or NO_SLEEP
4149  * @type: FORCE_BIG_HAMMER or SOFT_RESET
4150  *
4151  * Returns 0 for success, non-zero for failure.
4152  */
4153 static int
4154 _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
4155 	enum reset_type type)
4156 {
4157 	u32 ioc_state;
4158 	int rc;
4159 	int count;
4160 
4161 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4162 	    __func__));
4163 
4164 	if (ioc->pci_error_recovery)
4165 		return 0;
4166 
4167 	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
4168 	dhsprintk(ioc, pr_info(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n",
4169 	    ioc->name, __func__, ioc_state));
4170 
4171 	/* if in RESET state, it should move to READY state shortly */
4172 	count = 0;
4173 	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
4174 		while ((ioc_state & MPI2_IOC_STATE_MASK) !=
4175 		    MPI2_IOC_STATE_READY) {
4176 			if (count++ == 10) {
4177 				pr_err(MPT3SAS_FMT
4178 					"%s: failed going to ready state (ioc_state=0x%x)\n",
4179 				    ioc->name, __func__, ioc_state);
4180 				return -EFAULT;
4181 			}
4182 			if (sleep_flag == CAN_SLEEP)
4183 				ssleep(1);
4184 			else
4185 				mdelay(1000);
4186 			ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
4187 		}
4188 	}
4189 
4190 	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY)
4191 		return 0;
4192 
4193 	if (ioc_state & MPI2_DOORBELL_USED) {
4194 		dhsprintk(ioc, pr_info(MPT3SAS_FMT
4195 			"unexpected doorbell active!\n",
4196 			ioc->name));
4197 		goto issue_diag_reset;
4198 	}
4199 
4200 	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
4201 		mpt3sas_base_fault_info(ioc, ioc_state &
4202 		    MPI2_DOORBELL_DATA_MASK);
4203 		goto issue_diag_reset;
4204 	}
4205 
4206 	if (type == FORCE_BIG_HAMMER)
4207 		goto issue_diag_reset;
4208 
4209 	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
4210 		if (!(_base_send_ioc_reset(ioc,
4211 		    MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15, CAN_SLEEP))) {
4212 			return 0;
4213 	}
4214 
4215  issue_diag_reset:
4216 	rc = _base_diag_reset(ioc, CAN_SLEEP);
4217 	return rc;
4218 }
4219 
4220 /**
4221  * _base_make_ioc_operational - put controller in OPERATIONAL state
4222  * @ioc: per adapter object
4223  * @sleep_flag: CAN_SLEEP or NO_SLEEP
4224  *
4225  * Returns 0 for success, non-zero for failure.
4226  */
4227 static int
4228 _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
4229 {
4230 	int r, i;
4231 	unsigned long	flags;
4232 	u32 reply_address;
4233 	u16 smid;
4234 	struct _tr_list *delayed_tr, *delayed_tr_next;
4235 	struct adapter_reply_queue *reply_q;
4236 	long reply_post_free;
4237 	u32 reply_post_free_sz;
4238 
4239 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4240 	    __func__));
4241 
4242 	/* clean the delayed target reset list */
4243 	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
4244 	    &ioc->delayed_tr_list, list) {
4245 		list_del(&delayed_tr->list);
4246 		kfree(delayed_tr);
4247 	}
4248 
4249 
4250 	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
4251 	    &ioc->delayed_tr_volume_list, list) {
4252 		list_del(&delayed_tr->list);
4253 		kfree(delayed_tr);
4254 	}
4255 
4256 	/* initialize the scsi lookup free list */
4257 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4258 	INIT_LIST_HEAD(&ioc->free_list);
4259 	smid = 1;
4260 	for (i = 0; i < ioc->scsiio_depth; i++, smid++) {
4261 		INIT_LIST_HEAD(&ioc->scsi_lookup[i].chain_list);
4262 		ioc->scsi_lookup[i].cb_idx = 0xFF;
4263 		ioc->scsi_lookup[i].smid = smid;
4264 		ioc->scsi_lookup[i].scmd = NULL;
4265 		list_add_tail(&ioc->scsi_lookup[i].tracker_list,
4266 		    &ioc->free_list);
4267 	}
4268 
4269 	/* hi-priority queue */
4270 	INIT_LIST_HEAD(&ioc->hpr_free_list);
4271 	smid = ioc->hi_priority_smid;
4272 	for (i = 0; i < ioc->hi_priority_depth; i++, smid++) {
4273 		ioc->hpr_lookup[i].cb_idx = 0xFF;
4274 		ioc->hpr_lookup[i].smid = smid;
4275 		list_add_tail(&ioc->hpr_lookup[i].tracker_list,
4276 		    &ioc->hpr_free_list);
4277 	}
4278 
4279 	/* internal queue */
4280 	INIT_LIST_HEAD(&ioc->internal_free_list);
4281 	smid = ioc->internal_smid;
4282 	for (i = 0; i < ioc->internal_depth; i++, smid++) {
4283 		ioc->internal_lookup[i].cb_idx = 0xFF;
4284 		ioc->internal_lookup[i].smid = smid;
4285 		list_add_tail(&ioc->internal_lookup[i].tracker_list,
4286 		    &ioc->internal_free_list);
4287 	}
4288 
4289 	/* chain pool */
4290 	INIT_LIST_HEAD(&ioc->free_chain_list);
4291 	for (i = 0; i < ioc->chain_depth; i++)
4292 		list_add_tail(&ioc->chain_lookup[i].tracker_list,
4293 		    &ioc->free_chain_list);
4294 
4295 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4296 
4297 	/* initialize Reply Free Queue */
4298 	for (i = 0, reply_address = (u32)ioc->reply_dma ;
4299 	    i < ioc->reply_free_queue_depth ; i++, reply_address +=
4300 	    ioc->reply_sz)
4301 		ioc->reply_free[i] = cpu_to_le32(reply_address);
4302 
4303 	/* initialize reply queues */
4304 	if (ioc->is_driver_loading)
4305 		_base_assign_reply_queues(ioc);
4306 
4307 	/* initialize Reply Post Free Queue */
4308 	reply_post_free = (long)ioc->reply_post_free;
4309 	reply_post_free_sz = ioc->reply_post_queue_depth *
4310 	    sizeof(Mpi2DefaultReplyDescriptor_t);
4311 	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
4312 		reply_q->reply_post_host_index = 0;
4313 		reply_q->reply_post_free = (Mpi2ReplyDescriptorsUnion_t *)
4314 		    reply_post_free;
4315 		for (i = 0; i < ioc->reply_post_queue_depth; i++)
4316 			reply_q->reply_post_free[i].Words =
4317 			    cpu_to_le64(ULLONG_MAX);
4318 		if (!_base_is_controller_msix_enabled(ioc))
4319 			goto skip_init_reply_post_free_queue;
4320 		reply_post_free += reply_post_free_sz;
4321 	}
4322  skip_init_reply_post_free_queue:
4323 
4324 	r = _base_send_ioc_init(ioc, sleep_flag);
4325 	if (r)
4326 		return r;
4327 
4328 	/* initialize reply free host index */
4329 	ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
4330 	writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);
4331 
4332 	/* initialize reply post host index */
4333 	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
4334 		writel(reply_q->msix_index << MPI2_RPHI_MSIX_INDEX_SHIFT,
4335 		    &ioc->chip->ReplyPostHostIndex);
4336 		if (!_base_is_controller_msix_enabled(ioc))
4337 			goto skip_init_reply_post_host_index;
4338 	}
4339 
4340  skip_init_reply_post_host_index:
4341 
4342 	_base_unmask_interrupts(ioc);
4343 	r = _base_event_notification(ioc, sleep_flag);
4344 	if (r)
4345 		return r;
4346 
4347 	if (sleep_flag == CAN_SLEEP)
4348 		_base_static_config_pages(ioc);
4349 
4350 
4351 	if (ioc->is_driver_loading) {
4352 		ioc->wait_for_discovery_to_complete =
4353 		    _base_determine_wait_on_discovery(ioc);
4354 
4355 		return r; /* scan_start and scan_finished support */
4356 	}
4357 
4358 	r = _base_send_port_enable(ioc, sleep_flag);
4359 	if (r)
4360 		return r;
4361 
4362 	return r;
4363 }
4364 
4365 /**
4366  * mpt3sas_base_free_resources - free resources controller resources
4367  * @ioc: per adapter object
4368  *
4369  * Return nothing.
4370  */
4371 void
4372 mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
4373 {
4374 	struct pci_dev *pdev = ioc->pdev;
4375 
4376 	dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4377 	    __func__));
4378 
4379 	if (ioc->chip_phys && ioc->chip) {
4380 		_base_mask_interrupts(ioc);
4381 		ioc->shost_recovery = 1;
4382 		_base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
4383 		ioc->shost_recovery = 0;
4384 	}
4385 
4386 	_base_free_irq(ioc);
4387 	_base_disable_msix(ioc);
4388 
4389 	if (ioc->chip_phys && ioc->chip)
4390 		iounmap(ioc->chip);
4391 	ioc->chip_phys = 0;
4392 
4393 	if (pci_is_enabled(pdev)) {
4394 		pci_release_selected_regions(ioc->pdev, ioc->bars);
4395 		pci_disable_pcie_error_reporting(pdev);
4396 		pci_disable_device(pdev);
4397 	}
4398 	return;
4399 }
4400 
4401 /**
4402  * mpt3sas_base_attach - attach controller instance
4403  * @ioc: per adapter object
4404  *
4405  * Returns 0 for success, non-zero for failure.
4406  */
4407 int
4408 mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
4409 {
4410 	int r, i;
4411 	int cpu_id, last_cpu_id = 0;
4412 
4413 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4414 	    __func__));
4415 
4416 	/* setup cpu_msix_table */
4417 	ioc->cpu_count = num_online_cpus();
4418 	for_each_online_cpu(cpu_id)
4419 		last_cpu_id = cpu_id;
4420 	ioc->cpu_msix_table_sz = last_cpu_id + 1;
4421 	ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
4422 	ioc->reply_queue_count = 1;
4423 	if (!ioc->cpu_msix_table) {
4424 		dfailprintk(ioc, pr_info(MPT3SAS_FMT
4425 			"allocation for cpu_msix_table failed!!!\n",
4426 			ioc->name));
4427 		r = -ENOMEM;
4428 		goto out_free_resources;
4429 	}
4430 
4431 	r = mpt3sas_base_map_resources(ioc);
4432 	if (r)
4433 		goto out_free_resources;
4434 
4435 
4436 	pci_set_drvdata(ioc->pdev, ioc->shost);
4437 	r = _base_get_ioc_facts(ioc, CAN_SLEEP);
4438 	if (r)
4439 		goto out_free_resources;
4440 
4441 	/*
4442 	 * In SAS3.0,
4443 	 * SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU, Target Assist, and
4444 	 * Target Status - all require the IEEE formated scatter gather
4445 	 * elements.
4446 	 */
4447 
4448 	ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
4449 	ioc->build_sg = &_base_build_sg_ieee;
4450 	ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
4451 	ioc->mpi25 = 1;
4452 	ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
4453 
4454 	/*
4455 	 * These function pointers for other requests that don't
4456 	 * the require IEEE scatter gather elements.
4457 	 *
4458 	 * For example Configuration Pages and SAS IOUNIT Control don't.
4459 	 */
4460 	ioc->build_sg_mpi = &_base_build_sg;
4461 	ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge;
4462 
4463 	r = _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
4464 	if (r)
4465 		goto out_free_resources;
4466 
4467 	ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
4468 	    sizeof(struct mpt3sas_port_facts), GFP_KERNEL);
4469 	if (!ioc->pfacts) {
4470 		r = -ENOMEM;
4471 		goto out_free_resources;
4472 	}
4473 
4474 	for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
4475 		r = _base_get_port_facts(ioc, i, CAN_SLEEP);
4476 		if (r)
4477 			goto out_free_resources;
4478 	}
4479 
4480 	r = _base_allocate_memory_pools(ioc, CAN_SLEEP);
4481 	if (r)
4482 		goto out_free_resources;
4483 
4484 	init_waitqueue_head(&ioc->reset_wq);
4485 
4486 	/* allocate memory pd handle bitmask list */
4487 	ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
4488 	if (ioc->facts.MaxDevHandle % 8)
4489 		ioc->pd_handles_sz++;
4490 	ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
4491 	    GFP_KERNEL);
4492 	if (!ioc->pd_handles) {
4493 		r = -ENOMEM;
4494 		goto out_free_resources;
4495 	}
4496 	ioc->blocking_handles = kzalloc(ioc->pd_handles_sz,
4497 	    GFP_KERNEL);
4498 	if (!ioc->blocking_handles) {
4499 		r = -ENOMEM;
4500 		goto out_free_resources;
4501 	}
4502 
4503 	ioc->fwfault_debug = mpt3sas_fwfault_debug;
4504 
4505 	/* base internal command bits */
4506 	mutex_init(&ioc->base_cmds.mutex);
4507 	ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4508 	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
4509 
4510 	/* port_enable command bits */
4511 	ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4512 	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
4513 
4514 	/* transport internal command bits */
4515 	ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4516 	ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
4517 	mutex_init(&ioc->transport_cmds.mutex);
4518 
4519 	/* scsih internal command bits */
4520 	ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4521 	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
4522 	mutex_init(&ioc->scsih_cmds.mutex);
4523 
4524 	/* task management internal command bits */
4525 	ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4526 	ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
4527 	mutex_init(&ioc->tm_cmds.mutex);
4528 
4529 	/* config page internal command bits */
4530 	ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4531 	ioc->config_cmds.status = MPT3_CMD_NOT_USED;
4532 	mutex_init(&ioc->config_cmds.mutex);
4533 
4534 	/* ctl module internal command bits */
4535 	ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4536 	ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
4537 	ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
4538 	mutex_init(&ioc->ctl_cmds.mutex);
4539 
4540 	if (!ioc->base_cmds.reply || !ioc->transport_cmds.reply ||
4541 	    !ioc->scsih_cmds.reply || !ioc->tm_cmds.reply ||
4542 	    !ioc->config_cmds.reply || !ioc->ctl_cmds.reply ||
4543 	    !ioc->ctl_cmds.sense) {
4544 		r = -ENOMEM;
4545 		goto out_free_resources;
4546 	}
4547 
4548 	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
4549 		ioc->event_masks[i] = -1;
4550 
4551 	/* here we enable the events we care about */
4552 	_base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
4553 	_base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
4554 	_base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
4555 	_base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
4556 	_base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
4557 	_base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
4558 	_base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME);
4559 	_base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
4560 	_base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
4561 	_base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
4562 
4563 	r = _base_make_ioc_operational(ioc, CAN_SLEEP);
4564 	if (r)
4565 		goto out_free_resources;
4566 
4567 	return 0;
4568 
4569  out_free_resources:
4570 
4571 	ioc->remove_host = 1;
4572 
4573 	mpt3sas_base_free_resources(ioc);
4574 	_base_release_memory_pools(ioc);
4575 	pci_set_drvdata(ioc->pdev, NULL);
4576 	kfree(ioc->cpu_msix_table);
4577 	kfree(ioc->pd_handles);
4578 	kfree(ioc->blocking_handles);
4579 	kfree(ioc->tm_cmds.reply);
4580 	kfree(ioc->transport_cmds.reply);
4581 	kfree(ioc->scsih_cmds.reply);
4582 	kfree(ioc->config_cmds.reply);
4583 	kfree(ioc->base_cmds.reply);
4584 	kfree(ioc->port_enable_cmds.reply);
4585 	kfree(ioc->ctl_cmds.reply);
4586 	kfree(ioc->ctl_cmds.sense);
4587 	kfree(ioc->pfacts);
4588 	ioc->ctl_cmds.reply = NULL;
4589 	ioc->base_cmds.reply = NULL;
4590 	ioc->tm_cmds.reply = NULL;
4591 	ioc->scsih_cmds.reply = NULL;
4592 	ioc->transport_cmds.reply = NULL;
4593 	ioc->config_cmds.reply = NULL;
4594 	ioc->pfacts = NULL;
4595 	return r;
4596 }
4597 
4598 
4599 /**
4600  * mpt3sas_base_detach - remove controller instance
4601  * @ioc: per adapter object
4602  *
4603  * Return nothing.
4604  */
4605 void
4606 mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
4607 {
4608 	dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4609 	    __func__));
4610 
4611 	mpt3sas_base_stop_watchdog(ioc);
4612 	mpt3sas_base_free_resources(ioc);
4613 	_base_release_memory_pools(ioc);
4614 	pci_set_drvdata(ioc->pdev, NULL);
4615 	kfree(ioc->cpu_msix_table);
4616 	kfree(ioc->pd_handles);
4617 	kfree(ioc->blocking_handles);
4618 	kfree(ioc->pfacts);
4619 	kfree(ioc->ctl_cmds.reply);
4620 	kfree(ioc->ctl_cmds.sense);
4621 	kfree(ioc->base_cmds.reply);
4622 	kfree(ioc->port_enable_cmds.reply);
4623 	kfree(ioc->tm_cmds.reply);
4624 	kfree(ioc->transport_cmds.reply);
4625 	kfree(ioc->scsih_cmds.reply);
4626 	kfree(ioc->config_cmds.reply);
4627 }
4628 
4629 /**
4630  * _base_reset_handler - reset callback handler (for base)
4631  * @ioc: per adapter object
4632  * @reset_phase: phase
4633  *
4634  * The handler for doing any required cleanup or initialization.
4635  *
4636  * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET,
4637  * MPT3_IOC_DONE_RESET
4638  *
4639  * Return nothing.
4640  */
4641 static void
4642 _base_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
4643 {
4644 	mpt3sas_scsih_reset_handler(ioc, reset_phase);
4645 	mpt3sas_ctl_reset_handler(ioc, reset_phase);
4646 	switch (reset_phase) {
4647 	case MPT3_IOC_PRE_RESET:
4648 		dtmprintk(ioc, pr_info(MPT3SAS_FMT
4649 		"%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
4650 		break;
4651 	case MPT3_IOC_AFTER_RESET:
4652 		dtmprintk(ioc, pr_info(MPT3SAS_FMT
4653 		"%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
4654 		if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
4655 			ioc->transport_cmds.status |= MPT3_CMD_RESET;
4656 			mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
4657 			complete(&ioc->transport_cmds.done);
4658 		}
4659 		if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
4660 			ioc->base_cmds.status |= MPT3_CMD_RESET;
4661 			mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid);
4662 			complete(&ioc->base_cmds.done);
4663 		}
4664 		if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
4665 			ioc->port_enable_failed = 1;
4666 			ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
4667 			mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
4668 			if (ioc->is_driver_loading) {
4669 				ioc->start_scan_failed =
4670 				    MPI2_IOCSTATUS_INTERNAL_ERROR;
4671 				ioc->start_scan = 0;
4672 				ioc->port_enable_cmds.status =
4673 				    MPT3_CMD_NOT_USED;
4674 			} else
4675 				complete(&ioc->port_enable_cmds.done);
4676 		}
4677 		if (ioc->config_cmds.status & MPT3_CMD_PENDING) {
4678 			ioc->config_cmds.status |= MPT3_CMD_RESET;
4679 			mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid);
4680 			ioc->config_cmds.smid = USHRT_MAX;
4681 			complete(&ioc->config_cmds.done);
4682 		}
4683 		break;
4684 	case MPT3_IOC_DONE_RESET:
4685 		dtmprintk(ioc, pr_info(MPT3SAS_FMT
4686 			"%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
4687 		break;
4688 	}
4689 }
4690 
4691 /**
4692  * _wait_for_commands_to_complete - reset controller
4693  * @ioc: Pointer to MPT_ADAPTER structure
4694  * @sleep_flag: CAN_SLEEP or NO_SLEEP
4695  *
4696  * This function waiting(3s) for all pending commands to complete
4697  * prior to putting controller in reset.
4698  */
4699 static void
4700 _wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
4701 {
4702 	u32 ioc_state;
4703 	unsigned long flags;
4704 	u16 i;
4705 
4706 	ioc->pending_io_count = 0;
4707 	if (sleep_flag != CAN_SLEEP)
4708 		return;
4709 
4710 	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
4711 	if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
4712 		return;
4713 
4714 	/* pending command count */
4715 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4716 	for (i = 0; i < ioc->scsiio_depth; i++)
4717 		if (ioc->scsi_lookup[i].cb_idx != 0xFF)
4718 			ioc->pending_io_count++;
4719 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4720 
4721 	if (!ioc->pending_io_count)
4722 		return;
4723 
4724 	/* wait for pending commands to complete */
4725 	wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
4726 }
4727 
4728 /**
4729  * mpt3sas_base_hard_reset_handler - reset controller
4730  * @ioc: Pointer to MPT_ADAPTER structure
4731  * @sleep_flag: CAN_SLEEP or NO_SLEEP
4732  * @type: FORCE_BIG_HAMMER or SOFT_RESET
4733  *
4734  * Returns 0 for success, non-zero for failure.
4735  */
4736 int
4737 mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
4738 	enum reset_type type)
4739 {
4740 	int r;
4741 	unsigned long flags;
4742 	u32 ioc_state;
4743 	u8 is_fault = 0, is_trigger = 0;
4744 
4745 	dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
4746 	    __func__));
4747 
4748 	if (ioc->pci_error_recovery) {
4749 		pr_err(MPT3SAS_FMT "%s: pci error recovery reset\n",
4750 		    ioc->name, __func__);
4751 		r = 0;
4752 		goto out_unlocked;
4753 	}
4754 
4755 	if (mpt3sas_fwfault_debug)
4756 		mpt3sas_halt_firmware(ioc);
4757 
4758 	/* TODO - What we really should be doing is pulling
4759 	 * out all the code associated with NO_SLEEP; its never used.
4760 	 * That is legacy code from mpt fusion driver, ported over.
4761 	 * I will leave this BUG_ON here for now till its been resolved.
4762 	 */
4763 	BUG_ON(sleep_flag == NO_SLEEP);
4764 
4765 	/* wait for an active reset in progress to complete */
4766 	if (!mutex_trylock(&ioc->reset_in_progress_mutex)) {
4767 		do {
4768 			ssleep(1);
4769 		} while (ioc->shost_recovery == 1);
4770 		dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
4771 		    __func__));
4772 		return ioc->ioc_reset_in_progress_status;
4773 	}
4774 
4775 	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
4776 	ioc->shost_recovery = 1;
4777 	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
4778 
4779 	if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
4780 	    MPT3_DIAG_BUFFER_IS_REGISTERED) &&
4781 	    (!(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
4782 	    MPT3_DIAG_BUFFER_IS_RELEASED))) {
4783 		is_trigger = 1;
4784 		ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
4785 		if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
4786 			is_fault = 1;
4787 	}
4788 	_base_reset_handler(ioc, MPT3_IOC_PRE_RESET);
4789 	_wait_for_commands_to_complete(ioc, sleep_flag);
4790 	_base_mask_interrupts(ioc);
4791 	r = _base_make_ioc_ready(ioc, sleep_flag, type);
4792 	if (r)
4793 		goto out;
4794 	_base_reset_handler(ioc, MPT3_IOC_AFTER_RESET);
4795 
4796 	/* If this hard reset is called while port enable is active, then
4797 	 * there is no reason to call make_ioc_operational
4798 	 */
4799 	if (ioc->is_driver_loading && ioc->port_enable_failed) {
4800 		ioc->remove_host = 1;
4801 		r = -EFAULT;
4802 		goto out;
4803 	}
4804 	r = _base_get_ioc_facts(ioc, CAN_SLEEP);
4805 	if (r)
4806 		goto out;
4807 	r = _base_make_ioc_operational(ioc, sleep_flag);
4808 	if (!r)
4809 		_base_reset_handler(ioc, MPT3_IOC_DONE_RESET);
4810 
4811  out:
4812 	dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: %s\n",
4813 	    ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED")));
4814 
4815 	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
4816 	ioc->ioc_reset_in_progress_status = r;
4817 	ioc->shost_recovery = 0;
4818 	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
4819 	ioc->ioc_reset_count++;
4820 	mutex_unlock(&ioc->reset_in_progress_mutex);
4821 
4822  out_unlocked:
4823 	if ((r == 0) && is_trigger) {
4824 		if (is_fault)
4825 			mpt3sas_trigger_master(ioc, MASTER_TRIGGER_FW_FAULT);
4826 		else
4827 			mpt3sas_trigger_master(ioc,
4828 			    MASTER_TRIGGER_ADAPTER_RESET);
4829 	}
4830 	dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
4831 	    __func__));
4832 	return r;
4833 }
4834