xref: /linux/drivers/scsi/mpi3mr/mpi3mr_fw.c (revision 189f164e573e18d9f8876dbd3ad8fcbe11f93037)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Driver for Broadcom MPI3 Storage Controllers
4  *
5  * Copyright (C) 2017-2023 Broadcom Inc.
6  *  (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
7  *
8  */
9 
10 #include "mpi3mr.h"
11 #include <linux/io-64-nonatomic-lo-hi.h>
12 
13 static int
14 mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, u16 reset_reason);
15 static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc);
16 static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
17 	struct mpi3_ioc_facts_data *facts_data);
18 static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc,
19 	struct mpi3mr_drv_cmd *drv_cmd);
20 static int mpi3mr_check_op_admin_proc(struct mpi3mr_ioc *mrioc);
21 static int poll_queues;
22 module_param(poll_queues, int, 0444);
23 MODULE_PARM_DESC(poll_queues, "Number of queues for io_uring poll mode. (Range 1 - 126)");
24 static bool threaded_isr_poll = true;
25 module_param(threaded_isr_poll, bool, 0444);
26 MODULE_PARM_DESC(threaded_isr_poll,
27 			"Enablement of IRQ polling thread (default=true)");
28 
29 #if defined(writeq) && defined(CONFIG_64BIT)
mpi3mr_writeq(__u64 b,void __iomem * addr,spinlock_t * write_queue_lock)30 static inline void mpi3mr_writeq(__u64 b, void __iomem *addr,
31 	spinlock_t *write_queue_lock)
32 {
33 	writeq(b, addr);
34 }
35 #else
mpi3mr_writeq(__u64 b,void __iomem * addr,spinlock_t * write_queue_lock)36 static inline void mpi3mr_writeq(__u64 b, void __iomem *addr,
37 	spinlock_t *write_queue_lock)
38 {
39 	__u64 data_out = b;
40 	unsigned long flags;
41 
42 	spin_lock_irqsave(write_queue_lock, flags);
43 	writel((u32)(data_out), addr);
44 	writel((u32)(data_out >> 32), (addr + 4));
45 	spin_unlock_irqrestore(write_queue_lock, flags);
46 }
47 #endif
48 
49 static inline bool
mpi3mr_check_req_qfull(struct op_req_qinfo * op_req_q)50 mpi3mr_check_req_qfull(struct op_req_qinfo *op_req_q)
51 {
52 	u16 pi, ci, max_entries;
53 	bool is_qfull = false;
54 
55 	pi = op_req_q->pi;
56 	ci = READ_ONCE(op_req_q->ci);
57 	max_entries = op_req_q->num_requests;
58 
59 	if ((ci == (pi + 1)) || ((!ci) && (pi == (max_entries - 1))))
60 		is_qfull = true;
61 
62 	return is_qfull;
63 }
64 
mpi3mr_sync_irqs(struct mpi3mr_ioc * mrioc)65 static void mpi3mr_sync_irqs(struct mpi3mr_ioc *mrioc)
66 {
67 	u16 i, max_vectors;
68 
69 	max_vectors = mrioc->intr_info_count;
70 
71 	for (i = 0; i < max_vectors; i++)
72 		synchronize_irq(pci_irq_vector(mrioc->pdev, i));
73 }
74 
mpi3mr_ioc_disable_intr(struct mpi3mr_ioc * mrioc)75 void mpi3mr_ioc_disable_intr(struct mpi3mr_ioc *mrioc)
76 {
77 	mrioc->intr_enabled = 0;
78 	mpi3mr_sync_irqs(mrioc);
79 }
80 
mpi3mr_ioc_enable_intr(struct mpi3mr_ioc * mrioc)81 void mpi3mr_ioc_enable_intr(struct mpi3mr_ioc *mrioc)
82 {
83 	mrioc->intr_enabled = 1;
84 }
85 
mpi3mr_cleanup_isr(struct mpi3mr_ioc * mrioc)86 static void mpi3mr_cleanup_isr(struct mpi3mr_ioc *mrioc)
87 {
88 	u16 i;
89 
90 	mpi3mr_ioc_disable_intr(mrioc);
91 
92 	if (!mrioc->intr_info)
93 		return;
94 
95 	for (i = 0; i < mrioc->intr_info_count; i++)
96 		free_irq(pci_irq_vector(mrioc->pdev, i),
97 		    (mrioc->intr_info + i));
98 
99 	kfree(mrioc->intr_info);
100 	mrioc->intr_info = NULL;
101 	mrioc->intr_info_count = 0;
102 	mrioc->is_intr_info_set = false;
103 	pci_free_irq_vectors(mrioc->pdev);
104 }
105 
/*
 * Fill one simple SGE at @paddr with the given flags, length and
 * little-endian encoded DMA address.
 */
void mpi3mr_add_sg_single(void *paddr, u8 flags, u32 length,
	dma_addr_t dma_addr)
{
	struct mpi3_sge_common *sge = paddr;

	sge->flags = flags;
	sge->length = cpu_to_le32(length);
	sge->address = cpu_to_le64(dma_addr);
}
115 
mpi3mr_build_zero_len_sge(void * paddr)116 void mpi3mr_build_zero_len_sge(void *paddr)
117 {
118 	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
119 
120 	mpi3mr_add_sg_single(paddr, sgl_flags, 0, -1);
121 }
122 
/*
 * Translate a reply frame DMA address into its virtual address.
 * Returns NULL for a zero address or one outside the reply pool.
 */
void *mpi3mr_get_reply_virt_addr(struct mpi3mr_ioc *mrioc,
	dma_addr_t phys_addr)
{
	if (!phys_addr || phys_addr < mrioc->reply_buf_dma ||
	    phys_addr > mrioc->reply_buf_dma_max_address)
		return NULL;

	return mrioc->reply_buf + (phys_addr - mrioc->reply_buf_dma);
}
135 
/*
 * Translate a sense buffer DMA address into its virtual address,
 * or NULL when the address is zero.
 */
void *mpi3mr_get_sensebuf_virt_addr(struct mpi3mr_ioc *mrioc,
	dma_addr_t phys_addr)
{
	return phys_addr ?
	    mrioc->sense_buf + (phys_addr - mrioc->sense_buf_dma) : NULL;
}
144 
/*
 * Return a consumed reply frame to the reply free queue and notify
 * the firmware by bumping the host index register; serialized by the
 * reply free queue lock.
 */
static void mpi3mr_repost_reply_buf(struct mpi3mr_ioc *mrioc,
	u64 reply_dma)
{
	unsigned long flags;
	u32 idx;

	spin_lock_irqsave(&mrioc->reply_free_queue_lock, flags);
	idx = mrioc->reply_free_queue_host_index;
	mrioc->reply_free_queue_host_index =
	    (idx + 1 == mrioc->reply_free_qsz) ? 0 : idx + 1;
	mrioc->reply_free_q[idx] = cpu_to_le64(reply_dma);
	writel(mrioc->reply_free_queue_host_index,
	    &mrioc->sysif_regs->reply_free_host_index);
	spin_unlock_irqrestore(&mrioc->reply_free_queue_lock, flags);
}
162 
/*
 * Return a consumed sense buffer to the sense buffer free queue and
 * notify the firmware via the host index register; serialized by the
 * sense buffer queue lock.
 */
void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc,
	u64 sense_buf_dma)
{
	unsigned long flags;
	u32 idx;

	spin_lock_irqsave(&mrioc->sbq_lock, flags);
	idx = mrioc->sbq_host_index;
	mrioc->sbq_host_index =
	    (idx + 1 == mrioc->sense_buf_q_sz) ? 0 : idx + 1;
	mrioc->sense_buf_q[idx] = cpu_to_le64(sense_buf_dma);
	writel(mrioc->sbq_host_index,
	    &mrioc->sysif_regs->sense_buffer_free_host_index);
	spin_unlock_irqrestore(&mrioc->sbq_lock, flags);
}
179 
mpi3mr_print_event_data(struct mpi3mr_ioc * mrioc,struct mpi3_event_notification_reply * event_reply)180 static void mpi3mr_print_event_data(struct mpi3mr_ioc *mrioc,
181 	struct mpi3_event_notification_reply *event_reply)
182 {
183 	char *desc = NULL;
184 	u16 event;
185 
186 	if (!(mrioc->logging_level & MPI3_DEBUG_EVENT))
187 		return;
188 
189 	event = event_reply->event;
190 
191 	switch (event) {
192 	case MPI3_EVENT_LOG_DATA:
193 		desc = "Log Data";
194 		break;
195 	case MPI3_EVENT_CHANGE:
196 		desc = "Event Change";
197 		break;
198 	case MPI3_EVENT_GPIO_INTERRUPT:
199 		desc = "GPIO Interrupt";
200 		break;
201 	case MPI3_EVENT_CABLE_MGMT:
202 		desc = "Cable Management";
203 		break;
204 	case MPI3_EVENT_ENERGY_PACK_CHANGE:
205 		desc = "Energy Pack Change";
206 		break;
207 	case MPI3_EVENT_DEVICE_ADDED:
208 	{
209 		struct mpi3_device_page0 *event_data =
210 		    (struct mpi3_device_page0 *)event_reply->event_data;
211 		ioc_info(mrioc, "Device Added: dev=0x%04x Form=0x%x\n",
212 		    event_data->dev_handle, event_data->device_form);
213 		return;
214 	}
215 	case MPI3_EVENT_DEVICE_INFO_CHANGED:
216 	{
217 		struct mpi3_device_page0 *event_data =
218 		    (struct mpi3_device_page0 *)event_reply->event_data;
219 		ioc_info(mrioc, "Device Info Changed: dev=0x%04x Form=0x%x\n",
220 		    event_data->dev_handle, event_data->device_form);
221 		return;
222 	}
223 	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
224 	{
225 		struct mpi3_event_data_device_status_change *event_data =
226 		    (struct mpi3_event_data_device_status_change *)event_reply->event_data;
227 		ioc_info(mrioc, "Device status Change: dev=0x%04x RC=0x%x\n",
228 		    event_data->dev_handle, event_data->reason_code);
229 		return;
230 	}
231 	case MPI3_EVENT_SAS_DISCOVERY:
232 	{
233 		struct mpi3_event_data_sas_discovery *event_data =
234 		    (struct mpi3_event_data_sas_discovery *)event_reply->event_data;
235 		ioc_info(mrioc, "SAS Discovery: (%s) status (0x%08x)\n",
236 		    (event_data->reason_code == MPI3_EVENT_SAS_DISC_RC_STARTED) ?
237 		    "start" : "stop",
238 		    le32_to_cpu(event_data->discovery_status));
239 		return;
240 	}
241 	case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
242 		desc = "SAS Broadcast Primitive";
243 		break;
244 	case MPI3_EVENT_SAS_NOTIFY_PRIMITIVE:
245 		desc = "SAS Notify Primitive";
246 		break;
247 	case MPI3_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
248 		desc = "SAS Init Device Status Change";
249 		break;
250 	case MPI3_EVENT_SAS_INIT_TABLE_OVERFLOW:
251 		desc = "SAS Init Table Overflow";
252 		break;
253 	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
254 		desc = "SAS Topology Change List";
255 		break;
256 	case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
257 		desc = "Enclosure Device Status Change";
258 		break;
259 	case MPI3_EVENT_ENCL_DEVICE_ADDED:
260 		desc = "Enclosure Added";
261 		break;
262 	case MPI3_EVENT_HARD_RESET_RECEIVED:
263 		desc = "Hard Reset Received";
264 		break;
265 	case MPI3_EVENT_SAS_PHY_COUNTER:
266 		desc = "SAS PHY Counter";
267 		break;
268 	case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
269 		desc = "SAS Device Discovery Error";
270 		break;
271 	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
272 		desc = "PCIE Topology Change List";
273 		break;
274 	case MPI3_EVENT_PCIE_ENUMERATION:
275 	{
276 		struct mpi3_event_data_pcie_enumeration *event_data =
277 		    (struct mpi3_event_data_pcie_enumeration *)event_reply->event_data;
278 		ioc_info(mrioc, "PCIE Enumeration: (%s)",
279 		    (event_data->reason_code ==
280 		    MPI3_EVENT_PCIE_ENUM_RC_STARTED) ? "start" : "stop");
281 		if (event_data->enumeration_status)
282 			ioc_info(mrioc, "enumeration_status(0x%08x)\n",
283 			    le32_to_cpu(event_data->enumeration_status));
284 		return;
285 	}
286 	case MPI3_EVENT_PREPARE_FOR_RESET:
287 		desc = "Prepare For Reset";
288 		break;
289 	case MPI3_EVENT_DIAGNOSTIC_BUFFER_STATUS_CHANGE:
290 		desc = "Diagnostic Buffer Status Change";
291 		break;
292 	}
293 
294 	if (!desc)
295 		return;
296 
297 	ioc_info(mrioc, "%s\n", desc);
298 }
299 
mpi3mr_handle_events(struct mpi3mr_ioc * mrioc,struct mpi3_default_reply * def_reply)300 static void mpi3mr_handle_events(struct mpi3mr_ioc *mrioc,
301 	struct mpi3_default_reply *def_reply)
302 {
303 	struct mpi3_event_notification_reply *event_reply =
304 	    (struct mpi3_event_notification_reply *)def_reply;
305 
306 	mrioc->change_count = le16_to_cpu(event_reply->ioc_change_count);
307 	mpi3mr_print_event_data(mrioc, event_reply);
308 	mpi3mr_os_handle_events(mrioc, event_reply);
309 }
310 
311 static struct mpi3mr_drv_cmd *
mpi3mr_get_drv_cmd(struct mpi3mr_ioc * mrioc,u16 host_tag,struct mpi3_default_reply * def_reply)312 mpi3mr_get_drv_cmd(struct mpi3mr_ioc *mrioc, u16 host_tag,
313 	struct mpi3_default_reply *def_reply)
314 {
315 	u16 idx;
316 
317 	switch (host_tag) {
318 	case MPI3MR_HOSTTAG_INITCMDS:
319 		return &mrioc->init_cmds;
320 	case MPI3MR_HOSTTAG_CFG_CMDS:
321 		return &mrioc->cfg_cmds;
322 	case MPI3MR_HOSTTAG_BSG_CMDS:
323 		return &mrioc->bsg_cmds;
324 	case MPI3MR_HOSTTAG_BLK_TMS:
325 		return &mrioc->host_tm_cmds;
326 	case MPI3MR_HOSTTAG_PEL_ABORT:
327 		return &mrioc->pel_abort_cmd;
328 	case MPI3MR_HOSTTAG_PEL_WAIT:
329 		return &mrioc->pel_cmds;
330 	case MPI3MR_HOSTTAG_TRANSPORT_CMDS:
331 		return &mrioc->transport_cmds;
332 	case MPI3MR_HOSTTAG_INVALID:
333 		if (def_reply && def_reply->function ==
334 		    MPI3_FUNCTION_EVENT_NOTIFICATION)
335 			mpi3mr_handle_events(mrioc, def_reply);
336 		return NULL;
337 	default:
338 		break;
339 	}
340 	if (host_tag >= MPI3MR_HOSTTAG_DEVRMCMD_MIN &&
341 	    host_tag <= MPI3MR_HOSTTAG_DEVRMCMD_MAX) {
342 		idx = host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
343 		return &mrioc->dev_rmhs_cmds[idx];
344 	}
345 
346 	if (host_tag >= MPI3MR_HOSTTAG_EVTACKCMD_MIN &&
347 	    host_tag <= MPI3MR_HOSTTAG_EVTACKCMD_MAX) {
348 		idx = host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
349 		return &mrioc->evtack_cmds[idx];
350 	}
351 
352 	return NULL;
353 }
354 
/**
 * mpi3mr_process_admin_reply_desc - Process an admin reply descriptor
 * @mrioc: Adapter instance reference
 * @reply_desc: Reply descriptor frame to be processed
 * @reply_dma: Place holder for the reply frame's DMA address (set to
 *             the address-reply frame's address, or left 0)
 *
 * Decodes one admin reply descriptor (status, address-reply or
 * success), extracts host tag, IOC status and log info (plus sense
 * data for SCSI IO replies), then completes the matching internal
 * driver command tracker - waking its waiter or invoking its
 * callback - and reposts any consumed sense buffer.
 */
static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
	struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma)
{
	u16 desc_type, host_tag = 0;
	u16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
	u16 masked_ioc_status = MPI3_IOCSTATUS_SUCCESS;
	u32 ioc_loginfo = 0, sense_count = 0;
	struct mpi3_status_reply_descriptor *status_desc;
	struct mpi3_address_reply_descriptor *addr_desc;
	struct mpi3_success_reply_descriptor *success_desc;
	struct mpi3_default_reply *def_reply = NULL;
	struct mpi3mr_drv_cmd *cmdptr = NULL;
	struct mpi3_scsi_io_reply *scsi_reply;
	struct scsi_sense_hdr sshdr;
	u8 *sense_buf = NULL;

	*reply_dma = 0;
	desc_type = le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;

	switch (desc_type) {
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
		/* Inline status: everything lives in the descriptor itself. */
		status_desc = (struct mpi3_status_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(status_desc->host_tag);
		ioc_status = le16_to_cpu(status_desc->ioc_status);
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
		masked_ioc_status = ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
		mpi3mr_reply_trigger(mrioc, masked_ioc_status, ioc_loginfo);
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
		/* Full reply frame referenced by DMA address. */
		addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc;
		*reply_dma = le64_to_cpu(addr_desc->reply_frame_address);
		def_reply = mpi3mr_get_reply_virt_addr(mrioc, *reply_dma);
		if (!def_reply)
			goto out;
		host_tag = le16_to_cpu(def_reply->host_tag);
		ioc_status = le16_to_cpu(def_reply->ioc_status);
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(def_reply->ioc_log_info);
		masked_ioc_status = ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
		if (def_reply->function == MPI3_FUNCTION_SCSI_IO) {
			scsi_reply = (struct mpi3_scsi_io_reply *)def_reply;
			sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc,
			    le64_to_cpu(scsi_reply->sense_data_buffer_address));
			sense_count = le32_to_cpu(scsi_reply->sense_count);
			if (sense_buf) {
				scsi_normalize_sense(sense_buf, sense_count,
				    &sshdr);
				mpi3mr_scsisense_trigger(mrioc, sshdr.sense_key,
				    sshdr.asc, sshdr.ascq);
			}
		}
		mpi3mr_reply_trigger(mrioc, masked_ioc_status, ioc_loginfo);
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
		success_desc = (struct mpi3_success_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(success_desc->host_tag);
		break;
	default:
		break;
	}

	cmdptr = mpi3mr_get_drv_cmd(mrioc, host_tag, def_reply);
	if (cmdptr && (cmdptr->state & MPI3MR_CMD_PENDING)) {
		cmdptr->state |= MPI3MR_CMD_COMPLETE;
		cmdptr->ioc_loginfo = ioc_loginfo;
		/* BSG consumers want the unmasked IOC status. */
		if (host_tag == MPI3MR_HOSTTAG_BSG_CMDS)
			cmdptr->ioc_status = ioc_status;
		else
			cmdptr->ioc_status = masked_ioc_status;
		cmdptr->state &= ~MPI3MR_CMD_PENDING;
		if (def_reply) {
			cmdptr->state |= MPI3MR_CMD_REPLY_VALID;
			memcpy((u8 *)cmdptr->reply, (u8 *)def_reply,
			    mrioc->reply_sz);
		}
		if (sense_buf && cmdptr->sensebuf) {
			cmdptr->is_sense = 1;
			memcpy(cmdptr->sensebuf, sense_buf,
			       MPI3MR_SENSE_BUF_SZ);
		}
		if (cmdptr->is_waiting) {
			cmdptr->is_waiting = 0;
			complete(&cmdptr->done);
		} else if (cmdptr->callback)
			cmdptr->callback(mrioc, cmdptr);
	}
out:
	/* Return the sense buffer to the firmware's free pool. */
	if (sense_buf)
		mpi3mr_repost_sense_buf(mrioc,
		    le64_to_cpu(scsi_reply->sense_data_buffer_address));
}
451 
mpi3mr_process_admin_reply_q(struct mpi3mr_ioc * mrioc)452 int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
453 {
454 	u32 exp_phase = mrioc->admin_reply_ephase;
455 	u32 admin_reply_ci = mrioc->admin_reply_ci;
456 	u32 num_admin_replies = 0;
457 	u64 reply_dma = 0;
458 	u16 threshold_comps = 0;
459 	struct mpi3_default_reply_descriptor *reply_desc;
460 
461 	if (!atomic_add_unless(&mrioc->admin_reply_q_in_use, 1, 1)) {
462 		atomic_inc(&mrioc->admin_pend_isr);
463 		return 0;
464 	}
465 
466 	atomic_set(&mrioc->admin_pend_isr, 0);
467 	reply_desc = (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
468 	    admin_reply_ci;
469 
470 	if ((le16_to_cpu(reply_desc->reply_flags) &
471 	    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
472 		atomic_dec(&mrioc->admin_reply_q_in_use);
473 		return 0;
474 	}
475 
476 	do {
477 		if (mrioc->unrecoverable || mrioc->io_admin_reset_sync)
478 			break;
479 
480 		mrioc->admin_req_ci = le16_to_cpu(reply_desc->request_queue_ci);
481 		mpi3mr_process_admin_reply_desc(mrioc, reply_desc, &reply_dma);
482 		if (reply_dma)
483 			mpi3mr_repost_reply_buf(mrioc, reply_dma);
484 		num_admin_replies++;
485 		threshold_comps++;
486 		if (++admin_reply_ci == mrioc->num_admin_replies) {
487 			admin_reply_ci = 0;
488 			exp_phase ^= 1;
489 		}
490 		reply_desc =
491 		    (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
492 		    admin_reply_ci;
493 		if ((le16_to_cpu(reply_desc->reply_flags) &
494 		    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
495 			break;
496 		if (threshold_comps == MPI3MR_THRESHOLD_REPLY_COUNT) {
497 			writel(admin_reply_ci,
498 			    &mrioc->sysif_regs->admin_reply_queue_ci);
499 			threshold_comps = 0;
500 		}
501 	} while (1);
502 
503 	writel(admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
504 	mrioc->admin_reply_ci = admin_reply_ci;
505 	mrioc->admin_reply_ephase = exp_phase;
506 	atomic_dec(&mrioc->admin_reply_q_in_use);
507 
508 	return num_admin_replies;
509 }
510 
511 /**
512  * mpi3mr_get_reply_desc - get reply descriptor frame corresponding to
513  *	queue's consumer index from operational reply descriptor queue.
514  * @op_reply_q: op_reply_qinfo object
515  * @reply_ci: operational reply descriptor's queue consumer index
516  *
517  * Returns: reply descriptor frame address
518  */
519 static inline struct mpi3_default_reply_descriptor *
mpi3mr_get_reply_desc(struct op_reply_qinfo * op_reply_q,u32 reply_ci)520 mpi3mr_get_reply_desc(struct op_reply_qinfo *op_reply_q, u32 reply_ci)
521 {
522 	void *segment_base_addr;
523 	struct segments *segments = op_reply_q->q_segments;
524 	struct mpi3_default_reply_descriptor *reply_desc = NULL;
525 
526 	segment_base_addr =
527 	    segments[reply_ci / op_reply_q->segment_qd].segment;
528 	reply_desc = (struct mpi3_default_reply_descriptor *)segment_base_addr +
529 	    (reply_ci % op_reply_q->segment_qd);
530 	return reply_desc;
531 }
532 
533 /**
534  * mpi3mr_process_op_reply_q - Operational reply queue handler
535  * @mrioc: Adapter instance reference
536  * @op_reply_q: Operational reply queue info
537  *
538  * Checks the specific operational reply queue and drains the
539  * reply queue entries until the queue is empty and process the
540  * individual reply descriptors.
541  *
542  * Return: 0 if queue is already processed,or number of reply
543  *	    descriptors processed.
544  */
mpi3mr_process_op_reply_q(struct mpi3mr_ioc * mrioc,struct op_reply_qinfo * op_reply_q)545 int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
546 	struct op_reply_qinfo *op_reply_q)
547 {
548 	struct op_req_qinfo *op_req_q;
549 	u32 exp_phase;
550 	u32 reply_ci;
551 	u32 num_op_reply = 0;
552 	u64 reply_dma = 0;
553 	struct mpi3_default_reply_descriptor *reply_desc;
554 	u16 req_q_idx = 0, reply_qidx, threshold_comps = 0;
555 
556 	reply_qidx = op_reply_q->qid - 1;
557 
558 	if (!atomic_add_unless(&op_reply_q->in_use, 1, 1))
559 		return 0;
560 
561 	exp_phase = op_reply_q->ephase;
562 	reply_ci = op_reply_q->ci;
563 
564 	reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);
565 	if ((le16_to_cpu(reply_desc->reply_flags) &
566 	    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
567 		atomic_dec(&op_reply_q->in_use);
568 		return 0;
569 	}
570 
571 	do {
572 		if (mrioc->unrecoverable || mrioc->io_admin_reset_sync)
573 			break;
574 
575 		req_q_idx = le16_to_cpu(reply_desc->request_queue_id) - 1;
576 		op_req_q = &mrioc->req_qinfo[req_q_idx];
577 
578 		WRITE_ONCE(op_req_q->ci, le16_to_cpu(reply_desc->request_queue_ci));
579 		mpi3mr_process_op_reply_desc(mrioc, reply_desc, &reply_dma,
580 		    reply_qidx);
581 
582 		if (reply_dma)
583 			mpi3mr_repost_reply_buf(mrioc, reply_dma);
584 		num_op_reply++;
585 		threshold_comps++;
586 
587 		if (++reply_ci == op_reply_q->num_replies) {
588 			reply_ci = 0;
589 			exp_phase ^= 1;
590 		}
591 
592 		reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);
593 
594 		if ((le16_to_cpu(reply_desc->reply_flags) &
595 		    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
596 			break;
597 #ifndef CONFIG_PREEMPT_RT
598 		/*
599 		 * Exit completion loop to avoid CPU lockup
600 		 * Ensure remaining completion happens from threaded ISR.
601 		 */
602 		if ((num_op_reply > mrioc->max_host_ios) &&
603 			(threaded_isr_poll == true)) {
604 			op_reply_q->enable_irq_poll = true;
605 			break;
606 		}
607 #endif
608 		if (threshold_comps == MPI3MR_THRESHOLD_REPLY_COUNT) {
609 			writel(reply_ci,
610 			    &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].consumer_index);
611 			atomic_sub(threshold_comps, &op_reply_q->pend_ios);
612 			threshold_comps = 0;
613 		}
614 	} while (1);
615 
616 	writel(reply_ci,
617 	    &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].consumer_index);
618 	op_reply_q->ci = reply_ci;
619 	op_reply_q->ephase = exp_phase;
620 	atomic_sub(threshold_comps, &op_reply_q->pend_ios);
621 	atomic_dec(&op_reply_q->in_use);
622 	return num_op_reply;
623 }
624 
625 /**
626  * mpi3mr_blk_mq_poll - Operational reply queue handler
627  * @shost: SCSI Host reference
628  * @queue_num: Request queue number (w.r.t OS it is hardware context number)
629  *
630  * Checks the specific operational reply queue and drains the
631  * reply queue entries until the queue is empty and process the
632  * individual reply descriptors.
633  *
634  * Return: 0 if queue is already processed,or number of reply
635  *	    descriptors processed.
636  */
mpi3mr_blk_mq_poll(struct Scsi_Host * shost,unsigned int queue_num)637 int mpi3mr_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
638 {
639 	int num_entries = 0;
640 	struct mpi3mr_ioc *mrioc;
641 
642 	mrioc = (struct mpi3mr_ioc *)shost->hostdata;
643 
644 	if ((mrioc->reset_in_progress || mrioc->prepare_for_reset ||
645 	    mrioc->unrecoverable || mrioc->pci_err_recovery))
646 		return 0;
647 
648 	num_entries = mpi3mr_process_op_reply_q(mrioc,
649 			&mrioc->op_reply_qinfo[queue_num]);
650 
651 	return num_entries;
652 }
653 
mpi3mr_isr_primary(int irq,void * privdata)654 static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata)
655 {
656 	struct mpi3mr_intr_info *intr_info = privdata;
657 	struct mpi3mr_ioc *mrioc;
658 	u16 midx;
659 	u32 num_admin_replies = 0, num_op_reply = 0;
660 
661 	if (!intr_info)
662 		return IRQ_NONE;
663 
664 	mrioc = intr_info->mrioc;
665 
666 	if (!mrioc->intr_enabled)
667 		return IRQ_NONE;
668 
669 	midx = intr_info->msix_index;
670 
671 	if (!midx)
672 		num_admin_replies = mpi3mr_process_admin_reply_q(mrioc);
673 	if (intr_info->op_reply_q)
674 		num_op_reply = mpi3mr_process_op_reply_q(mrioc,
675 		    intr_info->op_reply_q);
676 
677 	if (num_admin_replies || num_op_reply)
678 		return IRQ_HANDLED;
679 	else
680 		return IRQ_NONE;
681 }
682 
683 #ifndef CONFIG_PREEMPT_RT
684 
mpi3mr_isr(int irq,void * privdata)685 static irqreturn_t mpi3mr_isr(int irq, void *privdata)
686 {
687 	struct mpi3mr_intr_info *intr_info = privdata;
688 	int ret;
689 
690 	if (!intr_info)
691 		return IRQ_NONE;
692 
693 	/* Call primary ISR routine */
694 	ret = mpi3mr_isr_primary(irq, privdata);
695 
696 	/*
697 	 * If more IOs are expected, schedule IRQ polling thread.
698 	 * Otherwise exit from ISR.
699 	 */
700 	if ((threaded_isr_poll == false) || !intr_info->op_reply_q)
701 		return ret;
702 
703 	if (!intr_info->op_reply_q->enable_irq_poll ||
704 	    !atomic_read(&intr_info->op_reply_q->pend_ios))
705 		return ret;
706 
707 	disable_irq_nosync(intr_info->os_irq);
708 
709 	return IRQ_WAKE_THREAD;
710 }
711 
712 /**
713  * mpi3mr_isr_poll - Reply queue polling routine
714  * @irq: IRQ
715  * @privdata: Interrupt info
716  *
717  * poll for pending I/O completions in a loop until pending I/Os
718  * present or controller queue depth I/Os are processed.
719  *
720  * Return: IRQ_NONE or IRQ_HANDLED
721  */
mpi3mr_isr_poll(int irq,void * privdata)722 static irqreturn_t mpi3mr_isr_poll(int irq, void *privdata)
723 {
724 	struct mpi3mr_intr_info *intr_info = privdata;
725 	struct mpi3mr_ioc *mrioc;
726 	u16 midx;
727 	u32 num_op_reply = 0;
728 
729 	if (!intr_info || !intr_info->op_reply_q)
730 		return IRQ_NONE;
731 
732 	mrioc = intr_info->mrioc;
733 	midx = intr_info->msix_index;
734 
735 	/* Poll for pending IOs completions */
736 	do {
737 		if (!mrioc->intr_enabled || mrioc->unrecoverable)
738 			break;
739 
740 		if (!midx)
741 			mpi3mr_process_admin_reply_q(mrioc);
742 		if (intr_info->op_reply_q)
743 			num_op_reply +=
744 			    mpi3mr_process_op_reply_q(mrioc,
745 				intr_info->op_reply_q);
746 
747 		usleep_range(MPI3MR_IRQ_POLL_SLEEP, MPI3MR_IRQ_POLL_SLEEP + 1);
748 
749 	} while (atomic_read(&intr_info->op_reply_q->pend_ios) &&
750 	    (num_op_reply < mrioc->max_host_ios));
751 
752 	intr_info->op_reply_q->enable_irq_poll = false;
753 	enable_irq(intr_info->os_irq);
754 
755 	return IRQ_HANDLED;
756 }
757 
758 #endif
759 
760 /**
761  * mpi3mr_request_irq - Request IRQ and register ISR
762  * @mrioc: Adapter instance reference
763  * @index: IRQ vector index
764  *
765  * Request threaded ISR with primary ISR and secondary
766  *
767  * Return: 0 on success and non zero on failures.
768  */
mpi3mr_request_irq(struct mpi3mr_ioc * mrioc,u16 index)769 static inline int mpi3mr_request_irq(struct mpi3mr_ioc *mrioc, u16 index)
770 {
771 	struct pci_dev *pdev = mrioc->pdev;
772 	struct mpi3mr_intr_info *intr_info = mrioc->intr_info + index;
773 	int retval = 0;
774 
775 	intr_info->mrioc = mrioc;
776 	intr_info->msix_index = index;
777 	intr_info->op_reply_q = NULL;
778 
779 	scnprintf(intr_info->name, MPI3MR_NAME_LENGTH,
780 	    "%.32s%d-msix%u", mrioc->driver_name, mrioc->id, index);
781 
782 #ifndef CONFIG_PREEMPT_RT
783 	retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr,
784 	    mpi3mr_isr_poll, IRQF_SHARED, intr_info->name, intr_info);
785 #else
786 	retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr_primary,
787 	    NULL, IRQF_SHARED, intr_info->name, intr_info);
788 #endif
789 	if (retval) {
790 		ioc_err(mrioc, "%s: Unable to allocate interrupt %d!\n",
791 		    intr_info->name, pci_irq_vector(pdev, index));
792 		return retval;
793 	}
794 
795 	intr_info->os_irq = pci_irq_vector(pdev, index);
796 	return retval;
797 }
798 
/*
 * Validate the requested io_uring poll queue count against the
 * available MSI-X vectors; two vectors are reserved for the admin and
 * default queues.  Disables polled queues when there is no headroom.
 */
static void mpi3mr_calc_poll_queues(struct mpi3mr_ioc *mrioc, u16 max_vectors)
{
	if (!mrioc->requested_poll_qcount)
		return;

	if (max_vectors <= 2 ||
	    mrioc->requested_poll_qcount >= max_vectors - 2) {
		ioc_info(mrioc,
		    "disabled polled queues (%d) msix (%d) because of no resources for default queue\n",
		    mrioc->requested_poll_qcount, max_vectors);
		mrioc->requested_poll_qcount = 0;
	} else {
		ioc_info(mrioc,
		    "enabled polled queues (%d) msix (%d)\n",
		    mrioc->requested_poll_qcount, max_vectors);
	}
}
817 
818 /**
819  * mpi3mr_setup_isr - Setup ISR for the controller
820  * @mrioc: Adapter instance reference
821  * @setup_one: Request one IRQ or more
822  *
823  * Allocate IRQ vectors and call mpi3mr_request_irq to setup ISR
824  *
825  * Return: 0 on success and non zero on failures.
826  */
mpi3mr_setup_isr(struct mpi3mr_ioc * mrioc,u8 setup_one)827 static int mpi3mr_setup_isr(struct mpi3mr_ioc *mrioc, u8 setup_one)
828 {
829 	unsigned int irq_flags = PCI_IRQ_MSIX;
830 	int max_vectors, min_vec;
831 	int retval;
832 	int i;
833 	struct irq_affinity desc = { .pre_vectors =  1, .post_vectors = 1 };
834 
835 	if (mrioc->is_intr_info_set)
836 		return 0;
837 
838 	mpi3mr_cleanup_isr(mrioc);
839 
840 	if (setup_one || reset_devices) {
841 		max_vectors = 1;
842 		retval = pci_alloc_irq_vectors(mrioc->pdev,
843 		    1, max_vectors, irq_flags);
844 		if (retval < 0) {
845 			ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n",
846 			    retval);
847 			goto out_failed;
848 		}
849 	} else {
850 		max_vectors =
851 		    min_t(int, mrioc->cpu_count + 1 +
852 			mrioc->requested_poll_qcount, mrioc->msix_count);
853 
854 		mpi3mr_calc_poll_queues(mrioc, max_vectors);
855 
856 		ioc_info(mrioc,
857 		    "MSI-X vectors supported: %d, no of cores: %d,",
858 		    mrioc->msix_count, mrioc->cpu_count);
859 		ioc_info(mrioc,
860 		    "MSI-x vectors requested: %d poll_queues %d\n",
861 		    max_vectors, mrioc->requested_poll_qcount);
862 
863 		desc.post_vectors = mrioc->requested_poll_qcount;
864 		min_vec = desc.pre_vectors + desc.post_vectors;
865 		irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES;
866 
867 		retval = pci_alloc_irq_vectors_affinity(mrioc->pdev,
868 			min_vec, max_vectors, irq_flags, &desc);
869 
870 		if (retval < 0) {
871 			ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n",
872 			    retval);
873 			goto out_failed;
874 		}
875 
876 
877 		/*
878 		 * If only one MSI-x is allocated, then MSI-x 0 will be shared
879 		 * between Admin queue and operational queue
880 		 */
881 		if (retval == min_vec)
882 			mrioc->op_reply_q_offset = 0;
883 		else if (retval != (max_vectors)) {
884 			ioc_info(mrioc,
885 			    "allocated vectors (%d) are less than configured (%d)\n",
886 			    retval, max_vectors);
887 		}
888 
889 		max_vectors = retval;
890 		mrioc->op_reply_q_offset = (max_vectors > 1) ? 1 : 0;
891 
892 		mpi3mr_calc_poll_queues(mrioc, max_vectors);
893 
894 	}
895 
896 	mrioc->intr_info = kzalloc(sizeof(struct mpi3mr_intr_info) * max_vectors,
897 	    GFP_KERNEL);
898 	if (!mrioc->intr_info) {
899 		retval = -ENOMEM;
900 		pci_free_irq_vectors(mrioc->pdev);
901 		goto out_failed;
902 	}
903 	for (i = 0; i < max_vectors; i++) {
904 		retval = mpi3mr_request_irq(mrioc, i);
905 		if (retval) {
906 			mrioc->intr_info_count = i;
907 			goto out_failed;
908 		}
909 	}
910 	if (reset_devices || !setup_one)
911 		mrioc->is_intr_info_set = true;
912 	mrioc->intr_info_count = max_vectors;
913 	mpi3mr_ioc_enable_intr(mrioc);
914 	return 0;
915 
916 out_failed:
917 	mpi3mr_cleanup_isr(mrioc);
918 
919 	return retval;
920 }
921 
922 static const struct {
923 	enum mpi3mr_iocstate value;
924 	char *name;
925 } mrioc_states[] = {
926 	{ MRIOC_STATE_READY, "ready" },
927 	{ MRIOC_STATE_FAULT, "fault" },
928 	{ MRIOC_STATE_RESET, "reset" },
929 	{ MRIOC_STATE_BECOMING_READY, "becoming ready" },
930 	{ MRIOC_STATE_RESET_REQUESTED, "reset requested" },
931 	{ MRIOC_STATE_UNRECOVERABLE, "unrecoverable error" },
932 };
933 
mpi3mr_iocstate_name(enum mpi3mr_iocstate mrioc_state)934 static const char *mpi3mr_iocstate_name(enum mpi3mr_iocstate mrioc_state)
935 {
936 	int i;
937 	char *name = NULL;
938 
939 	for (i = 0; i < ARRAY_SIZE(mrioc_states); i++) {
940 		if (mrioc_states[i].value == mrioc_state) {
941 			name = mrioc_states[i].name;
942 			break;
943 		}
944 	}
945 	return name;
946 }
947 
948 /* Reset reason to name mapper structure*/
949 static const struct {
950 	enum mpi3mr_reset_reason value;
951 	char *name;
952 } mpi3mr_reset_reason_codes[] = {
953 	{ MPI3MR_RESET_FROM_BRINGUP, "timeout in bringup" },
954 	{ MPI3MR_RESET_FROM_FAULT_WATCH, "fault" },
955 	{ MPI3MR_RESET_FROM_APP, "application invocation" },
956 	{ MPI3MR_RESET_FROM_EH_HOS, "error handling" },
957 	{ MPI3MR_RESET_FROM_TM_TIMEOUT, "TM timeout" },
958 	{ MPI3MR_RESET_FROM_APP_TIMEOUT, "application command timeout" },
959 	{ MPI3MR_RESET_FROM_MUR_FAILURE, "MUR failure" },
960 	{ MPI3MR_RESET_FROM_CTLR_CLEANUP, "timeout in controller cleanup" },
961 	{ MPI3MR_RESET_FROM_CIACTIV_FAULT, "component image activation fault" },
962 	{ MPI3MR_RESET_FROM_PE_TIMEOUT, "port enable timeout" },
963 	{ MPI3MR_RESET_FROM_TSU_TIMEOUT, "time stamp update timeout" },
964 	{ MPI3MR_RESET_FROM_DELREQQ_TIMEOUT, "delete request queue timeout" },
965 	{ MPI3MR_RESET_FROM_DELREPQ_TIMEOUT, "delete reply queue timeout" },
966 	{
967 		MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT,
968 		"create request queue timeout"
969 	},
970 	{
971 		MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT,
972 		"create reply queue timeout"
973 	},
974 	{ MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT, "IOC facts timeout" },
975 	{ MPI3MR_RESET_FROM_IOCINIT_TIMEOUT, "IOC init timeout" },
976 	{ MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT, "event notify timeout" },
977 	{ MPI3MR_RESET_FROM_EVTACK_TIMEOUT, "event acknowledgment timeout" },
978 	{
979 		MPI3MR_RESET_FROM_CIACTVRST_TIMER,
980 		"component image activation timeout"
981 	},
982 	{
983 		MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT,
984 		"get package version timeout"
985 	},
986 	{ MPI3MR_RESET_FROM_SYSFS, "sysfs invocation" },
987 	{ MPI3MR_RESET_FROM_SYSFS_TIMEOUT, "sysfs TM timeout" },
988 	{
989 		MPI3MR_RESET_FROM_DIAG_BUFFER_POST_TIMEOUT,
990 		"diagnostic buffer post timeout"
991 	},
992 	{
993 		MPI3MR_RESET_FROM_DIAG_BUFFER_RELEASE_TIMEOUT,
994 		"diagnostic buffer release timeout"
995 	},
996 	{ MPI3MR_RESET_FROM_FIRMWARE, "firmware asynchronous reset" },
997 	{ MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT, "configuration request timeout"},
998 	{ MPI3MR_RESET_FROM_SAS_TRANSPORT_TIMEOUT, "timeout of a SAS transport layer request" },
999 };
1000 
1001 /**
1002  * mpi3mr_reset_rc_name - get reset reason code name
1003  * @reason_code: reset reason code value
1004  *
1005  * Map reset reason to an NULL terminated ASCII string
1006  *
1007  * Return: name corresponding to reset reason value or NULL.
1008  */
mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code)1009 static const char *mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code)
1010 {
1011 	int i;
1012 	char *name = NULL;
1013 
1014 	for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_reason_codes); i++) {
1015 		if (mpi3mr_reset_reason_codes[i].value == reason_code) {
1016 			name = mpi3mr_reset_reason_codes[i].name;
1017 			break;
1018 		}
1019 	}
1020 	return name;
1021 }
1022 
/* Reset type to name mapper structure */
static const struct {
	u16 reset_type;
	char *name;
} mpi3mr_reset_types[] = {
	{ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, "soft" },
	{ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, "diag fault" },
};
1031 
1032 /**
1033  * mpi3mr_reset_type_name - get reset type name
1034  * @reset_type: reset type value
1035  *
1036  * Map reset type to an NULL terminated ASCII string
1037  *
1038  * Return: name corresponding to reset type value or NULL.
1039  */
mpi3mr_reset_type_name(u16 reset_type)1040 static const char *mpi3mr_reset_type_name(u16 reset_type)
1041 {
1042 	int i;
1043 	char *name = NULL;
1044 
1045 	for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_types); i++) {
1046 		if (mpi3mr_reset_types[i].reset_type == reset_type) {
1047 			name = mpi3mr_reset_types[i].name;
1048 			break;
1049 		}
1050 	}
1051 	return name;
1052 }
1053 
1054 /**
1055  * mpi3mr_is_fault_recoverable - Read fault code and decide
1056  * whether the controller can be recoverable
1057  * @mrioc: Adapter instance reference
1058  * Return: true if fault is recoverable, false otherwise.
1059  */
mpi3mr_is_fault_recoverable(struct mpi3mr_ioc * mrioc)1060 static inline bool mpi3mr_is_fault_recoverable(struct mpi3mr_ioc *mrioc)
1061 {
1062 	u32 fault;
1063 
1064 	fault = (readl(&mrioc->sysif_regs->fault) &
1065 		      MPI3_SYSIF_FAULT_CODE_MASK);
1066 
1067 	switch (fault) {
1068 	case MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED:
1069 	case MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED:
1070 		ioc_warn(mrioc,
1071 		    "controller requires system power cycle, marking controller as unrecoverable\n");
1072 		return false;
1073 	case MPI3_SYSIF_FAULT_CODE_INSUFFICIENT_PCI_SLOT_POWER:
1074 		ioc_warn(mrioc,
1075 		    "controller faulted due to insufficient power,\n"
1076 		    " try by connecting it to a different slot\n");
1077 		return false;
1078 	default:
1079 		break;
1080 	}
1081 	return true;
1082 }
1083 
1084 /**
1085  * mpi3mr_print_fault_info - Display fault information
1086  * @mrioc: Adapter instance reference
1087  *
1088  * Display the controller fault information if there is a
1089  * controller fault.
1090  *
1091  * Return: Nothing.
1092  */
mpi3mr_print_fault_info(struct mpi3mr_ioc * mrioc)1093 void mpi3mr_print_fault_info(struct mpi3mr_ioc *mrioc)
1094 {
1095 	u32 ioc_status, code, code1, code2, code3;
1096 
1097 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1098 
1099 	if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
1100 		code = readl(&mrioc->sysif_regs->fault);
1101 		code1 = readl(&mrioc->sysif_regs->fault_info[0]);
1102 		code2 = readl(&mrioc->sysif_regs->fault_info[1]);
1103 		code3 = readl(&mrioc->sysif_regs->fault_info[2]);
1104 
1105 		ioc_info(mrioc,
1106 		    "fault code(0x%08X): Additional code: (0x%08X:0x%08X:0x%08X)\n",
1107 		    code, code1, code2, code3);
1108 	}
1109 }
1110 
1111 /**
1112  * mpi3mr_save_fault_info - Save fault information
1113  * @mrioc: Adapter instance reference
1114  *
1115  * Save the controller fault information if there is a
1116  * controller fault.
1117  *
1118  * Return: Nothing.
1119  */
mpi3mr_save_fault_info(struct mpi3mr_ioc * mrioc)1120 static void mpi3mr_save_fault_info(struct mpi3mr_ioc *mrioc)
1121 {
1122 	u32 ioc_status, i;
1123 
1124 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1125 
1126 	if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
1127 		mrioc->saved_fault_code = readl(&mrioc->sysif_regs->fault) &
1128 		    MPI3_SYSIF_FAULT_CODE_MASK;
1129 		for (i = 0; i < 3; i++) {
1130 			mrioc->saved_fault_info[i] =
1131 			readl(&mrioc->sysif_regs->fault_info[i]);
1132 		}
1133 	}
1134 }
1135 
1136 /**
1137  * mpi3mr_get_iocstate - Get IOC State
1138  * @mrioc: Adapter instance reference
1139  *
1140  * Return a proper IOC state enum based on the IOC status and
1141  * IOC configuration and unrcoverable state of the controller.
1142  *
1143  * Return: Current IOC state.
1144  */
mpi3mr_get_iocstate(struct mpi3mr_ioc * mrioc)1145 enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_ioc *mrioc)
1146 {
1147 	u32 ioc_status, ioc_config;
1148 	u8 ready, enabled;
1149 
1150 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1151 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1152 
1153 	if (mrioc->unrecoverable)
1154 		return MRIOC_STATE_UNRECOVERABLE;
1155 	if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)
1156 		return MRIOC_STATE_FAULT;
1157 
1158 	ready = (ioc_status & MPI3_SYSIF_IOC_STATUS_READY);
1159 	enabled = (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC);
1160 
1161 	if (ready && enabled)
1162 		return MRIOC_STATE_READY;
1163 	if ((!ready) && (!enabled))
1164 		return MRIOC_STATE_RESET;
1165 	if ((!ready) && (enabled))
1166 		return MRIOC_STATE_BECOMING_READY;
1167 
1168 	return MRIOC_STATE_RESET_REQUESTED;
1169 }
1170 
1171 /**
1172  * mpi3mr_free_ioctl_dma_memory - free memory for ioctl dma
1173  * @mrioc: Adapter instance reference
1174  *
1175  * Free the DMA memory allocated for IOCTL handling purpose.
1176  *
1177  * Return: None
1178  */
mpi3mr_free_ioctl_dma_memory(struct mpi3mr_ioc * mrioc)1179 static void mpi3mr_free_ioctl_dma_memory(struct mpi3mr_ioc *mrioc)
1180 {
1181 	struct dma_memory_desc *mem_desc;
1182 	u16 i;
1183 
1184 	if (!mrioc->ioctl_dma_pool)
1185 		return;
1186 
1187 	for (i = 0; i < MPI3MR_NUM_IOCTL_SGE; i++) {
1188 		mem_desc = &mrioc->ioctl_sge[i];
1189 		if (mem_desc->addr) {
1190 			dma_pool_free(mrioc->ioctl_dma_pool,
1191 				      mem_desc->addr,
1192 				      mem_desc->dma_addr);
1193 			mem_desc->addr = NULL;
1194 		}
1195 	}
1196 	dma_pool_destroy(mrioc->ioctl_dma_pool);
1197 	mrioc->ioctl_dma_pool = NULL;
1198 	mem_desc = &mrioc->ioctl_chain_sge;
1199 
1200 	if (mem_desc->addr) {
1201 		dma_free_coherent(&mrioc->pdev->dev, mem_desc->size,
1202 				  mem_desc->addr, mem_desc->dma_addr);
1203 		mem_desc->addr = NULL;
1204 	}
1205 	mem_desc = &mrioc->ioctl_resp_sge;
1206 	if (mem_desc->addr) {
1207 		dma_free_coherent(&mrioc->pdev->dev, mem_desc->size,
1208 				  mem_desc->addr, mem_desc->dma_addr);
1209 		mem_desc->addr = NULL;
1210 	}
1211 
1212 	mrioc->ioctl_sges_allocated = false;
1213 }
1214 
1215 /**
1216  * mpi3mr_alloc_ioctl_dma_memory - Alloc memory for ioctl dma
1217  * @mrioc: Adapter instance reference
1218  *
1219  * This function allocates dmaable memory required to handle the
1220  * application issued MPI3 IOCTL requests.
1221  *
1222  * Return: None
1223  */
mpi3mr_alloc_ioctl_dma_memory(struct mpi3mr_ioc * mrioc)1224 static void mpi3mr_alloc_ioctl_dma_memory(struct mpi3mr_ioc *mrioc)
1225 
1226 {
1227 	struct dma_memory_desc *mem_desc;
1228 	u16 i;
1229 
1230 	mrioc->ioctl_dma_pool = dma_pool_create("ioctl dma pool",
1231 						&mrioc->pdev->dev,
1232 						MPI3MR_IOCTL_SGE_SIZE,
1233 						MPI3MR_PAGE_SIZE_4K, 0);
1234 
1235 	if (!mrioc->ioctl_dma_pool) {
1236 		ioc_err(mrioc, "ioctl_dma_pool: dma_pool_create failed\n");
1237 		goto out_failed;
1238 	}
1239 
1240 	for (i = 0; i < MPI3MR_NUM_IOCTL_SGE; i++) {
1241 		mem_desc = &mrioc->ioctl_sge[i];
1242 		mem_desc->size = MPI3MR_IOCTL_SGE_SIZE;
1243 		mem_desc->addr = dma_pool_zalloc(mrioc->ioctl_dma_pool,
1244 						 GFP_KERNEL,
1245 						 &mem_desc->dma_addr);
1246 		if (!mem_desc->addr)
1247 			goto out_failed;
1248 	}
1249 
1250 	mem_desc = &mrioc->ioctl_chain_sge;
1251 	mem_desc->size = MPI3MR_PAGE_SIZE_4K;
1252 	mem_desc->addr = dma_alloc_coherent(&mrioc->pdev->dev,
1253 					    mem_desc->size,
1254 					    &mem_desc->dma_addr,
1255 					    GFP_KERNEL);
1256 	if (!mem_desc->addr)
1257 		goto out_failed;
1258 
1259 	mem_desc = &mrioc->ioctl_resp_sge;
1260 	mem_desc->size = MPI3MR_PAGE_SIZE_4K;
1261 	mem_desc->addr = dma_alloc_coherent(&mrioc->pdev->dev,
1262 					    mem_desc->size,
1263 					    &mem_desc->dma_addr,
1264 					    GFP_KERNEL);
1265 	if (!mem_desc->addr)
1266 		goto out_failed;
1267 
1268 	mrioc->ioctl_sges_allocated = true;
1269 
1270 	return;
1271 out_failed:
1272 	ioc_warn(mrioc, "cannot allocate DMA memory for the mpt commands\n"
1273 		 "from the applications, application interface for MPT command is disabled\n");
1274 	mpi3mr_free_ioctl_dma_memory(mrioc);
1275 }
1276 
1277 /**
1278  * mpi3mr_fault_uevent_emit - Emit uevent for any controller
1279  * fault
1280  * @mrioc: Pointer to the mpi3mr_ioc structure for the controller instance
1281  *
1282  * This function is invoked when the controller undergoes any
1283  * type of fault.
1284  */
1285 
mpi3mr_fault_uevent_emit(struct mpi3mr_ioc * mrioc)1286 static void mpi3mr_fault_uevent_emit(struct mpi3mr_ioc *mrioc)
1287 {
1288 	struct kobj_uevent_env *env;
1289 	int ret;
1290 
1291 	env = kzalloc_obj(*env);
1292 	if (!env)
1293 		return;
1294 
1295 	ret = add_uevent_var(env, "DRIVER=%s", mrioc->driver_name);
1296 	if (ret)
1297 		goto out_free;
1298 
1299 	ret = add_uevent_var(env, "IOC_ID=%u", mrioc->id);
1300 	if (ret)
1301 		goto out_free;
1302 
1303 	ret = add_uevent_var(env, "FAULT_CODE=0x%08x",
1304 			    mrioc->saved_fault_code);
1305 	if (ret)
1306 		goto out_free;
1307 
1308 	ret = add_uevent_var(env, "FAULT_INFO0=0x%08x",
1309 			     mrioc->saved_fault_info[0]);
1310 	if (ret)
1311 		goto out_free;
1312 
1313 	ret = add_uevent_var(env, "FAULT_INFO1=0x%08x",
1314 			    mrioc->saved_fault_info[1]);
1315 	if (ret)
1316 		goto out_free;
1317 
1318 	ret = add_uevent_var(env, "FAULT_INFO2=0x%08x",
1319 			    mrioc->saved_fault_info[2]);
1320 	if (ret)
1321 		goto out_free;
1322 
1323 	kobject_uevent_env(&mrioc->shost->shost_gendev.kobj,
1324 			KOBJ_CHANGE, env->envp);
1325 
1326 out_free:
1327 	kfree(env);
1328 
1329 }
1330 
1331 /**
1332  * mpi3mr_clear_reset_history - clear reset history
1333  * @mrioc: Adapter instance reference
1334  *
1335  * Write the reset history bit in IOC status to clear the bit,
1336  * if it is already set.
1337  *
1338  * Return: Nothing.
1339  */
mpi3mr_clear_reset_history(struct mpi3mr_ioc * mrioc)1340 static inline void mpi3mr_clear_reset_history(struct mpi3mr_ioc *mrioc)
1341 {
1342 	u32 ioc_status;
1343 
1344 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1345 	if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
1346 		writel(ioc_status, &mrioc->sysif_regs->ioc_status);
1347 }
1348 
1349 /**
1350  * mpi3mr_issue_and_process_mur - Message unit Reset handler
1351  * @mrioc: Adapter instance reference
1352  * @reset_reason: Reset reason code
1353  *
1354  * Issue Message unit Reset to the controller and wait for it to
1355  * be complete.
1356  *
1357  * Return: 0 on success, -1 on failure.
1358  */
mpi3mr_issue_and_process_mur(struct mpi3mr_ioc * mrioc,u32 reset_reason)1359 static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc,
1360 	u32 reset_reason)
1361 {
1362 	u32 ioc_config, timeout, ioc_status, scratch_pad0;
1363 	int retval = -1;
1364 
1365 	ioc_info(mrioc, "Issuing Message unit Reset(MUR)\n");
1366 	if (mrioc->unrecoverable) {
1367 		ioc_info(mrioc, "IOC is unrecoverable MUR not issued\n");
1368 		return retval;
1369 	}
1370 	mpi3mr_clear_reset_history(mrioc);
1371 	scratch_pad0 = ((MPI3MR_RESET_REASON_OSTYPE_LINUX <<
1372 			 MPI3MR_RESET_REASON_OSTYPE_SHIFT) |
1373 			(mrioc->facts.ioc_num <<
1374 			 MPI3MR_RESET_REASON_IOCNUM_SHIFT) | reset_reason);
1375 	writel(scratch_pad0, &mrioc->sysif_regs->scratchpad[0]);
1376 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1377 	ioc_config &= ~MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
1378 	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
1379 
1380 	timeout = MPI3MR_MUR_TIMEOUT * 10;
1381 	do {
1382 		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1383 		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)) {
1384 			mpi3mr_clear_reset_history(mrioc);
1385 			break;
1386 		}
1387 		if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
1388 			mpi3mr_print_fault_info(mrioc);
1389 			break;
1390 		}
1391 		msleep(100);
1392 	} while (--timeout);
1393 
1394 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1395 	if (timeout && !((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
1396 	      (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
1397 	      (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
1398 		retval = 0;
1399 
1400 	ioc_info(mrioc, "Base IOC Sts/Config after %s MUR is (0x%08x)/(0x%08x)\n",
1401 	    (!retval) ? "successful" : "failed", ioc_status, ioc_config);
1402 	return retval;
1403 }
1404 
1405 /**
1406  * mpi3mr_revalidate_factsdata - validate IOCFacts parameters
1407  * during reset/resume
1408  * @mrioc: Adapter instance reference
1409  *
1410  * Return: zero if the new IOCFacts parameters value is compatible with
1411  * older values else return -EPERM
1412  */
1413 static int
mpi3mr_revalidate_factsdata(struct mpi3mr_ioc * mrioc)1414 mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc)
1415 {
1416 	unsigned long *removepend_bitmap;
1417 
1418 	if (mrioc->facts.reply_sz > mrioc->reply_sz) {
1419 		ioc_err(mrioc,
1420 		    "cannot increase reply size from %d to %d\n",
1421 		    mrioc->reply_sz, mrioc->facts.reply_sz);
1422 		return -EPERM;
1423 	}
1424 
1425 	if (mrioc->facts.max_op_reply_q < mrioc->num_op_reply_q) {
1426 		ioc_err(mrioc,
1427 		    "cannot reduce number of operational reply queues from %d to %d\n",
1428 		    mrioc->num_op_reply_q,
1429 		    mrioc->facts.max_op_reply_q);
1430 		return -EPERM;
1431 	}
1432 
1433 	if (mrioc->facts.max_op_req_q < mrioc->num_op_req_q) {
1434 		ioc_err(mrioc,
1435 		    "cannot reduce number of operational request queues from %d to %d\n",
1436 		    mrioc->num_op_req_q, mrioc->facts.max_op_req_q);
1437 		return -EPERM;
1438 	}
1439 
1440 	if (mrioc->shost->max_sectors != (mrioc->facts.max_data_length / 512))
1441 		ioc_err(mrioc, "Warning: The maximum data transfer length\n"
1442 			    "\tchanged after reset: previous(%d), new(%d),\n"
1443 			    "the driver cannot change this at run time\n",
1444 			    mrioc->shost->max_sectors * 512, mrioc->facts.max_data_length);
1445 
1446 	if ((mrioc->sas_transport_enabled) && (mrioc->facts.ioc_capabilities &
1447 	    MPI3_IOCFACTS_CAPABILITY_MULTIPATH_SUPPORTED))
1448 		ioc_err(mrioc,
1449 		    "critical error: multipath capability is enabled at the\n"
1450 		    "\tcontroller while sas transport support is enabled at the\n"
1451 		    "\tdriver, please reboot the system or reload the driver\n");
1452 
1453 	if (mrioc->seg_tb_support) {
1454 		if (!(mrioc->facts.ioc_capabilities &
1455 		     MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_TRACE_SUPPORTED)) {
1456 			ioc_err(mrioc,
1457 			    "critical error: previously enabled segmented trace\n"
1458 			    " buffer capability is disabled after reset. Please\n"
1459 			    " update the firmware or reboot the system or\n"
1460 			    " reload the driver to enable trace diag buffer\n");
1461 			mrioc->diag_buffers[0].disabled_after_reset = true;
1462 		} else
1463 			mrioc->diag_buffers[0].disabled_after_reset = false;
1464 	}
1465 
1466 	if (mrioc->facts.max_devhandle > mrioc->dev_handle_bitmap_bits) {
1467 		removepend_bitmap = bitmap_zalloc(mrioc->facts.max_devhandle,
1468 						  GFP_KERNEL);
1469 		if (!removepend_bitmap) {
1470 			ioc_err(mrioc,
1471 				"failed to increase removepend_bitmap bits from %d to %d\n",
1472 				mrioc->dev_handle_bitmap_bits,
1473 				mrioc->facts.max_devhandle);
1474 			return -EPERM;
1475 		}
1476 		bitmap_free(mrioc->removepend_bitmap);
1477 		mrioc->removepend_bitmap = removepend_bitmap;
1478 		ioc_info(mrioc,
1479 			 "increased bits of dev_handle_bitmap from %d to %d\n",
1480 			 mrioc->dev_handle_bitmap_bits,
1481 			 mrioc->facts.max_devhandle);
1482 		mrioc->dev_handle_bitmap_bits = mrioc->facts.max_devhandle;
1483 	}
1484 
1485 	return 0;
1486 }
1487 
1488 /**
1489  * mpi3mr_bring_ioc_ready - Bring controller to ready state
1490  * @mrioc: Adapter instance reference
1491  *
1492  * Set Enable IOC bit in IOC configuration register and wait for
1493  * the controller to become ready.
1494  *
1495  * Return: 0 on success, appropriate error on failure.
1496  */
mpi3mr_bring_ioc_ready(struct mpi3mr_ioc * mrioc)1497 static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
1498 {
1499 	u32 ioc_config, ioc_status, timeout, host_diagnostic;
1500 	int retval = 0;
1501 	enum mpi3mr_iocstate ioc_state;
1502 	u64 base_info;
1503 	u8 retry = 0;
1504 	u64 start_time, elapsed_time_sec;
1505 
1506 retry_bring_ioc_ready:
1507 
1508 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1509 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1510 	base_info = lo_hi_readq(&mrioc->sysif_regs->ioc_information);
1511 	ioc_info(mrioc, "ioc_status(0x%08x), ioc_config(0x%08x), ioc_info(0x%016llx) at the bringup\n",
1512 	    ioc_status, ioc_config, base_info);
1513 
1514 	if (!mpi3mr_is_fault_recoverable(mrioc)) {
1515 		mrioc->unrecoverable = 1;
1516 		goto out_device_not_present;
1517 	}
1518 
1519 	/*The timeout value is in 2sec unit, changing it to seconds*/
1520 	mrioc->ready_timeout =
1521 	    ((base_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >>
1522 	    MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT) * 2;
1523 
1524 	ioc_info(mrioc, "ready timeout: %d seconds\n", mrioc->ready_timeout);
1525 
1526 	ioc_state = mpi3mr_get_iocstate(mrioc);
1527 	ioc_info(mrioc, "controller is in %s state during detection\n",
1528 	    mpi3mr_iocstate_name(ioc_state));
1529 
1530 	timeout = mrioc->ready_timeout * 10;
1531 
1532 	do {
1533 		ioc_state = mpi3mr_get_iocstate(mrioc);
1534 
1535 		if (ioc_state != MRIOC_STATE_BECOMING_READY &&
1536 		    ioc_state != MRIOC_STATE_RESET_REQUESTED)
1537 			break;
1538 
1539 		if (!pci_device_is_present(mrioc->pdev)) {
1540 			mrioc->unrecoverable = 1;
1541 			ioc_err(mrioc, "controller is not present while waiting to reset\n");
1542 			goto out_device_not_present;
1543 		}
1544 
1545 		msleep(100);
1546 	} while (--timeout);
1547 
1548 	if (ioc_state == MRIOC_STATE_READY) {
1549 		ioc_info(mrioc, "issuing message unit reset (MUR) to bring to reset state\n");
1550 		retval = mpi3mr_issue_and_process_mur(mrioc,
1551 		    MPI3MR_RESET_FROM_BRINGUP);
1552 		ioc_state = mpi3mr_get_iocstate(mrioc);
1553 		if (retval)
1554 			ioc_err(mrioc,
1555 			    "message unit reset failed with error %d current state %s\n",
1556 			    retval, mpi3mr_iocstate_name(ioc_state));
1557 	}
1558 	if (ioc_state != MRIOC_STATE_RESET) {
1559 		if (ioc_state == MRIOC_STATE_FAULT) {
1560 			timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
1561 			mpi3mr_print_fault_info(mrioc);
1562 			mpi3mr_save_fault_info(mrioc);
1563 			mrioc->fault_during_init = 1;
1564 			mrioc->fwfault_counter++;
1565 
1566 			do {
1567 				host_diagnostic =
1568 					readl(&mrioc->sysif_regs->host_diagnostic);
1569 				if (!(host_diagnostic &
1570 				      MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
1571 					break;
1572 				if (!pci_device_is_present(mrioc->pdev)) {
1573 					mrioc->unrecoverable = 1;
1574 					ioc_err(mrioc, "controller is not present at the bringup\n");
1575 					goto out_device_not_present;
1576 				}
1577 				msleep(100);
1578 			} while (--timeout);
1579 		}
1580 		mpi3mr_print_fault_info(mrioc);
1581 		ioc_info(mrioc, "issuing soft reset to bring to reset state\n");
1582 		retval = mpi3mr_issue_reset(mrioc,
1583 		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
1584 		    MPI3MR_RESET_FROM_BRINGUP);
1585 		if (retval) {
1586 			ioc_err(mrioc,
1587 			    "soft reset failed with error %d\n", retval);
1588 			goto out_failed;
1589 		}
1590 	}
1591 	ioc_state = mpi3mr_get_iocstate(mrioc);
1592 	if (ioc_state != MRIOC_STATE_RESET) {
1593 		ioc_err(mrioc,
1594 		    "cannot bring controller to reset state, current state: %s\n",
1595 		    mpi3mr_iocstate_name(ioc_state));
1596 		goto out_failed;
1597 	}
1598 	mpi3mr_clear_reset_history(mrioc);
1599 	retval = mpi3mr_setup_admin_qpair(mrioc);
1600 	if (retval) {
1601 		ioc_err(mrioc, "failed to setup admin queues: error %d\n",
1602 		    retval);
1603 		goto out_failed;
1604 	}
1605 
1606 	ioc_info(mrioc, "bringing controller to ready state\n");
1607 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1608 	ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
1609 	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
1610 
1611 	if (retry == 0)
1612 		start_time = jiffies;
1613 
1614 	timeout = mrioc->ready_timeout * 10;
1615 	do {
1616 		ioc_state = mpi3mr_get_iocstate(mrioc);
1617 		if (ioc_state == MRIOC_STATE_READY) {
1618 			ioc_info(mrioc,
1619 			    "successfully transitioned to %s state\n",
1620 			    mpi3mr_iocstate_name(ioc_state));
1621 			return 0;
1622 		}
1623 		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1624 		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
1625 		    (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
1626 			mpi3mr_print_fault_info(mrioc);
1627 			goto out_failed;
1628 		}
1629 		if (!pci_device_is_present(mrioc->pdev)) {
1630 			mrioc->unrecoverable = 1;
1631 			ioc_err(mrioc,
1632 			    "controller is not present at the bringup\n");
1633 			retval = -1;
1634 			goto out_device_not_present;
1635 		}
1636 		msleep(100);
1637 		elapsed_time_sec = jiffies_to_msecs(jiffies - start_time)/1000;
1638 	} while (elapsed_time_sec < mrioc->ready_timeout);
1639 
1640 out_failed:
1641 	elapsed_time_sec = jiffies_to_msecs(jiffies - start_time)/1000;
1642 	if ((retry < 2) && (elapsed_time_sec < (mrioc->ready_timeout - 60))) {
1643 		retry++;
1644 
1645 		ioc_warn(mrioc, "retrying to bring IOC ready, retry_count:%d\n"
1646 				" elapsed time =%llu\n", retry, elapsed_time_sec);
1647 
1648 		goto retry_bring_ioc_ready;
1649 	}
1650 	ioc_state = mpi3mr_get_iocstate(mrioc);
1651 	ioc_err(mrioc,
1652 	    "failed to bring to ready state,  current state: %s\n",
1653 	    mpi3mr_iocstate_name(ioc_state));
1654 out_device_not_present:
1655 	return retval;
1656 }
1657 
1658 /**
1659  * mpi3mr_soft_reset_success - Check softreset is success or not
1660  * @ioc_status: IOC status register value
1661  * @ioc_config: IOC config register value
1662  *
1663  * Check whether the soft reset is successful or not based on
1664  * IOC status and IOC config register values.
1665  *
1666  * Return: True when the soft reset is success, false otherwise.
1667  */
1668 static inline bool
mpi3mr_soft_reset_success(u32 ioc_status,u32 ioc_config)1669 mpi3mr_soft_reset_success(u32 ioc_status, u32 ioc_config)
1670 {
1671 	if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
1672 	    (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
1673 		return true;
1674 	return false;
1675 }
1676 
1677 /**
1678  * mpi3mr_diagfault_success - Check diag fault is success or not
1679  * @mrioc: Adapter reference
1680  * @ioc_status: IOC status register value
1681  *
1682  * Check whether the controller hit diag reset fault code.
1683  *
1684  * Return: True when there is diag fault, false otherwise.
1685  */
mpi3mr_diagfault_success(struct mpi3mr_ioc * mrioc,u32 ioc_status)1686 static inline bool mpi3mr_diagfault_success(struct mpi3mr_ioc *mrioc,
1687 	u32 ioc_status)
1688 {
1689 	u32 fault;
1690 
1691 	if (!(ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT))
1692 		return false;
1693 	fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK;
1694 	if (fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET) {
1695 		mpi3mr_print_fault_info(mrioc);
1696 		return true;
1697 	}
1698 	return false;
1699 }
1700 
1701 /**
1702  * mpi3mr_set_diagsave - Set diag save bit for snapdump
1703  * @mrioc: Adapter reference
1704  *
1705  * Set diag save bit in IOC configuration register to enable
1706  * snapdump.
1707  *
1708  * Return: Nothing.
1709  */
mpi3mr_set_diagsave(struct mpi3mr_ioc * mrioc)1710 static inline void mpi3mr_set_diagsave(struct mpi3mr_ioc *mrioc)
1711 {
1712 	u32 ioc_config;
1713 
1714 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1715 	ioc_config |= MPI3_SYSIF_IOC_CONFIG_DIAG_SAVE;
1716 	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
1717 }
1718 
1719 /**
1720  * mpi3mr_issue_reset - Issue reset to the controller
1721  * @mrioc: Adapter reference
1722  * @reset_type: Reset type
1723  * @reset_reason: Reset reason code
1724  *
1725  * Unlock the host diagnostic registers and write the specific
1726  * reset type to that, wait for reset acknowledgment from the
1727  * controller, if the reset is not successful retry for the
1728  * predefined number of times.
1729  *
1730  * Return: 0 on success, non-zero on failure.
1731  */
mpi3mr_issue_reset(struct mpi3mr_ioc * mrioc,u16 reset_type,u16 reset_reason)1732 static int mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type,
1733 	u16 reset_reason)
1734 {
1735 	int retval = -1;
1736 	u8 unlock_retry_count = 0;
1737 	u32 host_diagnostic, ioc_status, ioc_config, scratch_pad0;
1738 	u32 timeout = MPI3MR_RESET_ACK_TIMEOUT * 10;
1739 
1740 	if ((reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) &&
1741 	    (reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT))
1742 		return retval;
1743 	if (mrioc->unrecoverable)
1744 		return retval;
1745 	if (reset_reason == MPI3MR_RESET_FROM_FIRMWARE) {
1746 		retval = 0;
1747 		return retval;
1748 	}
1749 
1750 	ioc_info(mrioc, "%s reset due to %s(0x%x)\n",
1751 	    mpi3mr_reset_type_name(reset_type),
1752 	    mpi3mr_reset_rc_name(reset_reason), reset_reason);
1753 
1754 	mpi3mr_clear_reset_history(mrioc);
1755 	do {
1756 		ioc_info(mrioc,
1757 		    "Write magic sequence to unlock host diag register (retry=%d)\n",
1758 		    ++unlock_retry_count);
1759 		if (unlock_retry_count >= MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT) {
1760 			ioc_err(mrioc,
1761 			    "%s reset failed due to unlock failure, host_diagnostic(0x%08x)\n",
1762 			    mpi3mr_reset_type_name(reset_type),
1763 			    host_diagnostic);
1764 			mrioc->unrecoverable = 1;
1765 			return retval;
1766 		}
1767 
1768 		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH,
1769 		    &mrioc->sysif_regs->write_sequence);
1770 		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_1ST,
1771 		    &mrioc->sysif_regs->write_sequence);
1772 		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
1773 		    &mrioc->sysif_regs->write_sequence);
1774 		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_3RD,
1775 		    &mrioc->sysif_regs->write_sequence);
1776 		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_4TH,
1777 		    &mrioc->sysif_regs->write_sequence);
1778 		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_5TH,
1779 		    &mrioc->sysif_regs->write_sequence);
1780 		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_6TH,
1781 		    &mrioc->sysif_regs->write_sequence);
1782 		usleep_range(1000, 1100);
1783 		host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
1784 		ioc_info(mrioc,
1785 		    "wrote magic sequence: retry_count(%d), host_diagnostic(0x%08x)\n",
1786 		    unlock_retry_count, host_diagnostic);
1787 	} while (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_DIAG_WRITE_ENABLE));
1788 
1789 	scratch_pad0 = ((MPI3MR_RESET_REASON_OSTYPE_LINUX <<
1790 	    MPI3MR_RESET_REASON_OSTYPE_SHIFT) | (mrioc->facts.ioc_num <<
1791 	    MPI3MR_RESET_REASON_IOCNUM_SHIFT) | reset_reason);
1792 	writel(scratch_pad0, &mrioc->sysif_regs->scratchpad[0]);
1793 	if (reset_type == MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT)
1794 		mpi3mr_set_diagsave(mrioc);
1795 	writel(host_diagnostic | reset_type,
1796 	    &mrioc->sysif_regs->host_diagnostic);
1797 	switch (reset_type) {
1798 	case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET:
1799 		do {
1800 			ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1801 			ioc_config =
1802 			    readl(&mrioc->sysif_regs->ioc_configuration);
1803 			if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
1804 			    && mpi3mr_soft_reset_success(ioc_status, ioc_config)
1805 			    ) {
1806 				mpi3mr_clear_reset_history(mrioc);
1807 				retval = 0;
1808 				break;
1809 			}
1810 			msleep(100);
1811 		} while (--timeout);
1812 		mpi3mr_print_fault_info(mrioc);
1813 		break;
1814 	case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT:
1815 		do {
1816 			ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1817 			if (mpi3mr_diagfault_success(mrioc, ioc_status)) {
1818 				retval = 0;
1819 				break;
1820 			}
1821 			msleep(100);
1822 		} while (--timeout);
1823 		break;
1824 	default:
1825 		break;
1826 	}
1827 
1828 	writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
1829 	    &mrioc->sysif_regs->write_sequence);
1830 
1831 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1832 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1833 	ioc_info(mrioc,
1834 	    "ioc_status/ioc_config after %s reset is (0x%08x)/(0x%08x)\n",
1835 	    (!retval)?"successful":"failed", ioc_status,
1836 	    ioc_config);
1837 	if (retval)
1838 		mrioc->unrecoverable = 1;
1839 	return retval;
1840 }
1841 
1842 /**
1843  * mpi3mr_admin_request_post - Post request to admin queue
1844  * @mrioc: Adapter reference
1845  * @admin_req: MPI3 request
1846  * @admin_req_sz: Request size
1847  * @ignore_reset: Ignore reset in process
1848  *
1849  * Post the MPI3 request into admin request queue and
1850  * inform the controller, if the queue is full return
1851  * appropriate error.
1852  *
1853  * Return: 0 on success, non-zero on failure.
1854  */
mpi3mr_admin_request_post(struct mpi3mr_ioc * mrioc,void * admin_req,u16 admin_req_sz,u8 ignore_reset)1855 int mpi3mr_admin_request_post(struct mpi3mr_ioc *mrioc, void *admin_req,
1856 	u16 admin_req_sz, u8 ignore_reset)
1857 {
1858 	u16 areq_pi = 0, areq_ci = 0, max_entries = 0;
1859 	int retval = 0;
1860 	unsigned long flags;
1861 	u8 *areq_entry;
1862 
1863 	if (mrioc->unrecoverable) {
1864 		ioc_err(mrioc, "%s : Unrecoverable controller\n", __func__);
1865 		return -EFAULT;
1866 	}
1867 
1868 	spin_lock_irqsave(&mrioc->admin_req_lock, flags);
1869 	areq_pi = mrioc->admin_req_pi;
1870 	areq_ci = mrioc->admin_req_ci;
1871 	max_entries = mrioc->num_admin_req;
1872 	if ((areq_ci == (areq_pi + 1)) || ((!areq_ci) &&
1873 	    (areq_pi == (max_entries - 1)))) {
1874 		ioc_err(mrioc, "AdminReqQ full condition detected\n");
1875 		retval = -EAGAIN;
1876 		goto out;
1877 	}
1878 	if (!ignore_reset && mrioc->reset_in_progress) {
1879 		ioc_err(mrioc, "AdminReqQ submit reset in progress\n");
1880 		retval = -EAGAIN;
1881 		goto out;
1882 	}
1883 	if (mrioc->pci_err_recovery) {
1884 		ioc_err(mrioc, "admin request queue submission failed due to pci error recovery in progress\n");
1885 		retval = -EAGAIN;
1886 		goto out;
1887 	}
1888 
1889 	areq_entry = (u8 *)mrioc->admin_req_base +
1890 	    (areq_pi * MPI3MR_ADMIN_REQ_FRAME_SZ);
1891 	memset(areq_entry, 0, MPI3MR_ADMIN_REQ_FRAME_SZ);
1892 	memcpy(areq_entry, (u8 *)admin_req, admin_req_sz);
1893 
1894 	if (++areq_pi == max_entries)
1895 		areq_pi = 0;
1896 	mrioc->admin_req_pi = areq_pi;
1897 
1898 	writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi);
1899 
1900 out:
1901 	spin_unlock_irqrestore(&mrioc->admin_req_lock, flags);
1902 
1903 	return retval;
1904 }
1905 
1906 /**
1907  * mpi3mr_free_op_req_q_segments - free request memory segments
1908  * @mrioc: Adapter instance reference
1909  * @q_idx: operational request queue index
1910  *
1911  * Free memory segments allocated for operational request queue
1912  *
1913  * Return: Nothing.
1914  */
mpi3mr_free_op_req_q_segments(struct mpi3mr_ioc * mrioc,u16 q_idx)1915 static void mpi3mr_free_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)
1916 {
1917 	u16 j;
1918 	int size;
1919 	struct segments *segments;
1920 
1921 	segments = mrioc->req_qinfo[q_idx].q_segments;
1922 	if (!segments)
1923 		return;
1924 
1925 	if (mrioc->enable_segqueue) {
1926 		size = MPI3MR_OP_REQ_Q_SEG_SIZE;
1927 		if (mrioc->req_qinfo[q_idx].q_segment_list) {
1928 			dma_free_coherent(&mrioc->pdev->dev,
1929 			    MPI3MR_MAX_SEG_LIST_SIZE,
1930 			    mrioc->req_qinfo[q_idx].q_segment_list,
1931 			    mrioc->req_qinfo[q_idx].q_segment_list_dma);
1932 			mrioc->req_qinfo[q_idx].q_segment_list = NULL;
1933 		}
1934 	} else
1935 		size = mrioc->req_qinfo[q_idx].segment_qd *
1936 		    mrioc->facts.op_req_sz;
1937 
1938 	for (j = 0; j < mrioc->req_qinfo[q_idx].num_segments; j++) {
1939 		if (!segments[j].segment)
1940 			continue;
1941 		dma_free_coherent(&mrioc->pdev->dev,
1942 		    size, segments[j].segment, segments[j].segment_dma);
1943 		segments[j].segment = NULL;
1944 	}
1945 	kfree(mrioc->req_qinfo[q_idx].q_segments);
1946 	mrioc->req_qinfo[q_idx].q_segments = NULL;
1947 	mrioc->req_qinfo[q_idx].qid = 0;
1948 }
1949 
1950 /**
1951  * mpi3mr_free_op_reply_q_segments - free reply memory segments
1952  * @mrioc: Adapter instance reference
1953  * @q_idx: operational reply queue index
1954  *
1955  * Free memory segments allocated for operational reply queue
1956  *
1957  * Return: Nothing.
1958  */
mpi3mr_free_op_reply_q_segments(struct mpi3mr_ioc * mrioc,u16 q_idx)1959 static void mpi3mr_free_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)
1960 {
1961 	u16 j;
1962 	int size;
1963 	struct segments *segments;
1964 
1965 	segments = mrioc->op_reply_qinfo[q_idx].q_segments;
1966 	if (!segments)
1967 		return;
1968 
1969 	if (mrioc->enable_segqueue) {
1970 		size = MPI3MR_OP_REP_Q_SEG_SIZE;
1971 		if (mrioc->op_reply_qinfo[q_idx].q_segment_list) {
1972 			dma_free_coherent(&mrioc->pdev->dev,
1973 			    MPI3MR_MAX_SEG_LIST_SIZE,
1974 			    mrioc->op_reply_qinfo[q_idx].q_segment_list,
1975 			    mrioc->op_reply_qinfo[q_idx].q_segment_list_dma);
1976 			mrioc->op_reply_qinfo[q_idx].q_segment_list = NULL;
1977 		}
1978 	} else
1979 		size = mrioc->op_reply_qinfo[q_idx].segment_qd *
1980 		    mrioc->op_reply_desc_sz;
1981 
1982 	for (j = 0; j < mrioc->op_reply_qinfo[q_idx].num_segments; j++) {
1983 		if (!segments[j].segment)
1984 			continue;
1985 		dma_free_coherent(&mrioc->pdev->dev,
1986 		    size, segments[j].segment, segments[j].segment_dma);
1987 		segments[j].segment = NULL;
1988 	}
1989 
1990 	kfree(mrioc->op_reply_qinfo[q_idx].q_segments);
1991 	mrioc->op_reply_qinfo[q_idx].q_segments = NULL;
1992 	mrioc->op_reply_qinfo[q_idx].qid = 0;
1993 }
1994 
1995 /**
1996  * mpi3mr_delete_op_reply_q - delete operational reply queue
1997  * @mrioc: Adapter instance reference
1998  * @qidx: operational reply queue index
1999  *
2000  * Delete operatinal reply queue by issuing MPI request
2001  * through admin queue.
2002  *
2003  * Return:  0 on success, non-zero on failure.
2004  */
mpi3mr_delete_op_reply_q(struct mpi3mr_ioc * mrioc,u16 qidx)2005 static int mpi3mr_delete_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
2006 {
2007 	struct mpi3_delete_reply_queue_request delq_req;
2008 	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
2009 	int retval = 0;
2010 	u16 reply_qid = 0, midx;
2011 
2012 	reply_qid = op_reply_q->qid;
2013 
2014 	midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);
2015 
2016 	if (!reply_qid)	{
2017 		retval = -1;
2018 		ioc_err(mrioc, "Issue DelRepQ: called with invalid ReqQID\n");
2019 		goto out;
2020 	}
2021 
2022 	(op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) ? mrioc->default_qcount-- :
2023 	    mrioc->active_poll_qcount--;
2024 
2025 	memset(&delq_req, 0, sizeof(delq_req));
2026 	mutex_lock(&mrioc->init_cmds.mutex);
2027 	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
2028 		retval = -1;
2029 		ioc_err(mrioc, "Issue DelRepQ: Init command is in use\n");
2030 		mutex_unlock(&mrioc->init_cmds.mutex);
2031 		goto out;
2032 	}
2033 	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
2034 	mrioc->init_cmds.is_waiting = 1;
2035 	mrioc->init_cmds.callback = NULL;
2036 	delq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
2037 	delq_req.function = MPI3_FUNCTION_DELETE_REPLY_QUEUE;
2038 	delq_req.queue_id = cpu_to_le16(reply_qid);
2039 
2040 	init_completion(&mrioc->init_cmds.done);
2041 	retval = mpi3mr_admin_request_post(mrioc, &delq_req, sizeof(delq_req),
2042 	    1);
2043 	if (retval) {
2044 		ioc_err(mrioc, "Issue DelRepQ: Admin Post failed\n");
2045 		goto out_unlock;
2046 	}
2047 	wait_for_completion_timeout(&mrioc->init_cmds.done,
2048 	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
2049 	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2050 		ioc_err(mrioc, "delete reply queue timed out\n");
2051 		mpi3mr_check_rh_fault_ioc(mrioc,
2052 		    MPI3MR_RESET_FROM_DELREPQ_TIMEOUT);
2053 		retval = -1;
2054 		goto out_unlock;
2055 	}
2056 	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2057 	    != MPI3_IOCSTATUS_SUCCESS) {
2058 		ioc_err(mrioc,
2059 		    "Issue DelRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
2060 		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
2061 		    mrioc->init_cmds.ioc_loginfo);
2062 		retval = -1;
2063 		goto out_unlock;
2064 	}
2065 	mrioc->intr_info[midx].op_reply_q = NULL;
2066 
2067 	mpi3mr_free_op_reply_q_segments(mrioc, qidx);
2068 out_unlock:
2069 	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2070 	mutex_unlock(&mrioc->init_cmds.mutex);
2071 out:
2072 
2073 	return retval;
2074 }
2075 
2076 /**
2077  * mpi3mr_alloc_op_reply_q_segments -Alloc segmented reply pool
2078  * @mrioc: Adapter instance reference
2079  * @qidx: request queue index
2080  *
2081  * Allocate segmented memory pools for operational reply
2082  * queue.
2083  *
2084  * Return: 0 on success, non-zero on failure.
2085  */
mpi3mr_alloc_op_reply_q_segments(struct mpi3mr_ioc * mrioc,u16 qidx)2086 static int mpi3mr_alloc_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx)
2087 {
2088 	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
2089 	int i, size;
2090 	u64 *q_segment_list_entry = NULL;
2091 	struct segments *segments;
2092 
2093 	if (mrioc->enable_segqueue) {
2094 		op_reply_q->segment_qd =
2095 		    MPI3MR_OP_REP_Q_SEG_SIZE / mrioc->op_reply_desc_sz;
2096 
2097 		size = MPI3MR_OP_REP_Q_SEG_SIZE;
2098 
2099 		op_reply_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev,
2100 		    MPI3MR_MAX_SEG_LIST_SIZE, &op_reply_q->q_segment_list_dma,
2101 		    GFP_KERNEL);
2102 		if (!op_reply_q->q_segment_list)
2103 			return -ENOMEM;
2104 		q_segment_list_entry = (u64 *)op_reply_q->q_segment_list;
2105 	} else {
2106 		op_reply_q->segment_qd = op_reply_q->num_replies;
2107 		size = op_reply_q->num_replies * mrioc->op_reply_desc_sz;
2108 	}
2109 
2110 	op_reply_q->num_segments = DIV_ROUND_UP(op_reply_q->num_replies,
2111 	    op_reply_q->segment_qd);
2112 
2113 	op_reply_q->q_segments = kzalloc_objs(struct segments,
2114 					      op_reply_q->num_segments);
2115 	if (!op_reply_q->q_segments)
2116 		return -ENOMEM;
2117 
2118 	segments = op_reply_q->q_segments;
2119 	for (i = 0; i < op_reply_q->num_segments; i++) {
2120 		segments[i].segment =
2121 		    dma_alloc_coherent(&mrioc->pdev->dev,
2122 		    size, &segments[i].segment_dma, GFP_KERNEL);
2123 		if (!segments[i].segment)
2124 			return -ENOMEM;
2125 		if (mrioc->enable_segqueue)
2126 			q_segment_list_entry[i] =
2127 			    (unsigned long)segments[i].segment_dma;
2128 	}
2129 
2130 	return 0;
2131 }
2132 
2133 /**
2134  * mpi3mr_alloc_op_req_q_segments - Alloc segmented req pool.
2135  * @mrioc: Adapter instance reference
2136  * @qidx: request queue index
2137  *
2138  * Allocate segmented memory pools for operational request
2139  * queue.
2140  *
2141  * Return: 0 on success, non-zero on failure.
2142  */
mpi3mr_alloc_op_req_q_segments(struct mpi3mr_ioc * mrioc,u16 qidx)2143 static int mpi3mr_alloc_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx)
2144 {
2145 	struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx;
2146 	int i, size;
2147 	u64 *q_segment_list_entry = NULL;
2148 	struct segments *segments;
2149 
2150 	if (mrioc->enable_segqueue) {
2151 		op_req_q->segment_qd =
2152 		    MPI3MR_OP_REQ_Q_SEG_SIZE / mrioc->facts.op_req_sz;
2153 
2154 		size = MPI3MR_OP_REQ_Q_SEG_SIZE;
2155 
2156 		op_req_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev,
2157 		    MPI3MR_MAX_SEG_LIST_SIZE, &op_req_q->q_segment_list_dma,
2158 		    GFP_KERNEL);
2159 		if (!op_req_q->q_segment_list)
2160 			return -ENOMEM;
2161 		q_segment_list_entry = (u64 *)op_req_q->q_segment_list;
2162 
2163 	} else {
2164 		op_req_q->segment_qd = op_req_q->num_requests;
2165 		size = op_req_q->num_requests * mrioc->facts.op_req_sz;
2166 	}
2167 
2168 	op_req_q->num_segments = DIV_ROUND_UP(op_req_q->num_requests,
2169 	    op_req_q->segment_qd);
2170 
2171 	op_req_q->q_segments = kzalloc_objs(struct segments,
2172 					    op_req_q->num_segments);
2173 	if (!op_req_q->q_segments)
2174 		return -ENOMEM;
2175 
2176 	segments = op_req_q->q_segments;
2177 	for (i = 0; i < op_req_q->num_segments; i++) {
2178 		segments[i].segment =
2179 		    dma_alloc_coherent(&mrioc->pdev->dev,
2180 		    size, &segments[i].segment_dma, GFP_KERNEL);
2181 		if (!segments[i].segment)
2182 			return -ENOMEM;
2183 		if (mrioc->enable_segqueue)
2184 			q_segment_list_entry[i] =
2185 			    (unsigned long)segments[i].segment_dma;
2186 	}
2187 
2188 	return 0;
2189 }
2190 
2191 /**
2192  * mpi3mr_create_op_reply_q - create operational reply queue
2193  * @mrioc: Adapter instance reference
2194  * @qidx: operational reply queue index
2195  *
2196  * Create operatinal reply queue by issuing MPI request
2197  * through admin queue.
2198  *
2199  * Return:  0 on success, non-zero on failure.
2200  */
mpi3mr_create_op_reply_q(struct mpi3mr_ioc * mrioc,u16 qidx)2201 static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
2202 {
2203 	struct mpi3_create_reply_queue_request create_req;
2204 	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
2205 	int retval = 0;
2206 	u16 reply_qid = 0, midx;
2207 
2208 	reply_qid = op_reply_q->qid;
2209 
2210 	midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);
2211 
2212 	if (reply_qid) {
2213 		retval = -1;
2214 		ioc_err(mrioc, "CreateRepQ: called for duplicate qid %d\n",
2215 		    reply_qid);
2216 
2217 		return retval;
2218 	}
2219 
2220 	reply_qid = qidx + 1;
2221 
2222 	if (mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) {
2223 		if (mrioc->pdev->revision)
2224 			op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;
2225 		else
2226 			op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD4K;
2227 	} else
2228 		op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD2K;
2229 
2230 	op_reply_q->ci = 0;
2231 	op_reply_q->ephase = 1;
2232 	atomic_set(&op_reply_q->pend_ios, 0);
2233 	atomic_set(&op_reply_q->in_use, 0);
2234 	op_reply_q->enable_irq_poll = false;
2235 	op_reply_q->qfull_watermark =
2236 		op_reply_q->num_replies - (MPI3MR_THRESHOLD_REPLY_COUNT * 2);
2237 
2238 	if (!op_reply_q->q_segments) {
2239 		retval = mpi3mr_alloc_op_reply_q_segments(mrioc, qidx);
2240 		if (retval) {
2241 			mpi3mr_free_op_reply_q_segments(mrioc, qidx);
2242 			goto out;
2243 		}
2244 	}
2245 
2246 	memset(&create_req, 0, sizeof(create_req));
2247 	mutex_lock(&mrioc->init_cmds.mutex);
2248 	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
2249 		retval = -1;
2250 		ioc_err(mrioc, "CreateRepQ: Init command is in use\n");
2251 		goto out_unlock;
2252 	}
2253 	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
2254 	mrioc->init_cmds.is_waiting = 1;
2255 	mrioc->init_cmds.callback = NULL;
2256 	create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
2257 	create_req.function = MPI3_FUNCTION_CREATE_REPLY_QUEUE;
2258 	create_req.queue_id = cpu_to_le16(reply_qid);
2259 
2260 	if (midx < (mrioc->intr_info_count - mrioc->requested_poll_qcount))
2261 		op_reply_q->qtype = MPI3MR_DEFAULT_QUEUE;
2262 	else
2263 		op_reply_q->qtype = MPI3MR_POLL_QUEUE;
2264 
2265 	if (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) {
2266 		create_req.flags =
2267 			MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE;
2268 		create_req.msix_index =
2269 			cpu_to_le16(mrioc->intr_info[midx].msix_index);
2270 	} else {
2271 		create_req.msix_index = cpu_to_le16(mrioc->intr_info_count - 1);
2272 		ioc_info(mrioc, "create reply queue(polled): for qid(%d), midx(%d)\n",
2273 			reply_qid, midx);
2274 		if (!mrioc->active_poll_qcount)
2275 			disable_irq_nosync(pci_irq_vector(mrioc->pdev,
2276 			    mrioc->intr_info_count - 1));
2277 	}
2278 
2279 	if (mrioc->enable_segqueue) {
2280 		create_req.flags |=
2281 		    MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED;
2282 		create_req.base_address = cpu_to_le64(
2283 		    op_reply_q->q_segment_list_dma);
2284 	} else
2285 		create_req.base_address = cpu_to_le64(
2286 		    op_reply_q->q_segments[0].segment_dma);
2287 
2288 	create_req.size = cpu_to_le16(op_reply_q->num_replies);
2289 
2290 	init_completion(&mrioc->init_cmds.done);
2291 	retval = mpi3mr_admin_request_post(mrioc, &create_req,
2292 	    sizeof(create_req), 1);
2293 	if (retval) {
2294 		ioc_err(mrioc, "CreateRepQ: Admin Post failed\n");
2295 		goto out_unlock;
2296 	}
2297 	wait_for_completion_timeout(&mrioc->init_cmds.done,
2298 	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
2299 	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2300 		ioc_err(mrioc, "create reply queue timed out\n");
2301 		mpi3mr_check_rh_fault_ioc(mrioc,
2302 		    MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT);
2303 		retval = -1;
2304 		goto out_unlock;
2305 	}
2306 	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2307 	    != MPI3_IOCSTATUS_SUCCESS) {
2308 		ioc_err(mrioc,
2309 		    "CreateRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
2310 		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
2311 		    mrioc->init_cmds.ioc_loginfo);
2312 		retval = -1;
2313 		goto out_unlock;
2314 	}
2315 	op_reply_q->qid = reply_qid;
2316 	if (midx < mrioc->intr_info_count)
2317 		mrioc->intr_info[midx].op_reply_q = op_reply_q;
2318 
2319 	(op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) ? mrioc->default_qcount++ :
2320 	    mrioc->active_poll_qcount++;
2321 
2322 out_unlock:
2323 	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2324 	mutex_unlock(&mrioc->init_cmds.mutex);
2325 out:
2326 
2327 	return retval;
2328 }
2329 
2330 /**
2331  * mpi3mr_create_op_req_q - create operational request queue
2332  * @mrioc: Adapter instance reference
2333  * @idx: operational request queue index
2334  * @reply_qid: Reply queue ID
2335  *
2336  * Create operatinal request queue by issuing MPI request
2337  * through admin queue.
2338  *
2339  * Return:  0 on success, non-zero on failure.
2340  */
mpi3mr_create_op_req_q(struct mpi3mr_ioc * mrioc,u16 idx,u16 reply_qid)2341 static int mpi3mr_create_op_req_q(struct mpi3mr_ioc *mrioc, u16 idx,
2342 	u16 reply_qid)
2343 {
2344 	struct mpi3_create_request_queue_request create_req;
2345 	struct op_req_qinfo *op_req_q = mrioc->req_qinfo + idx;
2346 	int retval = 0;
2347 	u16 req_qid = 0;
2348 
2349 	req_qid = op_req_q->qid;
2350 
2351 	if (req_qid) {
2352 		retval = -1;
2353 		ioc_err(mrioc, "CreateReqQ: called for duplicate qid %d\n",
2354 		    req_qid);
2355 
2356 		return retval;
2357 	}
2358 	req_qid = idx + 1;
2359 
2360 	op_req_q->num_requests = MPI3MR_OP_REQ_Q_QD;
2361 	op_req_q->ci = 0;
2362 	op_req_q->pi = 0;
2363 	op_req_q->reply_qid = reply_qid;
2364 	spin_lock_init(&op_req_q->q_lock);
2365 
2366 	if (!op_req_q->q_segments) {
2367 		retval = mpi3mr_alloc_op_req_q_segments(mrioc, idx);
2368 		if (retval) {
2369 			mpi3mr_free_op_req_q_segments(mrioc, idx);
2370 			goto out;
2371 		}
2372 	}
2373 
2374 	memset(&create_req, 0, sizeof(create_req));
2375 	mutex_lock(&mrioc->init_cmds.mutex);
2376 	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
2377 		retval = -1;
2378 		ioc_err(mrioc, "CreateReqQ: Init command is in use\n");
2379 		goto out_unlock;
2380 	}
2381 	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
2382 	mrioc->init_cmds.is_waiting = 1;
2383 	mrioc->init_cmds.callback = NULL;
2384 	create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
2385 	create_req.function = MPI3_FUNCTION_CREATE_REQUEST_QUEUE;
2386 	create_req.queue_id = cpu_to_le16(req_qid);
2387 	if (mrioc->enable_segqueue) {
2388 		create_req.flags =
2389 		    MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED;
2390 		create_req.base_address = cpu_to_le64(
2391 		    op_req_q->q_segment_list_dma);
2392 	} else
2393 		create_req.base_address = cpu_to_le64(
2394 		    op_req_q->q_segments[0].segment_dma);
2395 	create_req.reply_queue_id = cpu_to_le16(reply_qid);
2396 	create_req.size = cpu_to_le16(op_req_q->num_requests);
2397 
2398 	init_completion(&mrioc->init_cmds.done);
2399 	retval = mpi3mr_admin_request_post(mrioc, &create_req,
2400 	    sizeof(create_req), 1);
2401 	if (retval) {
2402 		ioc_err(mrioc, "CreateReqQ: Admin Post failed\n");
2403 		goto out_unlock;
2404 	}
2405 	wait_for_completion_timeout(&mrioc->init_cmds.done,
2406 	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
2407 	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2408 		ioc_err(mrioc, "create request queue timed out\n");
2409 		mpi3mr_check_rh_fault_ioc(mrioc,
2410 		    MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT);
2411 		retval = -1;
2412 		goto out_unlock;
2413 	}
2414 	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2415 	    != MPI3_IOCSTATUS_SUCCESS) {
2416 		ioc_err(mrioc,
2417 		    "CreateReqQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
2418 		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
2419 		    mrioc->init_cmds.ioc_loginfo);
2420 		retval = -1;
2421 		goto out_unlock;
2422 	}
2423 	op_req_q->qid = req_qid;
2424 
2425 out_unlock:
2426 	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2427 	mutex_unlock(&mrioc->init_cmds.mutex);
2428 out:
2429 
2430 	return retval;
2431 }
2432 
2433 /**
2434  * mpi3mr_create_op_queues - create operational queue pairs
2435  * @mrioc: Adapter instance reference
2436  *
2437  * Allocate memory for operational queue meta data and call
2438  * create request and reply queue functions.
2439  *
2440  * Return: 0 on success, non-zero on failures.
2441  */
mpi3mr_create_op_queues(struct mpi3mr_ioc * mrioc)2442 static int mpi3mr_create_op_queues(struct mpi3mr_ioc *mrioc)
2443 {
2444 	int retval = 0;
2445 	u16 num_queues = 0, i = 0, msix_count_op_q = 1;
2446 	u32 ioc_status;
2447 	enum mpi3mr_iocstate ioc_state;
2448 
2449 	num_queues = min_t(int, mrioc->facts.max_op_reply_q,
2450 	    mrioc->facts.max_op_req_q);
2451 
2452 	msix_count_op_q =
2453 	    mrioc->intr_info_count - mrioc->op_reply_q_offset;
2454 	if (!mrioc->num_queues)
2455 		mrioc->num_queues = min_t(int, num_queues, msix_count_op_q);
2456 	/*
2457 	 * During reset set the num_queues to the number of queues
2458 	 * that was set before the reset.
2459 	 */
2460 	num_queues = mrioc->num_op_reply_q ?
2461 	    mrioc->num_op_reply_q : mrioc->num_queues;
2462 	ioc_info(mrioc, "trying to create %d operational queue pairs\n",
2463 	    num_queues);
2464 
2465 	if (!mrioc->req_qinfo) {
2466 		mrioc->req_qinfo = kzalloc_objs(struct op_req_qinfo, num_queues);
2467 		if (!mrioc->req_qinfo) {
2468 			retval = -1;
2469 			goto out_failed;
2470 		}
2471 
2472 		mrioc->op_reply_qinfo = kzalloc(sizeof(struct op_reply_qinfo) *
2473 		    num_queues, GFP_KERNEL);
2474 		if (!mrioc->op_reply_qinfo) {
2475 			retval = -1;
2476 			goto out_failed;
2477 		}
2478 	}
2479 
2480 	if (mrioc->enable_segqueue)
2481 		ioc_info(mrioc,
2482 		    "allocating operational queues through segmented queues\n");
2483 
2484 	for (i = 0; i < num_queues; i++) {
2485 		if (mpi3mr_create_op_reply_q(mrioc, i)) {
2486 			ioc_err(mrioc, "Cannot create OP RepQ %d\n", i);
2487 			break;
2488 		}
2489 		if (mpi3mr_create_op_req_q(mrioc, i,
2490 		    mrioc->op_reply_qinfo[i].qid)) {
2491 			ioc_err(mrioc, "Cannot create OP ReqQ %d\n", i);
2492 			mpi3mr_delete_op_reply_q(mrioc, i);
2493 			break;
2494 		}
2495 	}
2496 
2497 	if (i == 0) {
2498 		/* Not even one queue is created successfully*/
2499 		retval = -1;
2500 		goto out_failed;
2501 	}
2502 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
2503 	ioc_state = mpi3mr_get_iocstate(mrioc);
2504 	if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
2505 	    ioc_state != MRIOC_STATE_READY) {
2506 		mpi3mr_print_fault_info(mrioc);
2507 		retval = -1;
2508 		goto out_failed;
2509 	}
2510 	mrioc->num_op_reply_q = mrioc->num_op_req_q = i;
2511 	ioc_info(mrioc,
2512 	    "successfully created %d operational queue pairs(default/polled) queue = (%d/%d)\n",
2513 	    mrioc->num_op_reply_q, mrioc->default_qcount,
2514 	    mrioc->active_poll_qcount);
2515 
2516 	return retval;
2517 out_failed:
2518 	kfree(mrioc->req_qinfo);
2519 	mrioc->req_qinfo = NULL;
2520 
2521 	kfree(mrioc->op_reply_qinfo);
2522 	mrioc->op_reply_qinfo = NULL;
2523 
2524 	return retval;
2525 }
2526 
2527 /**
2528  * mpi3mr_op_request_post - Post request to operational queue
2529  * @mrioc: Adapter reference
2530  * @op_req_q: Operational request queue info
2531  * @req: MPI3 request
2532  *
2533  * Post the MPI3 request into operational request queue and
2534  * inform the controller, if the queue is full return
2535  * appropriate error.
2536  *
2537  * Return: 0 on success, non-zero on failure.
2538  */
mpi3mr_op_request_post(struct mpi3mr_ioc * mrioc,struct op_req_qinfo * op_req_q,u8 * req)2539 int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
2540 	struct op_req_qinfo *op_req_q, u8 *req)
2541 {
2542 	u16 pi = 0, max_entries, reply_qidx = 0, midx;
2543 	int retval = 0;
2544 	unsigned long flags;
2545 	u8 *req_entry;
2546 	void *segment_base_addr;
2547 	u16 req_sz = mrioc->facts.op_req_sz;
2548 	struct segments *segments = op_req_q->q_segments;
2549 	struct op_reply_qinfo *op_reply_q = NULL;
2550 
2551 	reply_qidx = op_req_q->reply_qid - 1;
2552 	op_reply_q = mrioc->op_reply_qinfo + reply_qidx;
2553 
2554 	if (mrioc->unrecoverable)
2555 		return -EFAULT;
2556 
2557 	spin_lock_irqsave(&op_req_q->q_lock, flags);
2558 	pi = op_req_q->pi;
2559 	max_entries = op_req_q->num_requests;
2560 
2561 	if (mpi3mr_check_req_qfull(op_req_q)) {
2562 		midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(
2563 		    reply_qidx, mrioc->op_reply_q_offset);
2564 		mpi3mr_process_op_reply_q(mrioc, mrioc->intr_info[midx].op_reply_q);
2565 
2566 		if (mpi3mr_check_req_qfull(op_req_q)) {
2567 			retval = -EAGAIN;
2568 			goto out;
2569 		}
2570 	}
2571 
2572 	if (mrioc->reset_in_progress) {
2573 		ioc_err(mrioc, "OpReqQ submit reset in progress\n");
2574 		retval = -EAGAIN;
2575 		goto out;
2576 	}
2577 	if (mrioc->pci_err_recovery) {
2578 		ioc_err(mrioc, "operational request queue submission failed due to pci error recovery in progress\n");
2579 		retval = -EAGAIN;
2580 		goto out;
2581 	}
2582 
2583 	/* Reply queue is nearing to get full, push back IOs to SML */
2584 	if ((mrioc->prevent_reply_qfull == true) &&
2585 		(atomic_read(&op_reply_q->pend_ios) >
2586 	     (op_reply_q->qfull_watermark))) {
2587 		atomic_inc(&mrioc->reply_qfull_count);
2588 		retval = -EAGAIN;
2589 		goto out;
2590 	}
2591 
2592 	segment_base_addr = segments[pi / op_req_q->segment_qd].segment;
2593 	req_entry = (u8 *)segment_base_addr +
2594 	    ((pi % op_req_q->segment_qd) * req_sz);
2595 
2596 	memset(req_entry, 0, req_sz);
2597 	memcpy(req_entry, req, MPI3MR_ADMIN_REQ_FRAME_SZ);
2598 
2599 	if (++pi == max_entries)
2600 		pi = 0;
2601 	op_req_q->pi = pi;
2602 
2603 #ifndef CONFIG_PREEMPT_RT
2604 	if (atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios)
2605 	    > MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT)
2606 		mrioc->op_reply_qinfo[reply_qidx].enable_irq_poll = true;
2607 #else
2608 	atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios);
2609 #endif
2610 
2611 	writel(op_req_q->pi,
2612 	    &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].producer_index);
2613 
2614 out:
2615 	spin_unlock_irqrestore(&op_req_q->q_lock, flags);
2616 	return retval;
2617 }
2618 
2619 /**
2620  * mpi3mr_check_rh_fault_ioc - check reset history and fault
2621  * controller
2622  * @mrioc: Adapter instance reference
2623  * @reason_code: reason code for the fault.
2624  *
2625  * This routine will save snapdump and fault the controller with
2626  * the given reason code if it is not already in the fault or
2627  * not asynchronosuly reset. This will be used to handle
2628  * initilaization time faults/resets/timeout as in those cases
2629  * immediate soft reset invocation is not required.
2630  *
2631  * Return:  None.
2632  */
mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc * mrioc,u32 reason_code)2633 void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code)
2634 {
2635 	u32 ioc_status, host_diagnostic, timeout;
2636 	union mpi3mr_trigger_data trigger_data;
2637 
2638 	if (mrioc->unrecoverable) {
2639 		ioc_err(mrioc, "controller is unrecoverable\n");
2640 		return;
2641 	}
2642 
2643 	if (!pci_device_is_present(mrioc->pdev)) {
2644 		mrioc->unrecoverable = 1;
2645 		ioc_err(mrioc, "controller is not present\n");
2646 		return;
2647 	}
2648 	memset(&trigger_data, 0, sizeof(trigger_data));
2649 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
2650 
2651 	if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
2652 		mpi3mr_set_trigger_data_in_all_hdb(mrioc,
2653 		    MPI3MR_HDB_TRIGGER_TYPE_FW_RELEASED, NULL, 0);
2654 		return;
2655 	} else if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
2656 		trigger_data.fault = (readl(&mrioc->sysif_regs->fault) &
2657 		      MPI3_SYSIF_FAULT_CODE_MASK);
2658 
2659 		mpi3mr_set_trigger_data_in_all_hdb(mrioc,
2660 		    MPI3MR_HDB_TRIGGER_TYPE_FAULT, &trigger_data, 0);
2661 		mpi3mr_print_fault_info(mrioc);
2662 		mpi3mr_save_fault_info(mrioc);
2663 		mrioc->fault_during_init = 1;
2664 		mrioc->fwfault_counter++;
2665 		return;
2666 	}
2667 
2668 	mpi3mr_set_diagsave(mrioc);
2669 	mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
2670 	    reason_code);
2671 	trigger_data.fault = (readl(&mrioc->sysif_regs->fault) &
2672 		      MPI3_SYSIF_FAULT_CODE_MASK);
2673 	mpi3mr_set_trigger_data_in_all_hdb(mrioc, MPI3MR_HDB_TRIGGER_TYPE_FAULT,
2674 	    &trigger_data, 0);
2675 	timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
2676 	do {
2677 		host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
2678 		if (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
2679 			break;
2680 		msleep(100);
2681 	} while (--timeout);
2682 
2683 	mpi3mr_save_fault_info(mrioc);
2684 	mrioc->fault_during_init = 1;
2685 	mrioc->fwfault_counter++;
2686 }
2687 
2688 /**
2689  * mpi3mr_sync_timestamp - Issue time stamp sync request
2690  * @mrioc: Adapter reference
2691  *
2692  * Issue IO unit control MPI request to synchornize firmware
2693  * timestamp with host time.
2694  *
2695  * Return: 0 on success, non-zero on failure.
2696  */
mpi3mr_sync_timestamp(struct mpi3mr_ioc * mrioc)2697 static int mpi3mr_sync_timestamp(struct mpi3mr_ioc *mrioc)
2698 {
2699 	ktime_t current_time;
2700 	struct mpi3_iounit_control_request iou_ctrl;
2701 	int retval = 0;
2702 
2703 	memset(&iou_ctrl, 0, sizeof(iou_ctrl));
2704 	mutex_lock(&mrioc->init_cmds.mutex);
2705 	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
2706 		retval = -1;
2707 		ioc_err(mrioc, "Issue IOUCTL time_stamp: command is in use\n");
2708 		mutex_unlock(&mrioc->init_cmds.mutex);
2709 		goto out;
2710 	}
2711 	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
2712 	mrioc->init_cmds.is_waiting = 1;
2713 	mrioc->init_cmds.callback = NULL;
2714 	iou_ctrl.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
2715 	iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL;
2716 	iou_ctrl.operation = MPI3_CTRL_OP_UPDATE_TIMESTAMP;
2717 	current_time = ktime_get_real();
2718 	iou_ctrl.param64[0] = cpu_to_le64(ktime_to_ms(current_time));
2719 
2720 	init_completion(&mrioc->init_cmds.done);
2721 	retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl,
2722 	    sizeof(iou_ctrl), 0);
2723 	if (retval) {
2724 		ioc_err(mrioc, "Issue IOUCTL time_stamp: Admin Post failed\n");
2725 		goto out_unlock;
2726 	}
2727 
2728 	wait_for_completion_timeout(&mrioc->init_cmds.done,
2729 	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
2730 	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2731 		ioc_err(mrioc, "Issue IOUCTL time_stamp: command timed out\n");
2732 		mrioc->init_cmds.is_waiting = 0;
2733 		if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
2734 			mpi3mr_check_rh_fault_ioc(mrioc,
2735 			    MPI3MR_RESET_FROM_TSU_TIMEOUT);
2736 		retval = -1;
2737 		goto out_unlock;
2738 	}
2739 	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2740 	    != MPI3_IOCSTATUS_SUCCESS) {
2741 		ioc_err(mrioc,
2742 		    "Issue IOUCTL time_stamp: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
2743 		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
2744 		    mrioc->init_cmds.ioc_loginfo);
2745 		retval = -1;
2746 		goto out_unlock;
2747 	}
2748 
2749 out_unlock:
2750 	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2751 	mutex_unlock(&mrioc->init_cmds.mutex);
2752 
2753 out:
2754 	return retval;
2755 }
2756 
2757 /**
2758  * mpi3mr_print_pkg_ver - display controller fw package version
2759  * @mrioc: Adapter reference
2760  *
2761  * Retrieve firmware package version from the component image
2762  * header of the controller flash and display it.
2763  *
2764  * Return: 0 on success and non-zero on failure.
2765  */
mpi3mr_print_pkg_ver(struct mpi3mr_ioc * mrioc)2766 static int mpi3mr_print_pkg_ver(struct mpi3mr_ioc *mrioc)
2767 {
2768 	struct mpi3_ci_upload_request ci_upload;
2769 	int retval = -1;
2770 	void *data = NULL;
2771 	dma_addr_t data_dma;
2772 	struct mpi3_ci_manifest_mpi *manifest;
2773 	u32 data_len = sizeof(struct mpi3_ci_manifest_mpi);
2774 	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
2775 
2776 	data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
2777 	    GFP_KERNEL);
2778 	if (!data)
2779 		return -ENOMEM;
2780 
2781 	memset(&ci_upload, 0, sizeof(ci_upload));
2782 	mutex_lock(&mrioc->init_cmds.mutex);
2783 	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
2784 		ioc_err(mrioc, "sending get package version failed due to command in use\n");
2785 		mutex_unlock(&mrioc->init_cmds.mutex);
2786 		goto out;
2787 	}
2788 	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
2789 	mrioc->init_cmds.is_waiting = 1;
2790 	mrioc->init_cmds.callback = NULL;
2791 	ci_upload.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
2792 	ci_upload.function = MPI3_FUNCTION_CI_UPLOAD;
2793 	ci_upload.msg_flags = MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_PRIMARY;
2794 	ci_upload.signature1 = cpu_to_le32(MPI3_IMAGE_HEADER_SIGNATURE1_MANIFEST);
2795 	ci_upload.image_offset = cpu_to_le32(MPI3_IMAGE_HEADER_SIZE);
2796 	ci_upload.segment_size = cpu_to_le32(data_len);
2797 
2798 	mpi3mr_add_sg_single(&ci_upload.sgl, sgl_flags, data_len,
2799 	    data_dma);
2800 	init_completion(&mrioc->init_cmds.done);
2801 	retval = mpi3mr_admin_request_post(mrioc, &ci_upload,
2802 	    sizeof(ci_upload), 1);
2803 	if (retval) {
2804 		ioc_err(mrioc, "posting get package version failed\n");
2805 		goto out_unlock;
2806 	}
2807 	wait_for_completion_timeout(&mrioc->init_cmds.done,
2808 	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
2809 	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2810 		ioc_err(mrioc, "get package version timed out\n");
2811 		mpi3mr_check_rh_fault_ioc(mrioc,
2812 		    MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT);
2813 		retval = -1;
2814 		goto out_unlock;
2815 	}
2816 	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2817 	    == MPI3_IOCSTATUS_SUCCESS) {
2818 		manifest = (struct mpi3_ci_manifest_mpi *) data;
2819 		if (manifest->manifest_type == MPI3_CI_MANIFEST_TYPE_MPI) {
2820 			ioc_info(mrioc,
2821 			    "firmware package version(%d.%d.%d.%d.%05d-%05d)\n",
2822 			    manifest->package_version.gen_major,
2823 			    manifest->package_version.gen_minor,
2824 			    manifest->package_version.phase_major,
2825 			    manifest->package_version.phase_minor,
2826 			    manifest->package_version.customer_id,
2827 			    manifest->package_version.build_num);
2828 		}
2829 	}
2830 	retval = 0;
2831 out_unlock:
2832 	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2833 	mutex_unlock(&mrioc->init_cmds.mutex);
2834 
2835 out:
2836 	if (data)
2837 		dma_free_coherent(&mrioc->pdev->dev, data_len, data,
2838 		    data_dma);
2839 	return retval;
2840 }
2841 
2842 /**
2843  * mpi3mr_watchdog_work - watchdog thread to monitor faults
2844  * @work: work struct
2845  *
2846  * Watch dog work periodically executed (1 second interval) to
2847  * monitor firmware fault and to issue periodic timer sync to
2848  * the firmware.
2849  *
2850  * Return: Nothing.
2851  */
mpi3mr_watchdog_work(struct work_struct * work)2852 static void mpi3mr_watchdog_work(struct work_struct *work)
2853 {
2854 	struct mpi3mr_ioc *mrioc =
2855 	    container_of(work, struct mpi3mr_ioc, watchdog_work.work);
2856 	unsigned long flags;
2857 	enum mpi3mr_iocstate ioc_state;
2858 	u32 host_diagnostic, ioc_status;
2859 	union mpi3mr_trigger_data trigger_data;
2860 	u16 reset_reason = MPI3MR_RESET_FROM_FAULT_WATCH;
2861 
2862 	if (mrioc->fault_during_init) {
2863 		mpi3mr_fault_uevent_emit(mrioc);
2864 		mrioc->fault_during_init = 0;
2865 	}
2866 
2867 	if (mrioc->reset_in_progress || mrioc->pci_err_recovery)
2868 		return;
2869 
2870 	if (!mrioc->unrecoverable && !pci_device_is_present(mrioc->pdev)) {
2871 		ioc_err(mrioc, "watchdog could not detect the controller\n");
2872 		mrioc->unrecoverable = 1;
2873 	}
2874 
2875 	if (mrioc->unrecoverable) {
2876 		ioc_err(mrioc,
2877 		    "flush pending commands for unrecoverable controller\n");
2878 		mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
2879 		return;
2880 	}
2881 
2882 	if (atomic_read(&mrioc->admin_pend_isr)) {
2883 		ioc_err(mrioc, "Unprocessed admin ISR instance found\n"
2884 				"flush admin replies\n");
2885 		mpi3mr_process_admin_reply_q(mrioc);
2886 	}
2887 
2888 	if (!(mrioc->facts.ioc_capabilities &
2889 		MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_IOC) &&
2890 		(mrioc->ts_update_counter++ >= mrioc->ts_update_interval)) {
2891 
2892 		mrioc->ts_update_counter = 0;
2893 		mpi3mr_sync_timestamp(mrioc);
2894 	}
2895 
2896 	if ((mrioc->prepare_for_reset) &&
2897 	    ((mrioc->prepare_for_reset_timeout_counter++) >=
2898 	     MPI3MR_PREPARE_FOR_RESET_TIMEOUT)) {
2899 		mpi3mr_soft_reset_handler(mrioc,
2900 		    MPI3MR_RESET_FROM_CIACTVRST_TIMER, 1);
2901 		return;
2902 	}
2903 
2904 	memset(&trigger_data, 0, sizeof(trigger_data));
2905 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
2906 	if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
2907 		mpi3mr_set_trigger_data_in_all_hdb(mrioc,
2908 		    MPI3MR_HDB_TRIGGER_TYPE_FW_RELEASED, NULL, 0);
2909 		mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_FIRMWARE, 0);
2910 		return;
2911 	}
2912 
2913 	/*Check for fault state every one second and issue Soft reset*/
2914 	ioc_state = mpi3mr_get_iocstate(mrioc);
2915 	if (ioc_state != MRIOC_STATE_FAULT)
2916 		goto schedule_work;
2917 
2918 	trigger_data.fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK;
2919 	mpi3mr_set_trigger_data_in_all_hdb(mrioc,
2920 	    MPI3MR_HDB_TRIGGER_TYPE_FAULT, &trigger_data, 0);
2921 	host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
2922 	if (host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS) {
2923 		if (!mrioc->diagsave_timeout) {
2924 			mpi3mr_print_fault_info(mrioc);
2925 			ioc_warn(mrioc, "diag save in progress\n");
2926 		}
2927 		if ((mrioc->diagsave_timeout++) <= MPI3_SYSIF_DIAG_SAVE_TIMEOUT)
2928 			goto schedule_work;
2929 	}
2930 
2931 	mpi3mr_print_fault_info(mrioc);
2932 	mrioc->diagsave_timeout = 0;
2933 
2934 	if (!mpi3mr_is_fault_recoverable(mrioc)) {
2935 		mrioc->unrecoverable = 1;
2936 		goto schedule_work;
2937 	}
2938 
2939 	mpi3mr_save_fault_info(mrioc);
2940 	mpi3mr_fault_uevent_emit(mrioc);
2941 	mrioc->fwfault_counter++;
2942 
2943 	switch (trigger_data.fault) {
2944 	case MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED:
2945 	case MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED:
2946 		ioc_warn(mrioc,
2947 		    "controller requires system power cycle, marking controller as unrecoverable\n");
2948 		mrioc->unrecoverable = 1;
2949 		goto schedule_work;
2950 	case MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS:
2951 		goto schedule_work;
2952 	case MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET:
2953 		reset_reason = MPI3MR_RESET_FROM_CIACTIV_FAULT;
2954 		break;
2955 	default:
2956 		break;
2957 	}
2958 	mpi3mr_soft_reset_handler(mrioc, reset_reason, 0);
2959 	return;
2960 
2961 schedule_work:
2962 	spin_lock_irqsave(&mrioc->watchdog_lock, flags);
2963 	if (mrioc->watchdog_work_q)
2964 		queue_delayed_work(mrioc->watchdog_work_q,
2965 		    &mrioc->watchdog_work,
2966 		    msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
2967 	spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
2968 	return;
2969 }
2970 
2971 /**
2972  * mpi3mr_start_watchdog - Start watchdog
2973  * @mrioc: Adapter instance reference
2974  *
2975  * Create and start the watchdog thread to monitor controller
2976  * faults.
2977  *
2978  * Return: Nothing.
2979  */
mpi3mr_start_watchdog(struct mpi3mr_ioc * mrioc)2980 void mpi3mr_start_watchdog(struct mpi3mr_ioc *mrioc)
2981 {
2982 	if (mrioc->watchdog_work_q)
2983 		return;
2984 
2985 	INIT_DELAYED_WORK(&mrioc->watchdog_work, mpi3mr_watchdog_work);
2986 	mrioc->watchdog_work_q = alloc_ordered_workqueue(
2987 		"watchdog_%s%d", WQ_MEM_RECLAIM, mrioc->name, mrioc->id);
2988 	if (!mrioc->watchdog_work_q) {
2989 		ioc_err(mrioc, "%s: failed (line=%d)\n", __func__, __LINE__);
2990 		return;
2991 	}
2992 
2993 	if (mrioc->watchdog_work_q)
2994 		queue_delayed_work(mrioc->watchdog_work_q,
2995 		    &mrioc->watchdog_work,
2996 		    msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
2997 }
2998 
2999 /**
3000  * mpi3mr_stop_watchdog - Stop watchdog
3001  * @mrioc: Adapter instance reference
3002  *
3003  * Stop the watchdog thread created to monitor controller
3004  * faults.
3005  *
3006  * Return: Nothing.
3007  */
mpi3mr_stop_watchdog(struct mpi3mr_ioc * mrioc)3008 void mpi3mr_stop_watchdog(struct mpi3mr_ioc *mrioc)
3009 {
3010 	unsigned long flags;
3011 	struct workqueue_struct *wq;
3012 
3013 	spin_lock_irqsave(&mrioc->watchdog_lock, flags);
3014 	wq = mrioc->watchdog_work_q;
3015 	mrioc->watchdog_work_q = NULL;
3016 	spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
3017 	if (wq) {
3018 		if (!cancel_delayed_work_sync(&mrioc->watchdog_work))
3019 			flush_workqueue(wq);
3020 		destroy_workqueue(wq);
3021 	}
3022 }
3023 
3024 /**
3025  * mpi3mr_setup_admin_qpair - Setup admin queue pair
3026  * @mrioc: Adapter instance reference
3027  *
3028  * Allocate memory for admin queue pair if required and register
3029  * the admin queue with the controller.
3030  *
3031  * Return: 0 on success, non-zero on failures.
3032  */
mpi3mr_setup_admin_qpair(struct mpi3mr_ioc * mrioc)3033 static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc)
3034 {
3035 	int retval = 0;
3036 	u32 num_admin_entries = 0;
3037 
3038 	mrioc->admin_req_q_sz = MPI3MR_ADMIN_REQ_Q_SIZE;
3039 	mrioc->num_admin_req = mrioc->admin_req_q_sz /
3040 	    MPI3MR_ADMIN_REQ_FRAME_SZ;
3041 	mrioc->admin_req_ci = mrioc->admin_req_pi = 0;
3042 
3043 	mrioc->admin_reply_q_sz = MPI3MR_ADMIN_REPLY_Q_SIZE;
3044 	mrioc->num_admin_replies = mrioc->admin_reply_q_sz /
3045 	    MPI3MR_ADMIN_REPLY_FRAME_SZ;
3046 	mrioc->admin_reply_ci = 0;
3047 	mrioc->admin_reply_ephase = 1;
3048 	atomic_set(&mrioc->admin_reply_q_in_use, 0);
3049 	atomic_set(&mrioc->admin_pend_isr, 0);
3050 
3051 	if (!mrioc->admin_req_base) {
3052 		mrioc->admin_req_base = dma_alloc_coherent(&mrioc->pdev->dev,
3053 		    mrioc->admin_req_q_sz, &mrioc->admin_req_dma, GFP_KERNEL);
3054 
3055 		if (!mrioc->admin_req_base) {
3056 			retval = -1;
3057 			goto out_failed;
3058 		}
3059 
3060 		mrioc->admin_reply_base = dma_alloc_coherent(&mrioc->pdev->dev,
3061 		    mrioc->admin_reply_q_sz, &mrioc->admin_reply_dma,
3062 		    GFP_KERNEL);
3063 
3064 		if (!mrioc->admin_reply_base) {
3065 			retval = -1;
3066 			goto out_failed;
3067 		}
3068 	}
3069 
3070 	num_admin_entries = (mrioc->num_admin_replies << 16) |
3071 	    (mrioc->num_admin_req);
3072 	writel(num_admin_entries, &mrioc->sysif_regs->admin_queue_num_entries);
3073 	mpi3mr_writeq(mrioc->admin_req_dma,
3074 		&mrioc->sysif_regs->admin_request_queue_address,
3075 		&mrioc->adm_req_q_bar_writeq_lock);
3076 	mpi3mr_writeq(mrioc->admin_reply_dma,
3077 		&mrioc->sysif_regs->admin_reply_queue_address,
3078 		&mrioc->adm_reply_q_bar_writeq_lock);
3079 	writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi);
3080 	writel(mrioc->admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
3081 	return retval;
3082 
3083 out_failed:
3084 
3085 	if (mrioc->admin_reply_base) {
3086 		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
3087 		    mrioc->admin_reply_base, mrioc->admin_reply_dma);
3088 		mrioc->admin_reply_base = NULL;
3089 	}
3090 	if (mrioc->admin_req_base) {
3091 		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
3092 		    mrioc->admin_req_base, mrioc->admin_req_dma);
3093 		mrioc->admin_req_base = NULL;
3094 	}
3095 	return retval;
3096 }
3097 
3098 /**
3099  * mpi3mr_issue_iocfacts - Send IOC Facts
3100  * @mrioc: Adapter instance reference
3101  * @facts_data: Cached IOC facts data
3102  *
3103  * Issue IOC Facts MPI request through admin queue and wait for
3104  * the completion of it or time out.
3105  *
3106  * Return: 0 on success, non-zero on failures.
3107  */
mpi3mr_issue_iocfacts(struct mpi3mr_ioc * mrioc,struct mpi3_ioc_facts_data * facts_data)3108 static int mpi3mr_issue_iocfacts(struct mpi3mr_ioc *mrioc,
3109 	struct mpi3_ioc_facts_data *facts_data)
3110 {
3111 	struct mpi3_ioc_facts_request iocfacts_req;
3112 	void *data = NULL;
3113 	dma_addr_t data_dma;
3114 	u32 data_len = sizeof(*facts_data);
3115 	int retval = 0;
3116 	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
3117 
3118 	data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
3119 	    GFP_KERNEL);
3120 
3121 	if (!data) {
3122 		retval = -1;
3123 		goto out;
3124 	}
3125 
3126 	memset(&iocfacts_req, 0, sizeof(iocfacts_req));
3127 	mutex_lock(&mrioc->init_cmds.mutex);
3128 	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
3129 		retval = -1;
3130 		ioc_err(mrioc, "Issue IOCFacts: Init command is in use\n");
3131 		mutex_unlock(&mrioc->init_cmds.mutex);
3132 		goto out;
3133 	}
3134 	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
3135 	mrioc->init_cmds.is_waiting = 1;
3136 	mrioc->init_cmds.callback = NULL;
3137 	iocfacts_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
3138 	iocfacts_req.function = MPI3_FUNCTION_IOC_FACTS;
3139 
3140 	mpi3mr_add_sg_single(&iocfacts_req.sgl, sgl_flags, data_len,
3141 	    data_dma);
3142 
3143 	init_completion(&mrioc->init_cmds.done);
3144 	retval = mpi3mr_admin_request_post(mrioc, &iocfacts_req,
3145 	    sizeof(iocfacts_req), 1);
3146 	if (retval) {
3147 		ioc_err(mrioc, "Issue IOCFacts: Admin Post failed\n");
3148 		goto out_unlock;
3149 	}
3150 	wait_for_completion_timeout(&mrioc->init_cmds.done,
3151 	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
3152 	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
3153 		ioc_err(mrioc, "ioc_facts timed out\n");
3154 		mpi3mr_check_rh_fault_ioc(mrioc,
3155 		    MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT);
3156 		retval = -1;
3157 		goto out_unlock;
3158 	}
3159 	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
3160 	    != MPI3_IOCSTATUS_SUCCESS) {
3161 		ioc_err(mrioc,
3162 		    "Issue IOCFacts: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
3163 		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
3164 		    mrioc->init_cmds.ioc_loginfo);
3165 		retval = -1;
3166 		goto out_unlock;
3167 	}
3168 	memcpy(facts_data, (u8 *)data, data_len);
3169 	mpi3mr_process_factsdata(mrioc, facts_data);
3170 out_unlock:
3171 	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
3172 	mutex_unlock(&mrioc->init_cmds.mutex);
3173 
3174 out:
3175 	if (data)
3176 		dma_free_coherent(&mrioc->pdev->dev, data_len, data, data_dma);
3177 
3178 	return retval;
3179 }
3180 
3181 /**
3182  * mpi3mr_check_reset_dma_mask - Process IOC facts data
3183  * @mrioc: Adapter instance reference
3184  *
3185  * Check whether the new DMA mask requested through IOCFacts by
3186  * firmware needs to be set, if so set it .
3187  *
3188  * Return: 0 on success, non-zero on failure.
3189  */
mpi3mr_check_reset_dma_mask(struct mpi3mr_ioc * mrioc)3190 static inline int mpi3mr_check_reset_dma_mask(struct mpi3mr_ioc *mrioc)
3191 {
3192 	struct pci_dev *pdev = mrioc->pdev;
3193 	int r;
3194 	u64 facts_dma_mask = DMA_BIT_MASK(mrioc->facts.dma_mask);
3195 
3196 	if (!mrioc->facts.dma_mask || (mrioc->dma_mask <= facts_dma_mask))
3197 		return 0;
3198 
3199 	ioc_info(mrioc, "Changing DMA mask from 0x%016llx to 0x%016llx\n",
3200 	    mrioc->dma_mask, facts_dma_mask);
3201 
3202 	r = dma_set_mask_and_coherent(&pdev->dev, facts_dma_mask);
3203 	if (r) {
3204 		ioc_err(mrioc, "Setting DMA mask to 0x%016llx failed: %d\n",
3205 		    facts_dma_mask, r);
3206 		return r;
3207 	}
3208 	mrioc->dma_mask = facts_dma_mask;
3209 	return r;
3210 }
3211 
3212 /**
3213  * mpi3mr_process_factsdata - Process IOC facts data
3214  * @mrioc: Adapter instance reference
3215  * @facts_data: Cached IOC facts data
3216  *
3217  * Convert IOC facts data into cpu endianness and cache it in
3218  * the driver .
3219  *
3220  * Return: Nothing.
3221  */
mpi3mr_process_factsdata(struct mpi3mr_ioc * mrioc,struct mpi3_ioc_facts_data * facts_data)3222 static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
3223 	struct mpi3_ioc_facts_data *facts_data)
3224 {
3225 	u32 ioc_config, req_sz, facts_flags;
3226 
3227 	if ((le16_to_cpu(facts_data->ioc_facts_data_length)) !=
3228 	    (sizeof(*facts_data) / 4)) {
3229 		ioc_warn(mrioc,
3230 		    "IOCFactsdata length mismatch driver_sz(%zu) firmware_sz(%d)\n",
3231 		    sizeof(*facts_data),
3232 		    le16_to_cpu(facts_data->ioc_facts_data_length) * 4);
3233 	}
3234 
3235 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
3236 	req_sz = 1 << ((ioc_config & MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ) >>
3237 	    MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ_SHIFT);
3238 	if (le16_to_cpu(facts_data->ioc_request_frame_size) != (req_sz / 4)) {
3239 		ioc_err(mrioc,
3240 		    "IOCFacts data reqFrameSize mismatch hw_size(%d) firmware_sz(%d)\n",
3241 		    req_sz / 4, le16_to_cpu(facts_data->ioc_request_frame_size));
3242 	}
3243 
3244 	memset(&mrioc->facts, 0, sizeof(mrioc->facts));
3245 
3246 	facts_flags = le32_to_cpu(facts_data->flags);
3247 	mrioc->facts.op_req_sz = req_sz;
3248 	mrioc->op_reply_desc_sz = 1 << ((ioc_config &
3249 	    MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ) >>
3250 	    MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT);
3251 
3252 	mrioc->facts.ioc_num = facts_data->ioc_number;
3253 	mrioc->facts.who_init = facts_data->who_init;
3254 	mrioc->facts.max_msix_vectors = le16_to_cpu(facts_data->max_msix_vectors);
3255 	mrioc->facts.personality = (facts_flags &
3256 	    MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK);
3257 	mrioc->facts.dma_mask = (facts_flags &
3258 	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
3259 	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
3260 	mrioc->facts.dma_mask = (facts_flags &
3261 	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
3262 	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
3263 	mrioc->facts.max_req_limit = (facts_flags &
3264 			MPI3_IOCFACTS_FLAGS_MAX_REQ_PER_REPLY_QUEUE_LIMIT);
3265 	mrioc->facts.protocol_flags = facts_data->protocol_flags;
3266 	mrioc->facts.mpi_version = le32_to_cpu(facts_data->mpi_version.word);
3267 	mrioc->facts.max_reqs = le16_to_cpu(facts_data->max_outstanding_requests);
3268 	mrioc->facts.product_id = le16_to_cpu(facts_data->product_id);
3269 	mrioc->facts.reply_sz = le16_to_cpu(facts_data->reply_frame_size) * 4;
3270 	mrioc->facts.exceptions = le16_to_cpu(facts_data->ioc_exceptions);
3271 	mrioc->facts.max_perids = le16_to_cpu(facts_data->max_persistent_id);
3272 	mrioc->facts.max_vds = le16_to_cpu(facts_data->max_vds);
3273 	mrioc->facts.max_hpds = le16_to_cpu(facts_data->max_host_pds);
3274 	mrioc->facts.max_advhpds = le16_to_cpu(facts_data->max_adv_host_pds);
3275 	mrioc->facts.max_raid_pds = le16_to_cpu(facts_data->max_raid_pds);
3276 	mrioc->facts.max_nvme = le16_to_cpu(facts_data->max_nvme);
3277 	mrioc->facts.max_pcie_switches =
3278 	    le16_to_cpu(facts_data->max_pcie_switches);
3279 	mrioc->facts.max_sasexpanders =
3280 	    le16_to_cpu(facts_data->max_sas_expanders);
3281 	mrioc->facts.max_data_length = le16_to_cpu(facts_data->max_data_length);
3282 	mrioc->facts.max_sasinitiators =
3283 	    le16_to_cpu(facts_data->max_sas_initiators);
3284 	mrioc->facts.max_enclosures = le16_to_cpu(facts_data->max_enclosures);
3285 	mrioc->facts.min_devhandle = le16_to_cpu(facts_data->min_dev_handle);
3286 	mrioc->facts.max_devhandle = le16_to_cpu(facts_data->max_dev_handle);
3287 	mrioc->facts.max_op_req_q =
3288 	    le16_to_cpu(facts_data->max_operational_request_queues);
3289 	mrioc->facts.max_op_reply_q =
3290 	    le16_to_cpu(facts_data->max_operational_reply_queues);
3291 	mrioc->facts.ioc_capabilities =
3292 	    le32_to_cpu(facts_data->ioc_capabilities);
3293 	mrioc->facts.fw_ver.build_num =
3294 	    le16_to_cpu(facts_data->fw_version.build_num);
3295 	mrioc->facts.fw_ver.cust_id =
3296 	    le16_to_cpu(facts_data->fw_version.customer_id);
3297 	mrioc->facts.fw_ver.ph_minor = facts_data->fw_version.phase_minor;
3298 	mrioc->facts.fw_ver.ph_major = facts_data->fw_version.phase_major;
3299 	mrioc->facts.fw_ver.gen_minor = facts_data->fw_version.gen_minor;
3300 	mrioc->facts.fw_ver.gen_major = facts_data->fw_version.gen_major;
3301 	mrioc->msix_count = min_t(int, mrioc->msix_count,
3302 	    mrioc->facts.max_msix_vectors);
3303 	mrioc->facts.sge_mod_mask = facts_data->sge_modifier_mask;
3304 	mrioc->facts.sge_mod_value = facts_data->sge_modifier_value;
3305 	mrioc->facts.sge_mod_shift = facts_data->sge_modifier_shift;
3306 	mrioc->facts.shutdown_timeout =
3307 	    le16_to_cpu(facts_data->shutdown_timeout);
3308 	mrioc->facts.diag_trace_sz =
3309 	    le32_to_cpu(facts_data->diag_trace_size);
3310 	mrioc->facts.diag_fw_sz =
3311 	    le32_to_cpu(facts_data->diag_fw_size);
3312 	mrioc->facts.diag_drvr_sz = le32_to_cpu(facts_data->diag_driver_size);
3313 	mrioc->facts.max_dev_per_tg =
3314 	    facts_data->max_devices_per_throttle_group;
3315 	mrioc->facts.io_throttle_data_length =
3316 	    le16_to_cpu(facts_data->io_throttle_data_length);
3317 	mrioc->facts.max_io_throttle_group =
3318 	    le16_to_cpu(facts_data->max_io_throttle_group);
3319 	mrioc->facts.io_throttle_low = le16_to_cpu(facts_data->io_throttle_low);
3320 	mrioc->facts.io_throttle_high =
3321 	    le16_to_cpu(facts_data->io_throttle_high);
3322 
3323 	if (mrioc->facts.max_data_length ==
3324 	    MPI3_IOCFACTS_MAX_DATA_LENGTH_NOT_REPORTED)
3325 		mrioc->facts.max_data_length = MPI3MR_DEFAULT_MAX_IO_SIZE;
3326 	else
3327 		mrioc->facts.max_data_length *= MPI3MR_PAGE_SIZE_4K;
3328 	/* Store in 512b block count */
3329 	if (mrioc->facts.io_throttle_data_length)
3330 		mrioc->io_throttle_data_length =
3331 		    (mrioc->facts.io_throttle_data_length * 2 * 4);
3332 	else
3333 		/* set the length to 1MB + 1K to disable throttle */
3334 		mrioc->io_throttle_data_length = (mrioc->facts.max_data_length / 512) + 2;
3335 
3336 	mrioc->io_throttle_high = (mrioc->facts.io_throttle_high * 2 * 1024);
3337 	mrioc->io_throttle_low = (mrioc->facts.io_throttle_low * 2 * 1024);
3338 
3339 	ioc_info(mrioc, "ioc_num(%d), maxopQ(%d), maxopRepQ(%d), maxdh(%d),",
3340 	    mrioc->facts.ioc_num, mrioc->facts.max_op_req_q,
3341 	    mrioc->facts.max_op_reply_q, mrioc->facts.max_devhandle);
3342 	ioc_info(mrioc,
3343 	    "maxreqs(%d), mindh(%d) maxvectors(%d) maxperids(%d)\n",
3344 	    mrioc->facts.max_reqs, mrioc->facts.min_devhandle,
3345 	    mrioc->facts.max_msix_vectors, mrioc->facts.max_perids);
3346 	ioc_info(mrioc, "SGEModMask 0x%x SGEModVal 0x%x SGEModShift 0x%x ",
3347 	    mrioc->facts.sge_mod_mask, mrioc->facts.sge_mod_value,
3348 	    mrioc->facts.sge_mod_shift);
3349 	ioc_info(mrioc, "DMA mask %d InitialPE status 0x%x max_data_len (%d)\n",
3350 	    mrioc->facts.dma_mask, (facts_flags &
3351 	    MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK), mrioc->facts.max_data_length);
3352 	ioc_info(mrioc,
3353 	    "max_dev_per_throttle_group(%d), max_throttle_groups(%d)\n",
3354 	    mrioc->facts.max_dev_per_tg, mrioc->facts.max_io_throttle_group);
3355 	ioc_info(mrioc,
3356 	   "io_throttle_data_len(%dKiB), io_throttle_high(%dMiB), io_throttle_low(%dMiB)\n",
3357 	   mrioc->facts.io_throttle_data_length * 4,
3358 	   mrioc->facts.io_throttle_high, mrioc->facts.io_throttle_low);
3359 }
3360 
3361 /**
3362  * mpi3mr_alloc_reply_sense_bufs - Send IOC Init
3363  * @mrioc: Adapter instance reference
3364  *
3365  * Allocate and initialize the reply free buffers, sense
3366  * buffers, reply free queue and sense buffer queue.
3367  *
3368  * Return: 0 on success, non-zero on failures.
3369  */
mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc * mrioc)3370 static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc)
3371 {
3372 	int retval = 0;
3373 	u32 sz, i;
3374 
3375 	if (mrioc->init_cmds.reply)
3376 		return retval;
3377 
3378 	mrioc->init_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
3379 	if (!mrioc->init_cmds.reply)
3380 		goto out_failed;
3381 
3382 	mrioc->bsg_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
3383 	if (!mrioc->bsg_cmds.reply)
3384 		goto out_failed;
3385 
3386 	mrioc->transport_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
3387 	if (!mrioc->transport_cmds.reply)
3388 		goto out_failed;
3389 
3390 	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
3391 		mrioc->dev_rmhs_cmds[i].reply = kzalloc(mrioc->reply_sz,
3392 		    GFP_KERNEL);
3393 		if (!mrioc->dev_rmhs_cmds[i].reply)
3394 			goto out_failed;
3395 	}
3396 
3397 	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
3398 		mrioc->evtack_cmds[i].reply = kzalloc(mrioc->reply_sz,
3399 		    GFP_KERNEL);
3400 		if (!mrioc->evtack_cmds[i].reply)
3401 			goto out_failed;
3402 	}
3403 
3404 	mrioc->host_tm_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
3405 	if (!mrioc->host_tm_cmds.reply)
3406 		goto out_failed;
3407 
3408 	mrioc->pel_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
3409 	if (!mrioc->pel_cmds.reply)
3410 		goto out_failed;
3411 
3412 	mrioc->pel_abort_cmd.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
3413 	if (!mrioc->pel_abort_cmd.reply)
3414 		goto out_failed;
3415 
3416 	mrioc->dev_handle_bitmap_bits = mrioc->facts.max_devhandle;
3417 	mrioc->removepend_bitmap = bitmap_zalloc(mrioc->dev_handle_bitmap_bits,
3418 						 GFP_KERNEL);
3419 	if (!mrioc->removepend_bitmap)
3420 		goto out_failed;
3421 
3422 	mrioc->devrem_bitmap = bitmap_zalloc(MPI3MR_NUM_DEVRMCMD, GFP_KERNEL);
3423 	if (!mrioc->devrem_bitmap)
3424 		goto out_failed;
3425 
3426 	mrioc->evtack_cmds_bitmap = bitmap_zalloc(MPI3MR_NUM_EVTACKCMD,
3427 						  GFP_KERNEL);
3428 	if (!mrioc->evtack_cmds_bitmap)
3429 		goto out_failed;
3430 
3431 	mrioc->num_reply_bufs = mrioc->facts.max_reqs + MPI3MR_NUM_EVT_REPLIES;
3432 	mrioc->reply_free_qsz = mrioc->num_reply_bufs + 1;
3433 	mrioc->num_sense_bufs = mrioc->facts.max_reqs / MPI3MR_SENSEBUF_FACTOR;
3434 	mrioc->sense_buf_q_sz = mrioc->num_sense_bufs + 1;
3435 
3436 	/* reply buffer pool, 16 byte align */
3437 	sz = mrioc->num_reply_bufs * mrioc->reply_sz;
3438 	mrioc->reply_buf_pool = dma_pool_create("reply_buf pool",
3439 	    &mrioc->pdev->dev, sz, 16, 0);
3440 	if (!mrioc->reply_buf_pool) {
3441 		ioc_err(mrioc, "reply buf pool: dma_pool_create failed\n");
3442 		goto out_failed;
3443 	}
3444 
3445 	mrioc->reply_buf = dma_pool_zalloc(mrioc->reply_buf_pool, GFP_KERNEL,
3446 	    &mrioc->reply_buf_dma);
3447 	if (!mrioc->reply_buf)
3448 		goto out_failed;
3449 
3450 	mrioc->reply_buf_dma_max_address = mrioc->reply_buf_dma + sz;
3451 
3452 	/* reply free queue, 8 byte align */
3453 	sz = mrioc->reply_free_qsz * 8;
3454 	mrioc->reply_free_q_pool = dma_pool_create("reply_free_q pool",
3455 	    &mrioc->pdev->dev, sz, 8, 0);
3456 	if (!mrioc->reply_free_q_pool) {
3457 		ioc_err(mrioc, "reply_free_q pool: dma_pool_create failed\n");
3458 		goto out_failed;
3459 	}
3460 	mrioc->reply_free_q = dma_pool_zalloc(mrioc->reply_free_q_pool,
3461 	    GFP_KERNEL, &mrioc->reply_free_q_dma);
3462 	if (!mrioc->reply_free_q)
3463 		goto out_failed;
3464 
3465 	/* sense buffer pool,  4 byte align */
3466 	sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
3467 	mrioc->sense_buf_pool = dma_pool_create("sense_buf pool",
3468 	    &mrioc->pdev->dev, sz, 4, 0);
3469 	if (!mrioc->sense_buf_pool) {
3470 		ioc_err(mrioc, "sense_buf pool: dma_pool_create failed\n");
3471 		goto out_failed;
3472 	}
3473 	mrioc->sense_buf = dma_pool_zalloc(mrioc->sense_buf_pool, GFP_KERNEL,
3474 	    &mrioc->sense_buf_dma);
3475 	if (!mrioc->sense_buf)
3476 		goto out_failed;
3477 
3478 	/* sense buffer queue, 8 byte align */
3479 	sz = mrioc->sense_buf_q_sz * 8;
3480 	mrioc->sense_buf_q_pool = dma_pool_create("sense_buf_q pool",
3481 	    &mrioc->pdev->dev, sz, 8, 0);
3482 	if (!mrioc->sense_buf_q_pool) {
3483 		ioc_err(mrioc, "sense_buf_q pool: dma_pool_create failed\n");
3484 		goto out_failed;
3485 	}
3486 	mrioc->sense_buf_q = dma_pool_zalloc(mrioc->sense_buf_q_pool,
3487 	    GFP_KERNEL, &mrioc->sense_buf_q_dma);
3488 	if (!mrioc->sense_buf_q)
3489 		goto out_failed;
3490 
3491 	return retval;
3492 
3493 out_failed:
3494 	retval = -1;
3495 	return retval;
3496 }
3497 
3498 /**
3499  * mpimr_initialize_reply_sbuf_queues - initialize reply sense
3500  * buffers
3501  * @mrioc: Adapter instance reference
3502  *
3503  * Helper function to initialize reply and sense buffers along
3504  * with some debug prints.
3505  *
3506  * Return:  None.
3507  */
mpimr_initialize_reply_sbuf_queues(struct mpi3mr_ioc * mrioc)3508 static void mpimr_initialize_reply_sbuf_queues(struct mpi3mr_ioc *mrioc)
3509 {
3510 	u32 sz, i;
3511 	dma_addr_t phy_addr;
3512 
3513 	sz = mrioc->num_reply_bufs * mrioc->reply_sz;
3514 	ioc_info(mrioc,
3515 	    "reply buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
3516 	    mrioc->reply_buf, mrioc->num_reply_bufs, mrioc->reply_sz,
3517 	    (sz / 1024), (unsigned long long)mrioc->reply_buf_dma);
3518 	sz = mrioc->reply_free_qsz * 8;
3519 	ioc_info(mrioc,
3520 	    "reply_free_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
3521 	    mrioc->reply_free_q, mrioc->reply_free_qsz, 8, (sz / 1024),
3522 	    (unsigned long long)mrioc->reply_free_q_dma);
3523 	sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
3524 	ioc_info(mrioc,
3525 	    "sense_buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
3526 	    mrioc->sense_buf, mrioc->num_sense_bufs, MPI3MR_SENSE_BUF_SZ,
3527 	    (sz / 1024), (unsigned long long)mrioc->sense_buf_dma);
3528 	sz = mrioc->sense_buf_q_sz * 8;
3529 	ioc_info(mrioc,
3530 	    "sense_buf_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
3531 	    mrioc->sense_buf_q, mrioc->sense_buf_q_sz, 8, (sz / 1024),
3532 	    (unsigned long long)mrioc->sense_buf_q_dma);
3533 
3534 	/* initialize Reply buffer Queue */
3535 	for (i = 0, phy_addr = mrioc->reply_buf_dma;
3536 	    i < mrioc->num_reply_bufs; i++, phy_addr += mrioc->reply_sz)
3537 		mrioc->reply_free_q[i] = cpu_to_le64(phy_addr);
3538 	mrioc->reply_free_q[i] = cpu_to_le64(0);
3539 
3540 	/* initialize Sense Buffer Queue */
3541 	for (i = 0, phy_addr = mrioc->sense_buf_dma;
3542 	    i < mrioc->num_sense_bufs; i++, phy_addr += MPI3MR_SENSE_BUF_SZ)
3543 		mrioc->sense_buf_q[i] = cpu_to_le64(phy_addr);
3544 	mrioc->sense_buf_q[i] = cpu_to_le64(0);
3545 }
3546 
3547 /**
3548  * mpi3mr_issue_iocinit - Send IOC Init
3549  * @mrioc: Adapter instance reference
3550  *
3551  * Issue IOC Init MPI request through admin queue and wait for
3552  * the completion of it or time out.
3553  *
3554  * Return: 0 on success, non-zero on failures.
3555  */
static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc)
{
	struct mpi3_ioc_init_request iocinit_req;
	struct mpi3_driver_info_layout *drv_info;
	dma_addr_t data_dma;
	u32 data_len = sizeof(*drv_info);
	int retval = 0;
	ktime_t current_time;

	/* DMA-able buffer carrying driver identity info to the firmware */
	drv_info = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
	    GFP_KERNEL);
	if (!drv_info) {
		retval = -1;
		goto out;
	}
	/*
	 * (Re)populate the reply free and sense buffer free queues before
	 * their addresses are handed to the IOC in the request below.
	 */
	mpimr_initialize_reply_sbuf_queues(mrioc);

	drv_info->information_length = cpu_to_le32(data_len);
	strscpy(drv_info->driver_signature, "Broadcom", sizeof(drv_info->driver_signature));
	strscpy(drv_info->os_name, utsname()->sysname, sizeof(drv_info->os_name));
	strscpy(drv_info->os_version, utsname()->release, sizeof(drv_info->os_version));
	strscpy(drv_info->driver_name, MPI3MR_DRIVER_NAME, sizeof(drv_info->driver_name));
	strscpy(drv_info->driver_version, MPI3MR_DRIVER_VERSION, sizeof(drv_info->driver_version));
	strscpy(drv_info->driver_release_date, MPI3MR_DRIVER_RELDATE,
	    sizeof(drv_info->driver_release_date));
	drv_info->driver_capabilities = 0;
	/* Keep a host-side copy of the info reported to the firmware */
	memcpy((u8 *)&mrioc->driver_info, (u8 *)drv_info,
	    sizeof(mrioc->driver_info));

	memset(&iocinit_req, 0, sizeof(iocinit_req));
	/* init_cmds tracker is shared; only one init-time command at once */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue IOCInit: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	iocinit_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	iocinit_req.function = MPI3_FUNCTION_IOC_INIT;
	iocinit_req.mpi_version.mpi3_version.dev = MPI3_VERSION_DEV;
	iocinit_req.mpi_version.mpi3_version.unit = MPI3_VERSION_UNIT;
	iocinit_req.mpi_version.mpi3_version.major = MPI3_VERSION_MAJOR;
	iocinit_req.mpi_version.mpi3_version.minor = MPI3_VERSION_MINOR;
	iocinit_req.who_init = MPI3_WHOINIT_HOST_DRIVER;
	iocinit_req.reply_free_queue_depth = cpu_to_le16(mrioc->reply_free_qsz);
	iocinit_req.reply_free_queue_address =
	    cpu_to_le64(mrioc->reply_free_q_dma);
	iocinit_req.sense_buffer_length = cpu_to_le16(MPI3MR_SENSE_BUF_SZ);
	iocinit_req.sense_buffer_free_queue_depth =
	    cpu_to_le16(mrioc->sense_buf_q_sz);
	iocinit_req.sense_buffer_free_queue_address =
	    cpu_to_le64(mrioc->sense_buf_q_dma);
	iocinit_req.driver_information_address = cpu_to_le64(data_dma);

	/* Current wall-clock time in milliseconds for the IOC's use */
	current_time = ktime_get_real();
	iocinit_req.time_stamp = cpu_to_le64(ktime_to_ms(current_time));

	iocinit_req.msg_flags |=
	    MPI3_IOCINIT_MSGFLAGS_SCSIIOSTATUSREPLY_SUPPORTED;
	iocinit_req.msg_flags |=
		MPI3_IOCINIT_MSGFLAGS_WRITESAMEDIVERT_SUPPORTED;

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &iocinit_req,
	    sizeof(iocinit_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue IOCInit: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		/* Timeout: invoke the diag-fault/reset handling path */
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_IOCINIT_TIMEOUT);
		ioc_err(mrioc, "ioc_init timed out\n");
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue IOCInit: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

	/*
	 * IOC init succeeded: hand all reply/sense buffers to the IOC by
	 * advancing the host indexes in the system interface registers.
	 */
	mrioc->reply_free_queue_host_index = mrioc->num_reply_bufs;
	writel(mrioc->reply_free_queue_host_index,
	    &mrioc->sysif_regs->reply_free_host_index);

	mrioc->sbq_host_index = mrioc->num_sense_bufs;
	writel(mrioc->sbq_host_index,
	    &mrioc->sysif_regs->sense_buffer_free_host_index);
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	if (drv_info)
		dma_free_coherent(&mrioc->pdev->dev, data_len, drv_info,
		    data_dma);

	return retval;
}
3665 
3666 /**
3667  * mpi3mr_unmask_events - Unmask events in event mask bitmap
3668  * @mrioc: Adapter instance reference
3669  * @event: MPI event ID
3670  *
 * Unmask the specific event by clearing the corresponding bit in
 * the event_masks bitmap.
 *
 * Return: Nothing.
3675  */
static void mpi3mr_unmask_events(struct mpi3mr_ioc *mrioc, u16 event)
{
	u32 desired_event;
	u8 word;

	/*
	 * Derive the bound from the event-mask bitmap size instead of a
	 * magic constant so it stays in sync with the
	 * MPI3_EVENT_NOTIFY_EVENTMASK_WORDS sized event_masks array used
	 * by mpi3mr_issue_event_notification()/mpi3mr_enable_events().
	 */
	if (event >= MPI3_EVENT_NOTIFY_EVENTMASK_WORDS * 32)
		return;

	/* One bit per event, 32 events per mask word; clearing the bit
	 * unmasks (enables) delivery of the event.
	 */
	desired_event = (1 << (event % 32));
	word = event / 32;

	mrioc->event_masks[word] &= ~desired_event;
}
3689 
3690 /**
3691  * mpi3mr_issue_event_notification - Send event notification
3692  * @mrioc: Adapter instance reference
3693  *
3694  * Issue event notification MPI request through admin queue and
3695  * wait for the completion of it or time out.
3696  *
3697  * Return: 0 on success, non-zero on failures.
3698  */
static int mpi3mr_issue_event_notification(struct mpi3mr_ioc *mrioc)
{
	struct mpi3_event_notification_request evtnotify_req;
	int retval = 0;
	u8 i;

	memset(&evtnotify_req, 0, sizeof(evtnotify_req));
	/* init_cmds tracker is shared; only one init-time command at once */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue EvtNotify: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	evtnotify_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	evtnotify_req.function = MPI3_FUNCTION_EVENT_NOTIFICATION;
	/* Copy the driver's current event mask words (set bit = masked) */
	for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
		evtnotify_req.event_masks[i] =
		    cpu_to_le32(mrioc->event_masks[i]);
	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &evtnotify_req,
	    sizeof(evtnotify_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue EvtNotify: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "event notification timed out\n");
		/* Timeout: invoke the diag-fault/reset handling path */
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue EvtNotify: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:
	return retval;
}
3753 
3754 /**
3755  * mpi3mr_process_event_ack - Process event acknowledgment
3756  * @mrioc: Adapter instance reference
3757  * @event: MPI3 event ID
3758  * @event_ctx: event context
3759  *
3760  * Send event acknowledgment through admin queue and wait for
3761  * it to complete.
3762  *
3763  * Return: 0 on success, non-zero on failures.
3764  */
int mpi3mr_process_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
	u32 event_ctx)
{
	struct mpi3_event_ack_request evtack_req;
	int retval = 0;

	memset(&evtack_req, 0, sizeof(evtack_req));
	/* init_cmds tracker is shared; only one init-time command at once */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Send EvtAck: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	evtack_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	evtack_req.function = MPI3_FUNCTION_EVENT_ACK;
	evtack_req.event = event;
	evtack_req.event_context = cpu_to_le32(event_ctx);

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &evtack_req,
	    sizeof(evtack_req), 1);
	if (retval) {
		ioc_err(mrioc, "Send EvtAck: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		/*
		 * Fixed copy-paste in the timeout message: this path sends
		 * an event ack ("Send EvtAck"), not an event notification,
		 * matching every other message in this function.
		 */
		ioc_err(mrioc, "Send EvtAck: command timed out\n");
		/* Skip the fault path if a reset already took over the cmd */
		if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
			mpi3mr_check_rh_fault_ioc(mrioc,
			    MPI3MR_RESET_FROM_EVTACK_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Send EvtAck: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:
	return retval;
}
3820 
3821 /**
3822  * mpi3mr_alloc_chain_bufs - Allocate chain buffers
3823  * @mrioc: Adapter instance reference
3824  *
3825  * Allocate chain buffers and set a bitmap to indicate free
3826  * chain buffers. Chain buffers are used to pass the SGE
3827  * information along with MPI3 SCSI IO requests for host I/O.
3828  *
3829  * Return: 0 on success, non-zero on failure
3830  */
static int mpi3mr_alloc_chain_bufs(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u32 sz, i;
	u16 num_chains;

	/* Already allocated (e.g. re-init after reset): nothing to do */
	if (mrioc->chain_sgl_list)
		return retval;

	num_chains = mrioc->max_host_ios / MPI3MR_CHAINBUF_FACTOR;

	/* Extra chain buffers when any DIX protection type is enabled */
	if (prot_mask & (SHOST_DIX_TYPE0_PROTECTION
	    | SHOST_DIX_TYPE1_PROTECTION
	    | SHOST_DIX_TYPE2_PROTECTION
	    | SHOST_DIX_TYPE3_PROTECTION))
		num_chains += (num_chains / MPI3MR_CHAINBUFDIX_FACTOR);

	mrioc->chain_buf_count = num_chains;
	sz = sizeof(struct chain_element) * num_chains;
	mrioc->chain_sgl_list = kzalloc(sz, GFP_KERNEL);
	if (!mrioc->chain_sgl_list)
		goto out_failed;

	/*
	 * Cap max_sgl_entries so a fully populated SGL cannot describe
	 * more than the controller-reported max data length (4K units).
	 */
	if (mrioc->max_sgl_entries > (mrioc->facts.max_data_length /
		MPI3MR_PAGE_SIZE_4K))
		mrioc->max_sgl_entries = mrioc->facts.max_data_length /
			MPI3MR_PAGE_SIZE_4K;
	sz = mrioc->max_sgl_entries * sizeof(struct mpi3_sge_common);
	ioc_info(mrioc, "number of sgl entries=%d chain buffer size=%dKB\n",
			mrioc->max_sgl_entries, sz/1024);

	mrioc->chain_buf_pool = dma_pool_create("chain_buf pool",
	    &mrioc->pdev->dev, sz, 16, 0);
	if (!mrioc->chain_buf_pool) {
		ioc_err(mrioc, "chain buf pool: dma_pool_create failed\n");
		goto out_failed;
	}

	for (i = 0; i < num_chains; i++) {
		mrioc->chain_sgl_list[i].addr =
		    dma_pool_zalloc(mrioc->chain_buf_pool, GFP_KERNEL,
		    &mrioc->chain_sgl_list[i].dma_addr);

		if (!mrioc->chain_sgl_list[i].addr)
			goto out_failed;
	}
	/* One bit per chain buffer tracks free/in-use state */
	mrioc->chain_bitmap = bitmap_zalloc(num_chains, GFP_KERNEL);
	if (!mrioc->chain_bitmap)
		goto out_failed;
	return retval;
out_failed:
	/*
	 * NOTE(review): partial allocations are not freed here; presumably
	 * the caller's failure path releases them — confirm against the
	 * driver's common free/cleanup routines.
	 */
	retval = -1;
	return retval;
}
3885 
3886 /**
3887  * mpi3mr_port_enable_complete - Mark port enable complete
3888  * @mrioc: Adapter instance reference
3889  * @drv_cmd: Internal command tracker
3890  *
3891  * Call back for asynchronous port enable request sets the
3892  * driver command to indicate port enable request is complete.
3893  *
3894  * Return: Nothing
3895  */
mpi3mr_port_enable_complete(struct mpi3mr_ioc * mrioc,struct mpi3mr_drv_cmd * drv_cmd)3896 static void mpi3mr_port_enable_complete(struct mpi3mr_ioc *mrioc,
3897 	struct mpi3mr_drv_cmd *drv_cmd)
3898 {
3899 	drv_cmd->callback = NULL;
3900 	mrioc->scan_started = 0;
3901 	if (drv_cmd->state & MPI3MR_CMD_RESET)
3902 		mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
3903 	else
3904 		mrioc->scan_failed = drv_cmd->ioc_status;
3905 	drv_cmd->state = MPI3MR_CMD_NOTUSED;
3906 }
3907 
3908 /**
3909  * mpi3mr_issue_port_enable - Issue Port Enable
3910  * @mrioc: Adapter instance reference
3911  * @async: Flag to wait for completion or not
3912  *
3913  * Issue Port Enable MPI request through admin queue and if the
3914  * async flag is not set wait for the completion of the port
3915  * enable or time out.
3916  *
3917  * Return: 0 on success, non-zero on failures.
3918  */
int mpi3mr_issue_port_enable(struct mpi3mr_ioc *mrioc, u8 async)
{
	struct mpi3_port_enable_request pe_req;
	int retval = 0;
	u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT;

	memset(&pe_req, 0, sizeof(pe_req));
	/* init_cmds tracker is shared; only one init-time command at once */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue PortEnable: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	if (async) {
		/* Async mode: completion is handled via the callback */
		mrioc->init_cmds.is_waiting = 0;
		mrioc->init_cmds.callback = mpi3mr_port_enable_complete;
	} else {
		/* Sync mode: block on the completion below */
		mrioc->init_cmds.is_waiting = 1;
		mrioc->init_cmds.callback = NULL;
		init_completion(&mrioc->init_cmds.done);
	}
	pe_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	pe_req.function = MPI3_FUNCTION_PORT_ENABLE;

	retval = mpi3mr_admin_request_post(mrioc, &pe_req, sizeof(pe_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue PortEnable: Admin Post failed\n");
		goto out_unlock;
	}
	if (async) {
		/* Leave the command PENDING for the callback to clear */
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}

	wait_for_completion_timeout(&mrioc->init_cmds.done, (pe_timeout * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "port enable timed out\n");
		retval = -1;
		/* Timeout: invoke the diag-fault/reset handling path */
		mpi3mr_check_rh_fault_ioc(mrioc, MPI3MR_RESET_FROM_PE_TIMEOUT);
		goto out_unlock;
	}
	mpi3mr_port_enable_complete(mrioc, &mrioc->init_cmds);

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:
	return retval;
}
3970 
3971 /* Protocol type to name mapper structure */
3972 static const struct {
3973 	u8 protocol;
3974 	char *name;
3975 } mpi3mr_protocols[] = {
3976 	{ MPI3_IOCFACTS_PROTOCOL_SCSI_INITIATOR, "Initiator" },
3977 	{ MPI3_IOCFACTS_PROTOCOL_SCSI_TARGET, "Target" },
3978 	{ MPI3_IOCFACTS_PROTOCOL_NVME, "NVMe attachment" },
3979 };
3980 
3981 /* Capability to name mapper structure*/
3982 static const struct {
3983 	u32 capability;
3984 	char *name;
3985 } mpi3mr_capabilities[] = {
3986 	{ MPI3_IOCFACTS_CAPABILITY_RAID_SUPPORTED, "RAID" },
3987 	{ MPI3_IOCFACTS_CAPABILITY_MULTIPATH_SUPPORTED, "MultiPath" },
3988 };
3989 
3990 /**
3991  * mpi3mr_repost_diag_bufs - repost host diag buffers
3992  * @mrioc: Adapter instance reference
3993  *
3994  * repost firmware and trace diag buffers based on global
3995  * trigger flag from driver page 2
3996  *
3997  * Return: 0 on success, non-zero on failures.
3998  */
static int mpi3mr_repost_diag_bufs(struct mpi3mr_ioc *mrioc)
{
	u64 global_trigger;
	union mpi3mr_trigger_data prev_trigger_data;
	struct diag_buffer_desc *trace_hdb = NULL;
	struct diag_buffer_desc *fw_hdb = NULL;
	int retval = 0;
	bool trace_repost_needed = false;
	bool fw_repost_needed = false;
	u8 prev_trigger_type;

	/* Refresh trigger data from driver page 2 before deciding */
	retval = mpi3mr_refresh_trigger(mrioc, MPI3_CONFIG_ACTION_READ_CURRENT);
	if (retval)
		return -1;

	trace_hdb = mpi3mr_diag_buffer_for_type(mrioc,
	    MPI3_DIAG_BUFFER_TYPE_TRACE);

	/*
	 * Repost only buffers that are allocated and were not captured by
	 * a global or element trigger.
	 */
	if (trace_hdb &&
	    trace_hdb->status != MPI3MR_HDB_BUFSTATUS_NOT_ALLOCATED &&
	    trace_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_GLOBAL &&
	    trace_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_ELEMENT)
		trace_repost_needed = true;

	fw_hdb = mpi3mr_diag_buffer_for_type(mrioc, MPI3_DIAG_BUFFER_TYPE_FW);

	if (fw_hdb && fw_hdb->status != MPI3MR_HDB_BUFSTATUS_NOT_ALLOCATED &&
	    fw_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_GLOBAL &&
	    fw_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_ELEMENT)
		fw_repost_needed = true;

	/* Honor the "post disabled" global trigger bits from driver page 2 */
	if (trace_repost_needed || fw_repost_needed) {
		global_trigger = le64_to_cpu(mrioc->driver_pg2->global_trigger);
		if (global_trigger &
		      MPI3_DRIVER2_GLOBALTRIGGER_POST_DIAG_TRACE_DISABLED)
			trace_repost_needed = false;
		if (global_trigger &
		     MPI3_DRIVER2_GLOBALTRIGGER_POST_DIAG_FW_DISABLED)
			fw_repost_needed = false;
	}

	if (trace_repost_needed) {
		/* Save trigger info so it can be restored on post failure */
		prev_trigger_type = trace_hdb->trigger_type;
		memcpy(&prev_trigger_data, &trace_hdb->trigger_data,
		    sizeof(trace_hdb->trigger_data));
		retval = mpi3mr_issue_diag_buf_post(mrioc, trace_hdb);
		if (!retval) {
			dprint_init(mrioc, "trace diag buffer reposted");
			mpi3mr_set_trigger_data_in_hdb(trace_hdb,
				    MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN, NULL, 1);
		} else {
			/* Restore the saved trigger info and give up */
			trace_hdb->trigger_type = prev_trigger_type;
			memcpy(&trace_hdb->trigger_data, &prev_trigger_data,
			    sizeof(prev_trigger_data));
			ioc_err(mrioc, "trace diag buffer repost failed");
			return -1;
		}
	}

	if (fw_repost_needed) {
		/* Same save/post/restore pattern for the firmware buffer */
		prev_trigger_type = fw_hdb->trigger_type;
		memcpy(&prev_trigger_data, &fw_hdb->trigger_data,
		    sizeof(fw_hdb->trigger_data));
		retval = mpi3mr_issue_diag_buf_post(mrioc, fw_hdb);
		if (!retval) {
			dprint_init(mrioc, "firmware diag buffer reposted");
			mpi3mr_set_trigger_data_in_hdb(fw_hdb,
				    MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN, NULL, 1);
		} else {
			fw_hdb->trigger_type = prev_trigger_type;
			memcpy(&fw_hdb->trigger_data, &prev_trigger_data,
			    sizeof(prev_trigger_data));
			ioc_err(mrioc, "firmware diag buffer repost failed");
			return -1;
		}
	}
	return retval;
}
4077 
4078 /**
4079  * mpi3mr_read_tsu_interval - Update time stamp interval
4080  * @mrioc: Adapter instance reference
4081  *
4082  * Update time stamp interval if its defined in driver page 1,
4083  * otherwise use default value.
4084  *
4085  * Return: Nothing
4086  */
4087 static void
mpi3mr_read_tsu_interval(struct mpi3mr_ioc * mrioc)4088 mpi3mr_read_tsu_interval(struct mpi3mr_ioc *mrioc)
4089 {
4090 	struct mpi3_driver_page1 driver_pg1;
4091 	u16 pg_sz = sizeof(driver_pg1);
4092 	int retval = 0;
4093 
4094 	mrioc->ts_update_interval = MPI3MR_TSUPDATE_INTERVAL;
4095 
4096 	retval = mpi3mr_cfg_get_driver_pg1(mrioc, &driver_pg1, pg_sz);
4097 	if (!retval && driver_pg1.time_stamp_update)
4098 		mrioc->ts_update_interval = (driver_pg1.time_stamp_update * 60);
4099 }
4100 
4101 /**
4102  * mpi3mr_print_ioc_info - Display controller information
4103  * @mrioc: Adapter instance reference
4104  *
4105  * Display controller personality, capability, supported
4106  * protocols etc.
4107  *
4108  * Return: Nothing
4109  */
4110 static void
mpi3mr_print_ioc_info(struct mpi3mr_ioc * mrioc)4111 mpi3mr_print_ioc_info(struct mpi3mr_ioc *mrioc)
4112 {
4113 	int i = 0, bytes_written = 0;
4114 	const char *personality;
4115 	char protocol[50] = {0};
4116 	char capabilities[100] = {0};
4117 	struct mpi3mr_compimg_ver *fwver = &mrioc->facts.fw_ver;
4118 
4119 	switch (mrioc->facts.personality) {
4120 	case MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA:
4121 		personality = "Enhanced HBA";
4122 		break;
4123 	case MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR:
4124 		personality = "RAID";
4125 		break;
4126 	default:
4127 		personality = "Unknown";
4128 		break;
4129 	}
4130 
4131 	ioc_info(mrioc, "Running in %s Personality", personality);
4132 
4133 	ioc_info(mrioc, "FW version(%d.%d.%d.%d.%d.%d)\n",
4134 	    fwver->gen_major, fwver->gen_minor, fwver->ph_major,
4135 	    fwver->ph_minor, fwver->cust_id, fwver->build_num);
4136 
4137 	for (i = 0; i < ARRAY_SIZE(mpi3mr_protocols); i++) {
4138 		if (mrioc->facts.protocol_flags &
4139 		    mpi3mr_protocols[i].protocol) {
4140 			bytes_written += scnprintf(protocol + bytes_written,
4141 				    sizeof(protocol) - bytes_written, "%s%s",
4142 				    bytes_written ? "," : "",
4143 				    mpi3mr_protocols[i].name);
4144 		}
4145 	}
4146 
4147 	bytes_written = 0;
4148 	for (i = 0; i < ARRAY_SIZE(mpi3mr_capabilities); i++) {
4149 		if (mrioc->facts.protocol_flags &
4150 		    mpi3mr_capabilities[i].capability) {
4151 			bytes_written += scnprintf(capabilities + bytes_written,
4152 				    sizeof(capabilities) - bytes_written, "%s%s",
4153 				    bytes_written ? "," : "",
4154 				    mpi3mr_capabilities[i].name);
4155 		}
4156 	}
4157 
4158 	ioc_info(mrioc, "Protocol=(%s), Capabilities=(%s)\n",
4159 		 protocol, capabilities);
4160 }
4161 
4162 /**
4163  * mpi3mr_cleanup_resources - Free PCI resources
4164  * @mrioc: Adapter instance reference
4165  *
4166  * Unmap PCI device memory and disable PCI device.
4167  *
 * Return: Nothing.
4169  */
void mpi3mr_cleanup_resources(struct mpi3mr_ioc *mrioc)
{
	struct pci_dev *pdev = mrioc->pdev;

	/* Tear down interrupts before unmapping the registers they use */
	mpi3mr_cleanup_isr(mrioc);

	if (mrioc->sysif_regs) {
		iounmap((void __iomem *)mrioc->sysif_regs);
		mrioc->sysif_regs = NULL;
	}

	/* Release BARs and disable only if the device is still enabled */
	if (pci_is_enabled(pdev)) {
		if (mrioc->bars)
			pci_release_selected_regions(pdev, mrioc->bars);
		pci_disable_device(pdev);
	}
}
4187 
4188 /**
4189  * mpi3mr_setup_resources - Enable PCI resources
4190  * @mrioc: Adapter instance reference
4191  *
4192  * Enable PCI device memory, MSI-x registers and set DMA mask.
4193  *
4194  * Return: 0 on success and non-zero on failure.
4195  */
int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc)
{
	struct pci_dev *pdev = mrioc->pdev;
	u32 memap_sz = 0;
	int i, retval = 0, capb = 0;
	u16 message_control;
	/*
	 * Prefer a previously negotiated DMA mask; otherwise 64-bit when
	 * dma_addr_t is wide enough, 32-bit otherwise.
	 */
	u64 dma_mask = mrioc->dma_mask ? mrioc->dma_mask :
	    ((sizeof(dma_addr_t) > 4) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32));

	if (pci_enable_device_mem(pdev)) {
		ioc_err(mrioc, "pci_enable_device_mem: failed\n");
		retval = -ENODEV;
		goto out_failed;
	}

	/* The controller must support MSI-X; bail out if it is absent */
	capb = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
	if (!capb) {
		ioc_err(mrioc, "Unable to find MSI-X Capabilities\n");
		retval = -ENODEV;
		goto out_failed;
	}
	mrioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);

	if (pci_request_selected_regions(pdev, mrioc->bars,
	    mrioc->driver_name)) {
		ioc_err(mrioc, "pci_request_selected_regions: failed\n");
		retval = -ENODEV;
		goto out_failed;
	}

	/* Map the first memory BAR: the system interface register block */
	for (i = 0; (i < DEVICE_COUNT_RESOURCE); i++) {
		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
			mrioc->sysif_regs_phys = pci_resource_start(pdev, i);
			memap_sz = pci_resource_len(pdev, i);
			mrioc->sysif_regs =
			    ioremap(mrioc->sysif_regs_phys, memap_sz);
			break;
		}
	}

	pci_set_master(pdev);

	retval = dma_set_mask_and_coherent(&pdev->dev, dma_mask);
	if (retval) {
		/* Fall back from 64-bit to 32-bit DMA addressing */
		if (dma_mask != DMA_BIT_MASK(32)) {
			ioc_warn(mrioc, "Setting 64 bit DMA mask failed\n");
			dma_mask = DMA_BIT_MASK(32);
			retval = dma_set_mask_and_coherent(&pdev->dev,
			    dma_mask);
		}
		if (retval) {
			mrioc->dma_mask = 0;
			ioc_err(mrioc, "Setting 32 bit DMA mask also failed\n");
			goto out_failed;
		}
	}
	mrioc->dma_mask = dma_mask;

	if (!mrioc->sysif_regs) {
		ioc_err(mrioc,
		    "Unable to map adapter memory or resource not found\n");
		retval = -EINVAL;
		goto out_failed;
	}

	/* MSI-X message control: table size field is N-1 encoded */
	pci_read_config_word(pdev, capb + 2, &message_control);
	mrioc->msix_count = (message_control & 0x3FF) + 1;

	pci_save_state(pdev);

	pci_set_drvdata(pdev, mrioc->shost);

	/* Keep controller interrupts off until ISR setup completes */
	mpi3mr_ioc_disable_intr(mrioc);

	ioc_info(mrioc, "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
	    (unsigned long long)mrioc->sysif_regs_phys,
	    mrioc->sysif_regs, memap_sz);
	ioc_info(mrioc, "Number of MSI-X vectors found in capabilities: (%d)\n",
	    mrioc->msix_count);

	/*
	 * Cap the requested poll queue count so at least two MSI-X vectors
	 * remain for non-poll use (not in kdump mode).
	 */
	if (!reset_devices && poll_queues > 0)
		mrioc->requested_poll_qcount = min_t(int, poll_queues,
				mrioc->msix_count - 2);
	return retval;

out_failed:
	mpi3mr_cleanup_resources(mrioc);
	return retval;
}
4285 
4286 /**
4287  * mpi3mr_enable_events - Enable required events
4288  * @mrioc: Adapter instance reference
4289  *
4290  * This routine unmasks the events required by the driver by
 * sending the appropriate event mask bitmap through an event
4292  * notification request.
4293  *
4294  * Return: 0 on success and non-zero on failure.
4295  */
mpi3mr_enable_events(struct mpi3mr_ioc * mrioc)4296 static int mpi3mr_enable_events(struct mpi3mr_ioc *mrioc)
4297 {
4298 	int retval = 0;
4299 	u32  i;
4300 
4301 	for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
4302 		mrioc->event_masks[i] = -1;
4303 
4304 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_ADDED);
4305 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_INFO_CHANGED);
4306 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_STATUS_CHANGE);
4307 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE);
4308 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_ADDED);
4309 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
4310 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DISCOVERY);
4311 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
4312 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_BROADCAST_PRIMITIVE);
4313 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
4314 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_ENUMERATION);
4315 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_PREPARE_FOR_RESET);
4316 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_CABLE_MGMT);
4317 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENERGY_PACK_CHANGE);
4318 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_DIAGNOSTIC_BUFFER_STATUS_CHANGE);
4319 
4320 	retval = mpi3mr_issue_event_notification(mrioc);
4321 	if (retval)
4322 		ioc_err(mrioc, "failed to issue event notification %d\n",
4323 		    retval);
4324 	return retval;
4325 }
4326 
4327 /**
4328  * mpi3mr_init_ioc - Initialize the controller
4329  * @mrioc: Adapter instance reference
4330  *
4331  * This the controller initialization routine, executed either
4332  * after soft reset or from pci probe callback.
4333  * Setup the required resources, memory map the controller
4334  * registers, create admin and operational reply queue pairs,
4335  * allocate required memory for reply pool, sense buffer pool,
4336  * issue IOC init request to the firmware, unmask the events and
 * issue port enable to discover SAS/SATA/NVMe devices and RAID
4338  * volumes.
4339  *
4340  * Return: 0 on success and non-zero on failure.
4341  */
int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u8 retry = 0;
	struct mpi3_ioc_facts_data facts_data;
	u32 sz;

retry_init:
	retval = mpi3mr_bring_ioc_ready(mrioc);
	if (retval) {
		ioc_err(mrioc, "Failed to bring ioc ready: error %d\n",
		    retval);
		goto out_failed_noretry;
	}

	/* Initial ISR setup (flag=1); re-set up with flag=0 further below
	 * once controller facts are known.
	 */
	retval = mpi3mr_setup_isr(mrioc, 1);
	if (retval) {
		ioc_err(mrioc, "Failed to setup ISR error %d\n",
		    retval);
		goto out_failed_noretry;
	}

	retval = mpi3mr_issue_iocfacts(mrioc, &facts_data);
	if (retval) {
		ioc_err(mrioc, "Failed to Issue IOC Facts %d\n",
		    retval);
		goto out_failed;
	}

	/* Derive host I/O limits from the reported IOC facts */
	mrioc->max_host_ios = mrioc->facts.max_reqs - MPI3MR_INTERNAL_CMDS_RESVD;
	mrioc->shost->max_sectors = mrioc->facts.max_data_length / 512;
	mrioc->num_io_throttle_group = mrioc->facts.max_io_throttle_group;
	atomic_set(&mrioc->pend_large_data_sz, 0);

	/* Kdump kernels run with a reduced host I/O budget */
	if (reset_devices)
		mrioc->max_host_ios = min_t(int, mrioc->max_host_ios,
		    MPI3MR_HOST_IOS_KDUMP);

	/* SAS transport layer is used only when multipath is NOT offered */
	if (!(mrioc->facts.ioc_capabilities &
	    MPI3_IOCFACTS_CAPABILITY_MULTIPATH_SUPPORTED)) {
		mrioc->sas_transport_enabled = 1;
		mrioc->scsi_device_channel = 1;
		mrioc->shost->max_channel = 1;
		mrioc->shost->transportt = mpi3mr_transport_template;
	}

	if (mrioc->facts.max_req_limit)
		mrioc->prevent_reply_qfull = true;

	if (mrioc->facts.ioc_capabilities &
		MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_TRACE_SUPPORTED)
		mrioc->seg_tb_support = true;

	mrioc->reply_sz = mrioc->facts.reply_sz;

	retval = mpi3mr_check_reset_dma_mask(mrioc);
	if (retval) {
		ioc_err(mrioc, "Resetting dma mask failed %d\n",
		    retval);
		goto out_failed_noretry;
	}

	mpi3mr_read_tsu_interval(mrioc);
	mpi3mr_print_ioc_info(mrioc);

	dprint_init(mrioc, "allocating host diag buffers\n");
	mpi3mr_alloc_diag_bufs(mrioc);

	dprint_init(mrioc, "allocating ioctl dma buffers\n");
	mpi3mr_alloc_ioctl_dma_memory(mrioc);

	dprint_init(mrioc, "posting host diag buffers\n");
	retval = mpi3mr_post_diag_bufs(mrioc);

	/* Diag buffer posting is best-effort; init continues on failure */
	if (retval)
		ioc_warn(mrioc, "failed to post host diag buffers\n");

	if (!mrioc->init_cmds.reply) {
		retval = mpi3mr_alloc_reply_sense_bufs(mrioc);
		if (retval) {
			ioc_err(mrioc,
			    "%s :Failed to allocated reply sense buffers %d\n",
			    __func__, retval);
			goto out_failed_noretry;
		}
	}

	if (!mrioc->chain_sgl_list) {
		retval = mpi3mr_alloc_chain_bufs(mrioc);
		if (retval) {
			ioc_err(mrioc, "Failed to allocated chain buffers %d\n",
			    retval);
			goto out_failed_noretry;
		}
	}

	retval = mpi3mr_issue_iocinit(mrioc);
	if (retval) {
		ioc_err(mrioc, "Failed to Issue IOC Init %d\n",
		    retval);
		goto out_failed;
	}

	retval = mpi3mr_print_pkg_ver(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to get package version\n");
		goto out_failed;
	}

	/* Second ISR setup pass (flag=0) now that facts are processed */
	retval = mpi3mr_setup_isr(mrioc, 0);
	if (retval) {
		ioc_err(mrioc, "Failed to re-setup ISR, error %d\n",
		    retval);
		goto out_failed_noretry;
	}

	retval = mpi3mr_create_op_queues(mrioc);
	if (retval) {
		ioc_err(mrioc, "Failed to create OpQueues error %d\n",
		    retval);
		goto out_failed;
	}

	/* Persistent event log sequence number buffer (DMA-able) */
	if (!mrioc->pel_seqnum_virt) {
		dprint_init(mrioc, "allocating memory for pel_seqnum_virt\n");
		mrioc->pel_seqnum_sz = sizeof(struct mpi3_pel_seq);
		mrioc->pel_seqnum_virt = dma_alloc_coherent(&mrioc->pdev->dev,
		    mrioc->pel_seqnum_sz, &mrioc->pel_seqnum_dma,
		    GFP_KERNEL);
		if (!mrioc->pel_seqnum_virt) {
			retval = -ENOMEM;
			goto out_failed_noretry;
		}
	}

	if (!mrioc->throttle_groups && mrioc->num_io_throttle_group) {
		dprint_init(mrioc, "allocating memory for throttle groups\n");
		sz = sizeof(struct mpi3mr_throttle_group_info);
		mrioc->throttle_groups = kcalloc(mrioc->num_io_throttle_group, sz, GFP_KERNEL);
		if (!mrioc->throttle_groups) {
			retval = -1;
			goto out_failed_noretry;
		}
	}

	retval = mpi3mr_enable_events(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to enable events %d\n",
		    retval);
		goto out_failed;
	}

	retval = mpi3mr_refresh_trigger(mrioc, MPI3_CONFIG_ACTION_READ_CURRENT);
	if (retval) {
		ioc_err(mrioc, "failed to refresh triggers\n");
		goto out_failed;
	}

	ioc_info(mrioc, "controller initialization completed successfully\n");
	return retval;
out_failed:
	/* Retryable failures: wipe buffers and restart init, up to twice */
	if (retry < 2) {
		retry++;
		ioc_warn(mrioc, "retrying controller initialization, retry_count:%d\n",
		    retry);
		mpi3mr_memset_buffers(mrioc);
		goto retry_init;
	}
	retval = -1;
out_failed_noretry:
	/* Unrecoverable: diag-fault the IOC and mark the adapter dead */
	ioc_err(mrioc, "controller initialization failed\n");
	mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
	    MPI3MR_RESET_FROM_CTLR_CLEANUP);
	mrioc->unrecoverable = 1;
	return retval;
}
4518 
4519 /**
4520  * mpi3mr_reinit_ioc - Re-Initialize the controller
4521  * @mrioc: Adapter instance reference
4522  * @is_resume: Called from resume or reset path
4523  *
4524  * This the controller re-initialization routine, executed from
4525  * the soft reset handler or resume callback. Creates
4526  * operational reply queue pairs, allocate required memory for
4527  * reply pool, sense buffer pool, issue IOC init request to the
4528  * firmware, unmask the events and issue port enable to discover
4529  * SAS/SATA/NVMe devices and RAID volumes.
4530  *
4531  * Return: 0 on success and non-zero on failure.
4532  */
mpi3mr_reinit_ioc(struct mpi3mr_ioc * mrioc,u8 is_resume)4533 int mpi3mr_reinit_ioc(struct mpi3mr_ioc *mrioc, u8 is_resume)
4534 {
4535 	int retval = 0;
4536 	u8 retry = 0;
4537 	struct mpi3_ioc_facts_data facts_data;
4538 	u32 pe_timeout, ioc_status;
4539 
4540 retry_init:
4541 	pe_timeout =
4542 	    (MPI3MR_PORTENABLE_TIMEOUT / MPI3MR_PORTENABLE_POLL_INTERVAL);
4543 
4544 	dprint_reset(mrioc, "bringing up the controller to ready state\n");
4545 	retval = mpi3mr_bring_ioc_ready(mrioc);
4546 	if (retval) {
4547 		ioc_err(mrioc, "failed to bring to ready state\n");
4548 		goto out_failed_noretry;
4549 	}
4550 
4551 	mrioc->io_admin_reset_sync = 0;
4552 	if (is_resume || mrioc->block_on_pci_err) {
4553 		dprint_reset(mrioc, "setting up single ISR\n");
4554 		retval = mpi3mr_setup_isr(mrioc, 1);
4555 		if (retval) {
4556 			ioc_err(mrioc, "failed to setup ISR\n");
4557 			goto out_failed_noretry;
4558 		}
4559 	} else
4560 		mpi3mr_ioc_enable_intr(mrioc);
4561 
4562 	dprint_reset(mrioc, "getting ioc_facts\n");
4563 	retval = mpi3mr_issue_iocfacts(mrioc, &facts_data);
4564 	if (retval) {
4565 		ioc_err(mrioc, "failed to get ioc_facts\n");
4566 		goto out_failed;
4567 	}
4568 
4569 	dprint_reset(mrioc, "validating ioc_facts\n");
4570 	retval = mpi3mr_revalidate_factsdata(mrioc);
4571 	if (retval) {
4572 		ioc_err(mrioc, "failed to revalidate ioc_facts data\n");
4573 		goto out_failed_noretry;
4574 	}
4575 
4576 	mpi3mr_read_tsu_interval(mrioc);
4577 	mpi3mr_print_ioc_info(mrioc);
4578 
4579 	if (is_resume) {
4580 		dprint_reset(mrioc, "posting host diag buffers\n");
4581 		retval = mpi3mr_post_diag_bufs(mrioc);
4582 		if (retval)
4583 			ioc_warn(mrioc, "failed to post host diag buffers\n");
4584 	} else {
4585 		retval = mpi3mr_repost_diag_bufs(mrioc);
4586 		if (retval)
4587 			ioc_warn(mrioc, "failed to re post host diag buffers\n");
4588 	}
4589 
4590 	dprint_reset(mrioc, "sending ioc_init\n");
4591 	retval = mpi3mr_issue_iocinit(mrioc);
4592 	if (retval) {
4593 		ioc_err(mrioc, "failed to send ioc_init\n");
4594 		goto out_failed;
4595 	}
4596 
4597 	dprint_reset(mrioc, "getting package version\n");
4598 	retval = mpi3mr_print_pkg_ver(mrioc);
4599 	if (retval) {
4600 		ioc_err(mrioc, "failed to get package version\n");
4601 		goto out_failed;
4602 	}
4603 
4604 	if (is_resume || mrioc->block_on_pci_err) {
4605 		dprint_reset(mrioc, "setting up multiple ISR\n");
4606 		retval = mpi3mr_setup_isr(mrioc, 0);
4607 		if (retval) {
4608 			ioc_err(mrioc, "failed to re-setup ISR\n");
4609 			goto out_failed_noretry;
4610 		}
4611 	}
4612 
4613 	dprint_reset(mrioc, "creating operational queue pairs\n");
4614 	retval = mpi3mr_create_op_queues(mrioc);
4615 	if (retval) {
4616 		ioc_err(mrioc, "failed to create operational queue pairs\n");
4617 		goto out_failed;
4618 	}
4619 
4620 	if (!mrioc->pel_seqnum_virt) {
4621 		dprint_reset(mrioc, "allocating memory for pel_seqnum_virt\n");
4622 		mrioc->pel_seqnum_sz = sizeof(struct mpi3_pel_seq);
4623 		mrioc->pel_seqnum_virt = dma_alloc_coherent(&mrioc->pdev->dev,
4624 		    mrioc->pel_seqnum_sz, &mrioc->pel_seqnum_dma,
4625 		    GFP_KERNEL);
4626 		if (!mrioc->pel_seqnum_virt) {
4627 			retval = -ENOMEM;
4628 			goto out_failed_noretry;
4629 		}
4630 	}
4631 
4632 	if (mrioc->shost->nr_hw_queues > mrioc->num_op_reply_q) {
4633 		ioc_err(mrioc,
4634 		    "cannot create minimum number of operational queues expected:%d created:%d\n",
4635 		    mrioc->shost->nr_hw_queues, mrioc->num_op_reply_q);
4636 		retval = -1;
4637 		goto out_failed_noretry;
4638 	}
4639 
4640 	dprint_reset(mrioc, "enabling events\n");
4641 	retval = mpi3mr_enable_events(mrioc);
4642 	if (retval) {
4643 		ioc_err(mrioc, "failed to enable events\n");
4644 		goto out_failed;
4645 	}
4646 
4647 	mrioc->device_refresh_on = 1;
4648 	mpi3mr_add_event_wait_for_device_refresh(mrioc);
4649 
4650 	ioc_info(mrioc, "sending port enable\n");
4651 	retval = mpi3mr_issue_port_enable(mrioc, 1);
4652 	if (retval) {
4653 		ioc_err(mrioc, "failed to issue port enable\n");
4654 		goto out_failed;
4655 	}
4656 	do {
4657 		ssleep(MPI3MR_PORTENABLE_POLL_INTERVAL);
4658 		if (mrioc->init_cmds.state == MPI3MR_CMD_NOTUSED)
4659 			break;
4660 		if (!pci_device_is_present(mrioc->pdev))
4661 			mrioc->unrecoverable = 1;
4662 		if (mrioc->unrecoverable) {
4663 			retval = -1;
4664 			goto out_failed_noretry;
4665 		}
4666 		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
4667 		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
4668 		    (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
4669 			mpi3mr_print_fault_info(mrioc);
4670 			mrioc->init_cmds.is_waiting = 0;
4671 			mrioc->init_cmds.callback = NULL;
4672 			mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
4673 			goto out_failed;
4674 		}
4675 	} while (--pe_timeout);
4676 
4677 	if (!pe_timeout) {
4678 		ioc_err(mrioc, "port enable timed out\n");
4679 		mpi3mr_check_rh_fault_ioc(mrioc,
4680 		    MPI3MR_RESET_FROM_PE_TIMEOUT);
4681 		mrioc->init_cmds.is_waiting = 0;
4682 		mrioc->init_cmds.callback = NULL;
4683 		mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
4684 		goto out_failed;
4685 	} else if (mrioc->scan_failed) {
4686 		ioc_err(mrioc,
4687 		    "port enable failed with status=0x%04x\n",
4688 		    mrioc->scan_failed);
4689 	} else
4690 		ioc_info(mrioc, "port enable completed successfully\n");
4691 
4692 	ioc_info(mrioc, "controller %s completed successfully\n",
4693 	    (is_resume)?"resume":"re-initialization");
4694 	return retval;
4695 out_failed:
4696 	if (retry < 2) {
4697 		retry++;
4698 		ioc_warn(mrioc, "retrying controller %s, retry_count:%d\n",
4699 		    (is_resume)?"resume":"re-initialization", retry);
4700 		mpi3mr_memset_buffers(mrioc);
4701 		goto retry_init;
4702 	}
4703 	retval = -1;
4704 out_failed_noretry:
4705 	ioc_err(mrioc, "controller %s is failed\n",
4706 	    (is_resume)?"resume":"re-initialization");
4707 	mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
4708 	    MPI3MR_RESET_FROM_CTLR_CLEANUP);
4709 	mrioc->unrecoverable = 1;
4710 	return retval;
4711 }
4712 
4713 /**
4714  * mpi3mr_memset_op_reply_q_buffers - memset the operational reply queue's
4715  *					segments
4716  * @mrioc: Adapter instance reference
4717  * @qidx: Operational reply queue index
4718  *
4719  * Return: Nothing.
4720  */
mpi3mr_memset_op_reply_q_buffers(struct mpi3mr_ioc * mrioc,u16 qidx)4721 static void mpi3mr_memset_op_reply_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx)
4722 {
4723 	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
4724 	struct segments *segments;
4725 	int i, size;
4726 
4727 	if (!op_reply_q->q_segments)
4728 		return;
4729 
4730 	size = op_reply_q->segment_qd * mrioc->op_reply_desc_sz;
4731 	segments = op_reply_q->q_segments;
4732 	for (i = 0; i < op_reply_q->num_segments; i++)
4733 		memset(segments[i].segment, 0, size);
4734 }
4735 
4736 /**
4737  * mpi3mr_memset_op_req_q_buffers - memset the operational request queue's
4738  *					segments
4739  * @mrioc: Adapter instance reference
4740  * @qidx: Operational request queue index
4741  *
4742  * Return: Nothing.
4743  */
mpi3mr_memset_op_req_q_buffers(struct mpi3mr_ioc * mrioc,u16 qidx)4744 static void mpi3mr_memset_op_req_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx)
4745 {
4746 	struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx;
4747 	struct segments *segments;
4748 	int i, size;
4749 
4750 	if (!op_req_q->q_segments)
4751 		return;
4752 
4753 	size = op_req_q->segment_qd * mrioc->facts.op_req_sz;
4754 	segments = op_req_q->q_segments;
4755 	for (i = 0; i < op_req_q->num_segments; i++)
4756 		memset(segments[i].segment, 0, size);
4757 }
4758 
4759 /**
4760  * mpi3mr_memset_buffers - memset memory for a controller
4761  * @mrioc: Adapter instance reference
4762  *
4763  * clear all the memory allocated for a controller, typically
4764  * called post reset to reuse the memory allocated during the
4765  * controller init.
4766  *
4767  * Return: Nothing.
4768  */
mpi3mr_memset_buffers(struct mpi3mr_ioc * mrioc)4769 void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
4770 {
4771 	u16 i;
4772 	struct mpi3mr_throttle_group_info *tg;
4773 
4774 	mrioc->change_count = 0;
4775 	mrioc->active_poll_qcount = 0;
4776 	mrioc->default_qcount = 0;
4777 	if (mrioc->admin_req_base)
4778 		memset(mrioc->admin_req_base, 0, mrioc->admin_req_q_sz);
4779 	if (mrioc->admin_reply_base)
4780 		memset(mrioc->admin_reply_base, 0, mrioc->admin_reply_q_sz);
4781 	atomic_set(&mrioc->admin_reply_q_in_use, 0);
4782 	atomic_set(&mrioc->admin_pend_isr, 0);
4783 
4784 	if (mrioc->init_cmds.reply) {
4785 		memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply));
4786 		memset(mrioc->bsg_cmds.reply, 0,
4787 		    sizeof(*mrioc->bsg_cmds.reply));
4788 		memset(mrioc->host_tm_cmds.reply, 0,
4789 		    sizeof(*mrioc->host_tm_cmds.reply));
4790 		memset(mrioc->pel_cmds.reply, 0,
4791 		    sizeof(*mrioc->pel_cmds.reply));
4792 		memset(mrioc->pel_abort_cmd.reply, 0,
4793 		    sizeof(*mrioc->pel_abort_cmd.reply));
4794 		memset(mrioc->transport_cmds.reply, 0,
4795 		    sizeof(*mrioc->transport_cmds.reply));
4796 		for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
4797 			memset(mrioc->dev_rmhs_cmds[i].reply, 0,
4798 			    sizeof(*mrioc->dev_rmhs_cmds[i].reply));
4799 		for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++)
4800 			memset(mrioc->evtack_cmds[i].reply, 0,
4801 			    sizeof(*mrioc->evtack_cmds[i].reply));
4802 		bitmap_clear(mrioc->removepend_bitmap, 0,
4803 			     mrioc->dev_handle_bitmap_bits);
4804 		bitmap_clear(mrioc->devrem_bitmap, 0, MPI3MR_NUM_DEVRMCMD);
4805 		bitmap_clear(mrioc->evtack_cmds_bitmap, 0,
4806 			     MPI3MR_NUM_EVTACKCMD);
4807 	}
4808 
4809 	for (i = 0; i < mrioc->num_queues; i++) {
4810 		mrioc->op_reply_qinfo[i].qid = 0;
4811 		mrioc->op_reply_qinfo[i].ci = 0;
4812 		mrioc->op_reply_qinfo[i].num_replies = 0;
4813 		mrioc->op_reply_qinfo[i].ephase = 0;
4814 		atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
4815 		atomic_set(&mrioc->op_reply_qinfo[i].in_use, 0);
4816 		mpi3mr_memset_op_reply_q_buffers(mrioc, i);
4817 
4818 		mrioc->req_qinfo[i].ci = 0;
4819 		mrioc->req_qinfo[i].pi = 0;
4820 		mrioc->req_qinfo[i].num_requests = 0;
4821 		mrioc->req_qinfo[i].qid = 0;
4822 		mrioc->req_qinfo[i].reply_qid = 0;
4823 		spin_lock_init(&mrioc->req_qinfo[i].q_lock);
4824 		mpi3mr_memset_op_req_q_buffers(mrioc, i);
4825 	}
4826 
4827 	atomic_set(&mrioc->pend_large_data_sz, 0);
4828 	if (mrioc->throttle_groups) {
4829 		tg = mrioc->throttle_groups;
4830 		for (i = 0; i < mrioc->num_io_throttle_group; i++, tg++) {
4831 			tg->id = 0;
4832 			tg->fw_qd = 0;
4833 			tg->modified_qd = 0;
4834 			tg->io_divert = 0;
4835 			tg->need_qd_reduction = 0;
4836 			tg->high = 0;
4837 			tg->low = 0;
4838 			tg->qd_reduction = 0;
4839 			atomic_set(&tg->pend_large_data_sz, 0);
4840 		}
4841 	}
4842 }
4843 
4844 /**
4845  * mpi3mr_free_mem - Free memory allocated for a controller
4846  * @mrioc: Adapter instance reference
4847  *
4848  * Free all the memory allocated for a controller.
4849  *
4850  * Return: Nothing.
4851  */
mpi3mr_free_mem(struct mpi3mr_ioc * mrioc)4852 void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
4853 {
4854 	u16 i, j;
4855 	struct mpi3mr_intr_info *intr_info;
4856 	struct diag_buffer_desc *diag_buffer;
4857 
4858 	mpi3mr_free_enclosure_list(mrioc);
4859 	mpi3mr_free_ioctl_dma_memory(mrioc);
4860 
4861 	if (mrioc->sense_buf_pool) {
4862 		if (mrioc->sense_buf)
4863 			dma_pool_free(mrioc->sense_buf_pool, mrioc->sense_buf,
4864 			    mrioc->sense_buf_dma);
4865 		dma_pool_destroy(mrioc->sense_buf_pool);
4866 		mrioc->sense_buf = NULL;
4867 		mrioc->sense_buf_pool = NULL;
4868 	}
4869 	if (mrioc->sense_buf_q_pool) {
4870 		if (mrioc->sense_buf_q)
4871 			dma_pool_free(mrioc->sense_buf_q_pool,
4872 			    mrioc->sense_buf_q, mrioc->sense_buf_q_dma);
4873 		dma_pool_destroy(mrioc->sense_buf_q_pool);
4874 		mrioc->sense_buf_q = NULL;
4875 		mrioc->sense_buf_q_pool = NULL;
4876 	}
4877 
4878 	if (mrioc->reply_buf_pool) {
4879 		if (mrioc->reply_buf)
4880 			dma_pool_free(mrioc->reply_buf_pool, mrioc->reply_buf,
4881 			    mrioc->reply_buf_dma);
4882 		dma_pool_destroy(mrioc->reply_buf_pool);
4883 		mrioc->reply_buf = NULL;
4884 		mrioc->reply_buf_pool = NULL;
4885 	}
4886 	if (mrioc->reply_free_q_pool) {
4887 		if (mrioc->reply_free_q)
4888 			dma_pool_free(mrioc->reply_free_q_pool,
4889 			    mrioc->reply_free_q, mrioc->reply_free_q_dma);
4890 		dma_pool_destroy(mrioc->reply_free_q_pool);
4891 		mrioc->reply_free_q = NULL;
4892 		mrioc->reply_free_q_pool = NULL;
4893 	}
4894 
4895 	for (i = 0; i < mrioc->num_op_req_q; i++)
4896 		mpi3mr_free_op_req_q_segments(mrioc, i);
4897 
4898 	for (i = 0; i < mrioc->num_op_reply_q; i++)
4899 		mpi3mr_free_op_reply_q_segments(mrioc, i);
4900 
4901 	for (i = 0; i < mrioc->intr_info_count; i++) {
4902 		intr_info = mrioc->intr_info + i;
4903 		intr_info->op_reply_q = NULL;
4904 	}
4905 
4906 	kfree(mrioc->req_qinfo);
4907 	mrioc->req_qinfo = NULL;
4908 	mrioc->num_op_req_q = 0;
4909 
4910 	kfree(mrioc->op_reply_qinfo);
4911 	mrioc->op_reply_qinfo = NULL;
4912 	mrioc->num_op_reply_q = 0;
4913 
4914 	kfree(mrioc->init_cmds.reply);
4915 	mrioc->init_cmds.reply = NULL;
4916 
4917 	kfree(mrioc->bsg_cmds.reply);
4918 	mrioc->bsg_cmds.reply = NULL;
4919 
4920 	kfree(mrioc->host_tm_cmds.reply);
4921 	mrioc->host_tm_cmds.reply = NULL;
4922 
4923 	kfree(mrioc->pel_cmds.reply);
4924 	mrioc->pel_cmds.reply = NULL;
4925 
4926 	kfree(mrioc->pel_abort_cmd.reply);
4927 	mrioc->pel_abort_cmd.reply = NULL;
4928 
4929 	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
4930 		kfree(mrioc->evtack_cmds[i].reply);
4931 		mrioc->evtack_cmds[i].reply = NULL;
4932 	}
4933 
4934 	bitmap_free(mrioc->removepend_bitmap);
4935 	mrioc->removepend_bitmap = NULL;
4936 
4937 	bitmap_free(mrioc->devrem_bitmap);
4938 	mrioc->devrem_bitmap = NULL;
4939 
4940 	bitmap_free(mrioc->evtack_cmds_bitmap);
4941 	mrioc->evtack_cmds_bitmap = NULL;
4942 
4943 	bitmap_free(mrioc->chain_bitmap);
4944 	mrioc->chain_bitmap = NULL;
4945 
4946 	kfree(mrioc->transport_cmds.reply);
4947 	mrioc->transport_cmds.reply = NULL;
4948 
4949 	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
4950 		kfree(mrioc->dev_rmhs_cmds[i].reply);
4951 		mrioc->dev_rmhs_cmds[i].reply = NULL;
4952 	}
4953 
4954 	if (mrioc->chain_buf_pool) {
4955 		for (i = 0; i < mrioc->chain_buf_count; i++) {
4956 			if (mrioc->chain_sgl_list[i].addr) {
4957 				dma_pool_free(mrioc->chain_buf_pool,
4958 				    mrioc->chain_sgl_list[i].addr,
4959 				    mrioc->chain_sgl_list[i].dma_addr);
4960 				mrioc->chain_sgl_list[i].addr = NULL;
4961 			}
4962 		}
4963 		dma_pool_destroy(mrioc->chain_buf_pool);
4964 		mrioc->chain_buf_pool = NULL;
4965 	}
4966 
4967 	kfree(mrioc->chain_sgl_list);
4968 	mrioc->chain_sgl_list = NULL;
4969 
4970 	if (mrioc->admin_reply_base) {
4971 		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
4972 		    mrioc->admin_reply_base, mrioc->admin_reply_dma);
4973 		mrioc->admin_reply_base = NULL;
4974 	}
4975 	if (mrioc->admin_req_base) {
4976 		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
4977 		    mrioc->admin_req_base, mrioc->admin_req_dma);
4978 		mrioc->admin_req_base = NULL;
4979 	}
4980 
4981 	if (mrioc->pel_seqnum_virt) {
4982 		dma_free_coherent(&mrioc->pdev->dev, mrioc->pel_seqnum_sz,
4983 		    mrioc->pel_seqnum_virt, mrioc->pel_seqnum_dma);
4984 		mrioc->pel_seqnum_virt = NULL;
4985 	}
4986 
4987 	for (i = 0; i < MPI3MR_MAX_NUM_HDB; i++) {
4988 		diag_buffer = &mrioc->diag_buffers[i];
4989 		if ((i == 0) && mrioc->seg_tb_support) {
4990 			if (mrioc->trace_buf_pool) {
4991 				for (j = 0; j < mrioc->num_tb_segs; j++) {
4992 					if (mrioc->trace_buf[j].segment) {
4993 						dma_pool_free(mrioc->trace_buf_pool,
4994 						    mrioc->trace_buf[j].segment,
4995 						    mrioc->trace_buf[j].segment_dma);
4996 						mrioc->trace_buf[j].segment = NULL;
4997 					}
4998 
4999 					mrioc->trace_buf[j].segment = NULL;
5000 				}
5001 				dma_pool_destroy(mrioc->trace_buf_pool);
5002 				mrioc->trace_buf_pool = NULL;
5003 			}
5004 
5005 			kfree(mrioc->trace_buf);
5006 			mrioc->trace_buf = NULL;
5007 			diag_buffer->size = sizeof(u64) * mrioc->num_tb_segs;
5008 		}
5009 		if (diag_buffer->addr) {
5010 			dma_free_coherent(&mrioc->pdev->dev,
5011 			    diag_buffer->size, diag_buffer->addr,
5012 			    diag_buffer->dma_addr);
5013 			diag_buffer->addr = NULL;
5014 			diag_buffer->size = 0;
5015 			diag_buffer->type = 0;
5016 			diag_buffer->status = 0;
5017 		}
5018 	}
5019 
5020 	kfree(mrioc->throttle_groups);
5021 	mrioc->throttle_groups = NULL;
5022 
5023 	kfree(mrioc->logdata_buf);
5024 	mrioc->logdata_buf = NULL;
5025 
5026 }
5027 
5028 /**
5029  * mpi3mr_issue_ioc_shutdown - shutdown controller
5030  * @mrioc: Adapter instance reference
5031  *
5032  * Send shutodwn notification to the controller and wait for the
5033  * shutdown_timeout for it to be completed.
5034  *
5035  * Return: Nothing.
5036  */
mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc * mrioc)5037 static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
5038 {
5039 	u32 ioc_config, ioc_status;
5040 	u8 retval = 1;
5041 	u32 timeout = MPI3MR_DEFAULT_SHUTDOWN_TIME * 10;
5042 
5043 	ioc_info(mrioc, "Issuing shutdown Notification\n");
5044 	if (mrioc->unrecoverable) {
5045 		ioc_warn(mrioc,
5046 		    "IOC is unrecoverable shutdown is not issued\n");
5047 		return;
5048 	}
5049 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
5050 	if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
5051 	    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS) {
5052 		ioc_info(mrioc, "shutdown already in progress\n");
5053 		return;
5054 	}
5055 
5056 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
5057 	ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL;
5058 	ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ;
5059 
5060 	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
5061 
5062 	if (mrioc->facts.shutdown_timeout)
5063 		timeout = mrioc->facts.shutdown_timeout * 10;
5064 
5065 	do {
5066 		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
5067 		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
5068 		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_COMPLETE) {
5069 			retval = 0;
5070 			break;
5071 		}
5072 		msleep(100);
5073 	} while (--timeout);
5074 
5075 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
5076 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
5077 
5078 	if (retval) {
5079 		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
5080 		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS)
5081 			ioc_warn(mrioc,
5082 			    "shutdown still in progress after timeout\n");
5083 	}
5084 
5085 	ioc_info(mrioc,
5086 	    "Base IOC Sts/Config after %s shutdown is (0x%08x)/(0x%08x)\n",
5087 	    (!retval) ? "successful" : "failed", ioc_status,
5088 	    ioc_config);
5089 }
5090 
5091 /**
5092  * mpi3mr_cleanup_ioc - Cleanup controller
5093  * @mrioc: Adapter instance reference
5094  *
5095  * controller cleanup handler, Message unit reset or soft reset
5096  * and shutdown notification is issued to the controller.
5097  *
5098  * Return: Nothing.
5099  */
mpi3mr_cleanup_ioc(struct mpi3mr_ioc * mrioc)5100 void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc)
5101 {
5102 	enum mpi3mr_iocstate ioc_state;
5103 
5104 	dprint_exit(mrioc, "cleaning up the controller\n");
5105 	mpi3mr_ioc_disable_intr(mrioc);
5106 
5107 	ioc_state = mpi3mr_get_iocstate(mrioc);
5108 
5109 	if (!mrioc->unrecoverable && !mrioc->reset_in_progress &&
5110 	    !mrioc->pci_err_recovery &&
5111 	    (ioc_state == MRIOC_STATE_READY)) {
5112 		if (mpi3mr_issue_and_process_mur(mrioc,
5113 		    MPI3MR_RESET_FROM_CTLR_CLEANUP))
5114 			mpi3mr_issue_reset(mrioc,
5115 			    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
5116 			    MPI3MR_RESET_FROM_MUR_FAILURE);
5117 		mpi3mr_issue_ioc_shutdown(mrioc);
5118 	}
5119 	dprint_exit(mrioc, "controller cleanup completed\n");
5120 }
5121 
5122 /**
5123  * mpi3mr_drv_cmd_comp_reset - Flush a internal driver command
5124  * @mrioc: Adapter instance reference
5125  * @cmdptr: Internal command tracker
5126  *
5127  * Complete an internal driver commands with state indicating it
5128  * is completed due to reset.
5129  *
5130  * Return: Nothing.
5131  */
mpi3mr_drv_cmd_comp_reset(struct mpi3mr_ioc * mrioc,struct mpi3mr_drv_cmd * cmdptr)5132 static inline void mpi3mr_drv_cmd_comp_reset(struct mpi3mr_ioc *mrioc,
5133 	struct mpi3mr_drv_cmd *cmdptr)
5134 {
5135 	if (cmdptr->state & MPI3MR_CMD_PENDING) {
5136 		cmdptr->state |= MPI3MR_CMD_RESET;
5137 		cmdptr->state &= ~MPI3MR_CMD_PENDING;
5138 		if (cmdptr->is_waiting) {
5139 			complete(&cmdptr->done);
5140 			cmdptr->is_waiting = 0;
5141 		} else if (cmdptr->callback)
5142 			cmdptr->callback(mrioc, cmdptr);
5143 	}
5144 }
5145 
5146 /**
5147  * mpi3mr_flush_drv_cmds - Flush internaldriver commands
5148  * @mrioc: Adapter instance reference
5149  *
5150  * Flush all internal driver commands post reset
5151  *
5152  * Return: Nothing.
5153  */
mpi3mr_flush_drv_cmds(struct mpi3mr_ioc * mrioc)5154 void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc)
5155 {
5156 	struct mpi3mr_drv_cmd *cmdptr;
5157 	u8 i;
5158 
5159 	cmdptr = &mrioc->init_cmds;
5160 	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5161 
5162 	cmdptr = &mrioc->cfg_cmds;
5163 	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5164 
5165 	cmdptr = &mrioc->bsg_cmds;
5166 	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5167 	cmdptr = &mrioc->host_tm_cmds;
5168 	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5169 
5170 	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
5171 		cmdptr = &mrioc->dev_rmhs_cmds[i];
5172 		mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5173 	}
5174 
5175 	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
5176 		cmdptr = &mrioc->evtack_cmds[i];
5177 		mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5178 	}
5179 
5180 	cmdptr = &mrioc->pel_cmds;
5181 	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5182 
5183 	cmdptr = &mrioc->pel_abort_cmd;
5184 	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5185 
5186 	cmdptr = &mrioc->transport_cmds;
5187 	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5188 }
5189 
5190 /**
5191  * mpi3mr_pel_wait_post - Issue PEL Wait
5192  * @mrioc: Adapter instance reference
5193  * @drv_cmd: Internal command tracker
5194  *
5195  * Issue PEL Wait MPI request through admin queue and return.
5196  *
5197  * Return: Nothing.
5198  */
mpi3mr_pel_wait_post(struct mpi3mr_ioc * mrioc,struct mpi3mr_drv_cmd * drv_cmd)5199 static void mpi3mr_pel_wait_post(struct mpi3mr_ioc *mrioc,
5200 	struct mpi3mr_drv_cmd *drv_cmd)
5201 {
5202 	struct mpi3_pel_req_action_wait pel_wait;
5203 
5204 	mrioc->pel_abort_requested = false;
5205 
5206 	memset(&pel_wait, 0, sizeof(pel_wait));
5207 	drv_cmd->state = MPI3MR_CMD_PENDING;
5208 	drv_cmd->is_waiting = 0;
5209 	drv_cmd->callback = mpi3mr_pel_wait_complete;
5210 	drv_cmd->ioc_status = 0;
5211 	drv_cmd->ioc_loginfo = 0;
5212 	pel_wait.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);
5213 	pel_wait.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
5214 	pel_wait.action = MPI3_PEL_ACTION_WAIT;
5215 	pel_wait.starting_sequence_number = cpu_to_le32(mrioc->pel_newest_seqnum);
5216 	pel_wait.locale = cpu_to_le16(mrioc->pel_locale);
5217 	pel_wait.class = cpu_to_le16(mrioc->pel_class);
5218 	pel_wait.wait_time = MPI3_PEL_WAITTIME_INFINITE_WAIT;
5219 	dprint_bsg_info(mrioc, "sending pel_wait seqnum(%d), class(%d), locale(0x%08x)\n",
5220 	    mrioc->pel_newest_seqnum, mrioc->pel_class, mrioc->pel_locale);
5221 
5222 	if (mpi3mr_admin_request_post(mrioc, &pel_wait, sizeof(pel_wait), 0)) {
5223 		dprint_bsg_err(mrioc,
5224 			    "Issuing PELWait: Admin post failed\n");
5225 		drv_cmd->state = MPI3MR_CMD_NOTUSED;
5226 		drv_cmd->callback = NULL;
5227 		drv_cmd->retry_count = 0;
5228 		mrioc->pel_enabled = false;
5229 	}
5230 }
5231 
5232 /**
5233  * mpi3mr_pel_get_seqnum_post - Issue PEL Get Sequence number
5234  * @mrioc: Adapter instance reference
5235  * @drv_cmd: Internal command tracker
5236  *
5237  * Issue PEL get sequence number MPI request through admin queue
5238  * and return.
5239  *
5240  * Return: 0 on success, non-zero on failure.
5241  */
mpi3mr_pel_get_seqnum_post(struct mpi3mr_ioc * mrioc,struct mpi3mr_drv_cmd * drv_cmd)5242 int mpi3mr_pel_get_seqnum_post(struct mpi3mr_ioc *mrioc,
5243 	struct mpi3mr_drv_cmd *drv_cmd)
5244 {
5245 	struct mpi3_pel_req_action_get_sequence_numbers pel_getseq_req;
5246 	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
5247 	int retval = 0;
5248 
5249 	memset(&pel_getseq_req, 0, sizeof(pel_getseq_req));
5250 	mrioc->pel_cmds.state = MPI3MR_CMD_PENDING;
5251 	mrioc->pel_cmds.is_waiting = 0;
5252 	mrioc->pel_cmds.ioc_status = 0;
5253 	mrioc->pel_cmds.ioc_loginfo = 0;
5254 	mrioc->pel_cmds.callback = mpi3mr_pel_get_seqnum_complete;
5255 	pel_getseq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);
5256 	pel_getseq_req.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
5257 	pel_getseq_req.action = MPI3_PEL_ACTION_GET_SEQNUM;
5258 	mpi3mr_add_sg_single(&pel_getseq_req.sgl, sgl_flags,
5259 	    mrioc->pel_seqnum_sz, mrioc->pel_seqnum_dma);
5260 
5261 	retval = mpi3mr_admin_request_post(mrioc, &pel_getseq_req,
5262 			sizeof(pel_getseq_req), 0);
5263 	if (retval) {
5264 		if (drv_cmd) {
5265 			drv_cmd->state = MPI3MR_CMD_NOTUSED;
5266 			drv_cmd->callback = NULL;
5267 			drv_cmd->retry_count = 0;
5268 		}
5269 		mrioc->pel_enabled = false;
5270 	}
5271 
5272 	return retval;
5273 }
5274 
5275 /**
5276  * mpi3mr_pel_wait_complete - PELWait Completion callback
5277  * @mrioc: Adapter instance reference
5278  * @drv_cmd: Internal command tracker
5279  *
5280  * This is a callback handler for the PELWait request and
5281  * firmware completes a PELWait request when it is aborted or a
5282  * new PEL entry is available. This sends AEN to the application
5283  * and if the PELwait completion is not due to PELAbort then
5284  * this will send a request for new PEL Sequence number
5285  *
5286  * Return: Nothing.
5287  */
mpi3mr_pel_wait_complete(struct mpi3mr_ioc * mrioc,struct mpi3mr_drv_cmd * drv_cmd)5288 static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc,
5289 	struct mpi3mr_drv_cmd *drv_cmd)
5290 {
5291 	struct mpi3_pel_reply *pel_reply = NULL;
5292 	u16 ioc_status, pe_log_status;
5293 	bool do_retry = false;
5294 
5295 	if (drv_cmd->state & MPI3MR_CMD_RESET)
5296 		goto cleanup_drv_cmd;
5297 
5298 	ioc_status = drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
5299 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5300 		ioc_err(mrioc, "%s: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
5301 			__func__, ioc_status, drv_cmd->ioc_loginfo);
5302 		dprint_bsg_err(mrioc,
5303 		    "pel_wait: failed with ioc_status(0x%04x), log_info(0x%08x)\n",
5304 		    ioc_status, drv_cmd->ioc_loginfo);
5305 		do_retry = true;
5306 	}
5307 
5308 	if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
5309 		pel_reply = (struct mpi3_pel_reply *)drv_cmd->reply;
5310 
5311 	if (!pel_reply) {
5312 		dprint_bsg_err(mrioc,
5313 		    "pel_wait: failed due to no reply\n");
5314 		goto out_failed;
5315 	}
5316 
5317 	pe_log_status = le16_to_cpu(pel_reply->pe_log_status);
5318 	if ((pe_log_status != MPI3_PEL_STATUS_SUCCESS) &&
5319 	    (pe_log_status != MPI3_PEL_STATUS_ABORTED)) {
5320 		ioc_err(mrioc, "%s: Failed pe_log_status(0x%04x)\n",
5321 			__func__, pe_log_status);
5322 		dprint_bsg_err(mrioc,
5323 		    "pel_wait: failed due to pel_log_status(0x%04x)\n",
5324 		    pe_log_status);
5325 		do_retry = true;
5326 	}
5327 
5328 	if (do_retry) {
5329 		if (drv_cmd->retry_count < MPI3MR_PEL_RETRY_COUNT) {
5330 			drv_cmd->retry_count++;
5331 			dprint_bsg_err(mrioc, "pel_wait: retrying(%d)\n",
5332 			    drv_cmd->retry_count);
5333 			mpi3mr_pel_wait_post(mrioc, drv_cmd);
5334 			return;
5335 		}
5336 		dprint_bsg_err(mrioc,
5337 		    "pel_wait: failed after all retries(%d)\n",
5338 		    drv_cmd->retry_count);
5339 		goto out_failed;
5340 	}
5341 	atomic64_inc(&event_counter);
5342 	if (!mrioc->pel_abort_requested) {
5343 		mrioc->pel_cmds.retry_count = 0;
5344 		mpi3mr_pel_get_seqnum_post(mrioc, &mrioc->pel_cmds);
5345 	}
5346 
5347 	return;
5348 out_failed:
5349 	mrioc->pel_enabled = false;
5350 cleanup_drv_cmd:
5351 	drv_cmd->state = MPI3MR_CMD_NOTUSED;
5352 	drv_cmd->callback = NULL;
5353 	drv_cmd->retry_count = 0;
5354 }
5355 
5356 /**
5357  * mpi3mr_pel_get_seqnum_complete - PELGetSeqNum Completion callback
5358  * @mrioc: Adapter instance reference
5359  * @drv_cmd: Internal command tracker
5360  *
5361  * This is a callback handler for the PEL get sequence number
5362  * request and a new PEL wait request will be issued to the
5363  * firmware from this
5364  *
5365  * Return: Nothing.
5366  */
mpi3mr_pel_get_seqnum_complete(struct mpi3mr_ioc * mrioc,struct mpi3mr_drv_cmd * drv_cmd)5367 void mpi3mr_pel_get_seqnum_complete(struct mpi3mr_ioc *mrioc,
5368 	struct mpi3mr_drv_cmd *drv_cmd)
5369 {
5370 	struct mpi3_pel_reply *pel_reply = NULL;
5371 	struct mpi3_pel_seq *pel_seqnum_virt;
5372 	u16 ioc_status;
5373 	bool do_retry = false;
5374 
5375 	pel_seqnum_virt = (struct mpi3_pel_seq *)mrioc->pel_seqnum_virt;
5376 
5377 	if (drv_cmd->state & MPI3MR_CMD_RESET)
5378 		goto cleanup_drv_cmd;
5379 
5380 	ioc_status = drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
5381 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5382 		dprint_bsg_err(mrioc,
5383 		    "pel_get_seqnum: failed with ioc_status(0x%04x), log_info(0x%08x)\n",
5384 		    ioc_status, drv_cmd->ioc_loginfo);
5385 		do_retry = true;
5386 	}
5387 
5388 	if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
5389 		pel_reply = (struct mpi3_pel_reply *)drv_cmd->reply;
5390 	if (!pel_reply) {
5391 		dprint_bsg_err(mrioc,
5392 		    "pel_get_seqnum: failed due to no reply\n");
5393 		goto out_failed;
5394 	}
5395 
5396 	if (le16_to_cpu(pel_reply->pe_log_status) != MPI3_PEL_STATUS_SUCCESS) {
5397 		dprint_bsg_err(mrioc,
5398 		    "pel_get_seqnum: failed due to pel_log_status(0x%04x)\n",
5399 		    le16_to_cpu(pel_reply->pe_log_status));
5400 		do_retry = true;
5401 	}
5402 
5403 	if (do_retry) {
5404 		if (drv_cmd->retry_count < MPI3MR_PEL_RETRY_COUNT) {
5405 			drv_cmd->retry_count++;
5406 			dprint_bsg_err(mrioc,
5407 			    "pel_get_seqnum: retrying(%d)\n",
5408 			    drv_cmd->retry_count);
5409 			mpi3mr_pel_get_seqnum_post(mrioc, drv_cmd);
5410 			return;
5411 		}
5412 
5413 		dprint_bsg_err(mrioc,
5414 		    "pel_get_seqnum: failed after all retries(%d)\n",
5415 		    drv_cmd->retry_count);
5416 		goto out_failed;
5417 	}
5418 	mrioc->pel_newest_seqnum = le32_to_cpu(pel_seqnum_virt->newest) + 1;
5419 	drv_cmd->retry_count = 0;
5420 	mpi3mr_pel_wait_post(mrioc, drv_cmd);
5421 
5422 	return;
5423 out_failed:
5424 	mrioc->pel_enabled = false;
5425 cleanup_drv_cmd:
5426 	drv_cmd->state = MPI3MR_CMD_NOTUSED;
5427 	drv_cmd->callback = NULL;
5428 	drv_cmd->retry_count = 0;
5429 }
5430 
/**
 * mpi3mr_check_op_admin_proc - check reply queues are quiesced
 * @mrioc: Adapter instance reference
 *
 * Check if any of the operational reply queues
 * or the admin reply queue are currently in use.
 * If any queue is in use, this function waits for
 * a maximum of 10 seconds for them to become available.
 *
 * Return: 0 on success, non-zero on failure.
 */
mpi3mr_check_op_admin_proc(struct mpi3mr_ioc * mrioc)5442 static int mpi3mr_check_op_admin_proc(struct mpi3mr_ioc *mrioc)
5443 {
5444 
5445 	u16 timeout = 10 * 10;
5446 	u16 elapsed_time = 0;
5447 	bool op_admin_in_use = false;
5448 
5449 	do {
5450 		op_admin_in_use = false;
5451 
5452 		/* Check admin_reply queue first to exit early */
5453 		if (atomic_read(&mrioc->admin_reply_q_in_use) == 1)
5454 			op_admin_in_use = true;
5455 		else {
5456 			/* Check op_reply queues */
5457 			int i;
5458 
5459 			for (i = 0; i < mrioc->num_queues; i++) {
5460 				if (atomic_read(&mrioc->op_reply_qinfo[i].in_use) == 1) {
5461 					op_admin_in_use = true;
5462 					break;
5463 				}
5464 			}
5465 		}
5466 
5467 		if (!op_admin_in_use)
5468 			break;
5469 
5470 		msleep(100);
5471 
5472 	} while (++elapsed_time < timeout);
5473 
5474 	if (op_admin_in_use)
5475 		return 1;
5476 
5477 	return 0;
5478 }
5479 
/**
 * mpi3mr_soft_reset_handler - Reset the controller
 * @mrioc: Adapter instance reference
 * @reset_reason: Reset reason code
 * @snapdump: Flag to generate snapdump in firmware or not
 *
 * This is a handler for recovering the controller by issuing a
 * soft reset or a diag fault reset. This is a blocking function;
 * while one reset is executing, any further reset requests are
 * blocked and are returned the result of the ongoing reset. All
 * BSG requests will be blocked during the reset. If the
 * controller reset is successful then the controller will be
 * reinitialized, otherwise the controller will be marked as not
 * recoverable.
 *
 * If the snapdump flag is set, the controller is issued a diag
 * fault reset so that the firmware can create a snap dump and
 * post that the firmware will result in F000 fault and the
 * driver will issue soft reset to recover from that.
 *
 * Return: 0 on success, non-zero on failure.
 */
int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
	u16 reset_reason, u8 snapdump)
{
	int retval = 0, i;
	unsigned long flags;
	enum mpi3mr_iocstate ioc_state;
	u32 host_diagnostic, timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
	union mpi3mr_trigger_data trigger_data;

	/* Block the reset handler until diag save in progress*/
	dprint_reset(mrioc,
	    "soft_reset_handler: check and block on diagsave_timeout(%d)\n",
	    mrioc->diagsave_timeout);
	while (mrioc->diagsave_timeout)
		ssleep(1);
	/*
	 * Block new resets until the currently executing one is finished and
	 * return the status of the existing reset for all blocked resets
	 */
	dprint_reset(mrioc, "soft_reset_handler: acquiring reset_mutex\n");
	if (!mutex_trylock(&mrioc->reset_mutex)) {
		ioc_info(mrioc,
		    "controller reset triggered by %s is blocked due to another reset in progress\n",
		    mpi3mr_reset_rc_name(reset_reason));
		/* Wait for the in-flight reset and piggyback on its result */
		do {
			ssleep(1);
		} while (mrioc->reset_in_progress == 1);
		ioc_info(mrioc,
		    "returning previous reset result(%d) for the reset triggered by %s\n",
		    mrioc->prev_reset_result,
		    mpi3mr_reset_rc_name(reset_reason));
		return mrioc->prev_reset_result;
	}
	ioc_info(mrioc, "controller reset is triggered by %s\n",
	    mpi3mr_reset_rc_name(reset_reason));

	/* Quiesce new activity: SCSI requests and BSG are blocked here */
	mrioc->device_refresh_on = 0;
	scsi_block_requests(mrioc->shost);
	mrioc->reset_in_progress = 1;
	mrioc->stop_bsgs = 1;
	mrioc->prev_reset_result = -1;
	memset(&trigger_data, 0, sizeof(trigger_data));

	/*
	 * For host-initiated resets (not fault driven and not snapdump),
	 * release the diag buffers and mask all events before resetting.
	 */
	if ((!snapdump) && (reset_reason != MPI3MR_RESET_FROM_FAULT_WATCH) &&
	    (reset_reason != MPI3MR_RESET_FROM_FIRMWARE) &&
	    (reset_reason != MPI3MR_RESET_FROM_CIACTIV_FAULT)) {
		mpi3mr_set_trigger_data_in_all_hdb(mrioc,
		    MPI3MR_HDB_TRIGGER_TYPE_SOFT_RESET, NULL, 0);
		dprint_reset(mrioc,
		    "soft_reset_handler: releasing host diagnostic buffers\n");
		mpi3mr_release_diag_bufs(mrioc, 0);
		for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
			mrioc->event_masks[i] = -1;

		dprint_reset(mrioc, "soft_reset_handler: masking events\n");
		mpi3mr_issue_event_notification(mrioc);
	}

	mpi3mr_wait_for_host_io(mrioc, MPI3MR_RESET_HOST_IOWAIT_TIMEOUT);

	mpi3mr_ioc_disable_intr(mrioc);
	mrioc->io_admin_reset_sync = 1;

	if (snapdump) {
		/*
		 * Diag fault reset first so the firmware captures a snap
		 * dump; the subsequent soft reset below recovers from the
		 * resulting fault.
		 */
		retval = mpi3mr_issue_reset(mrioc,
		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
		if (!retval) {
			trigger_data.fault = (readl(&mrioc->sysif_regs->fault) &
				      MPI3_SYSIF_FAULT_CODE_MASK);
			/* Poll until the firmware finishes its diag save */
			do {
				host_diagnostic =
				    readl(&mrioc->sysif_regs->host_diagnostic);
				if (!(host_diagnostic &
				    MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
					break;
				msleep(100);
			} while (--timeout);

			mpi3mr_save_fault_info(mrioc);
			mpi3mr_fault_uevent_emit(mrioc);
			mrioc->fwfault_counter++;
			mpi3mr_set_trigger_data_in_all_hdb(mrioc,
			    MPI3MR_HDB_TRIGGER_TYPE_FAULT, &trigger_data, 0);
		}
	}

	retval = mpi3mr_issue_reset(mrioc,
	    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, reset_reason);
	if (retval) {
		ioc_err(mrioc, "Failed to issue soft reset to the ioc\n");
		goto out;
	}

	/* Make sure no reply processing thread is still running */
	retval = mpi3mr_check_op_admin_proc(mrioc);
	if (retval) {
		ioc_err(mrioc, "Soft reset failed due to an Admin or I/O queue polling\n"
				"thread still processing replies even after a 10 second\n"
				"timeout. Marking the controller as unrecoverable!\n");

		goto out;
	}

	/* Firmware facts must not shrink the throttle groups across reset */
	if (mrioc->num_io_throttle_group !=
	    mrioc->facts.max_io_throttle_group) {
		ioc_err(mrioc,
		    "max io throttle group doesn't match old(%d), new(%d)\n",
		    mrioc->num_io_throttle_group,
		    mrioc->facts.max_io_throttle_group);
		retval = -EPERM;
		goto out;
	}

	/* Flush everything that was outstanding before the reset */
	mpi3mr_flush_delayed_cmd_lists(mrioc);
	mpi3mr_flush_drv_cmds(mrioc);
	bitmap_clear(mrioc->devrem_bitmap, 0, MPI3MR_NUM_DEVRMCMD);
	bitmap_clear(mrioc->removepend_bitmap, 0,
		     mrioc->dev_handle_bitmap_bits);
	bitmap_clear(mrioc->evtack_cmds_bitmap, 0, MPI3MR_NUM_EVTACKCMD);
	mpi3mr_flush_host_io(mrioc);
	mpi3mr_cleanup_fwevt_list(mrioc);
	mpi3mr_invalidate_devhandles(mrioc);
	mpi3mr_free_enclosure_list(mrioc);

	if (mrioc->prepare_for_reset) {
		mrioc->prepare_for_reset = 0;
		mrioc->prepare_for_reset_timeout_counter = 0;
	}
	mpi3mr_memset_buffers(mrioc);
	mpi3mr_release_diag_bufs(mrioc, 1);
	mrioc->fw_release_trigger_active = false;
	mrioc->trace_release_trigger_active = false;
	mrioc->snapdump_trigger_active = false;
	mpi3mr_set_trigger_data_in_all_hdb(mrioc,
	    MPI3MR_HDB_TRIGGER_TYPE_SOFT_RESET, NULL, 0);

	dprint_reset(mrioc,
	    "soft_reset_handler: reinitializing the controller\n");
	retval = mpi3mr_reinit_ioc(mrioc, 0);
	if (retval) {
		pr_err(IOCNAME "reinit after soft reset failed: reason %d\n",
		    mrioc->name, reset_reason);
		goto out;
	}
	/* Allow the topology to settle before resuming I/O */
	ssleep(MPI3MR_RESET_TOPOLOGY_SETTLE_TIME);

out:
	if (!retval) {
		/* Successful recovery: unblock I/O/BSG, restart PEL wait
		 * and re-arm the watchdog
		 */
		mrioc->diagsave_timeout = 0;
		mrioc->reset_in_progress = 0;
		scsi_unblock_requests(mrioc->shost);
		mrioc->pel_abort_requested = 0;
		if (mrioc->pel_enabled) {
			mrioc->pel_cmds.retry_count = 0;
			mpi3mr_pel_wait_post(mrioc, &mrioc->pel_cmds);
		}

		mrioc->device_refresh_on = 0;

		mrioc->ts_update_counter = 0;
		spin_lock_irqsave(&mrioc->watchdog_lock, flags);
		if (mrioc->watchdog_work_q)
			queue_delayed_work(mrioc->watchdog_work_q,
			    &mrioc->watchdog_work,
			    msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
		spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
		mrioc->stop_bsgs = 0;
		if (mrioc->pel_enabled)
			atomic64_inc(&event_counter);
	} else {
		/* Unrecoverable: fault the IOC (if not already faulted) and
		 * fail back all pending commands
		 */
		dprint_reset(mrioc,
			"soft_reset_handler failed, marking controller as unrecoverable\n");
		ioc_state = mpi3mr_get_iocstate(mrioc);

		if (ioc_state != MRIOC_STATE_FAULT)
			mpi3mr_issue_reset(mrioc,
				MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
		mrioc->device_refresh_on = 0;
		mrioc->unrecoverable = 1;
		mrioc->reset_in_progress = 0;
		scsi_unblock_requests(mrioc->shost);
		mrioc->stop_bsgs = 0;
		retval = -1;
		mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
	}
	/* Result is cached for any resets that were blocked on the mutex */
	mrioc->prev_reset_result = retval;
	mutex_unlock(&mrioc->reset_mutex);
	ioc_info(mrioc, "controller reset is %s\n",
	    ((retval == 0) ? "successful" : "failed"));
	return retval;
}
5691 
5692 /**
5693  * mpi3mr_post_cfg_req - Issue config requests and wait
5694  * @mrioc: Adapter instance reference
5695  * @cfg_req: Configuration request
5696  * @timeout: Timeout in seconds
5697  * @ioc_status: Pointer to return ioc status
5698  *
5699  * A generic function for posting MPI3 configuration request to
5700  * the firmware. This blocks for the completion of request for
5701  * timeout seconds and if the request times out this function
5702  * faults the controller with proper reason code.
5703  *
5704  * On successful completion of the request this function returns
5705  * appropriate ioc status from the firmware back to the caller.
5706  *
5707  * Return: 0 on success, non-zero on failure.
5708  */
static int mpi3mr_post_cfg_req(struct mpi3mr_ioc *mrioc,
	struct mpi3_config_request *cfg_req, int timeout, u16 *ioc_status)
{
	int retval = 0;

	/* Only one config request may be outstanding at a time */
	mutex_lock(&mrioc->cfg_cmds.mutex);
	if (mrioc->cfg_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "sending config request failed due to command in use\n");
		mutex_unlock(&mrioc->cfg_cmds.mutex);
		goto out;
	}
	/* Prepare the shared driver command tracker for a blocking wait */
	mrioc->cfg_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->cfg_cmds.is_waiting = 1;
	mrioc->cfg_cmds.callback = NULL;
	mrioc->cfg_cmds.ioc_status = 0;
	mrioc->cfg_cmds.ioc_loginfo = 0;

	cfg_req->host_tag = cpu_to_le16(MPI3MR_HOSTTAG_CFG_CMDS);
	cfg_req->function = MPI3_FUNCTION_CONFIG;

	init_completion(&mrioc->cfg_cmds.done);
	dprint_cfg_info(mrioc, "posting config request\n");
	if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
		dprint_dump(cfg_req, sizeof(struct mpi3_config_request),
		    "mpi3_cfg_req");
	retval = mpi3mr_admin_request_post(mrioc, cfg_req, sizeof(*cfg_req), 1);
	if (retval) {
		ioc_err(mrioc, "posting config request failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->cfg_cmds.done, (timeout * HZ));
	if (!(mrioc->cfg_cmds.state & MPI3MR_CMD_COMPLETE)) {
		/* No completion within the timeout: fault the controller
		 * with the config-request-timeout reason code
		 */
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT);
		ioc_err(mrioc, "config request timed out\n");
		retval = -1;
		goto out_unlock;
	}
	/* Hand the firmware's ioc_status back to the caller to interpret */
	*ioc_status = mrioc->cfg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
	if ((*ioc_status) != MPI3_IOCSTATUS_SUCCESS)
		dprint_cfg_err(mrioc,
		    "cfg_page request returned with ioc_status(0x%04x), log_info(0x%08x)\n",
		    *ioc_status, mrioc->cfg_cmds.ioc_loginfo);

out_unlock:
	mrioc->cfg_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->cfg_cmds.mutex);

out:
	return retval;
}
5761 
5762 /**
5763  * mpi3mr_process_cfg_req - config page request processor
5764  * @mrioc: Adapter instance reference
5765  * @cfg_req: Configuration request
5766  * @cfg_hdr: Configuration page header
5767  * @timeout: Timeout in seconds
5768  * @ioc_status: Pointer to return ioc status
5769  * @cfg_buf: Memory pointer to copy config page or header
5770  * @cfg_buf_sz: Size of the memory to get config page or header
5771  *
5772  * This is handler for config page read, write and config page
5773  * header read operations.
5774  *
5775  * This function expects the cfg_req to be populated with page
5776  * type, page number, action for the header read and with page
5777  * address for all other operations.
5778  *
5779  * The cfg_hdr can be passed as null for reading required header
5780  * details for read/write pages the cfg_hdr should point valid
5781  * configuration page header.
5782  *
5783  * This allocates dmaable memory based on the size of the config
5784  * buffer and set the SGE of the cfg_req.
5785  *
5786  * For write actions, the config page data has to be passed in
5787  * the cfg_buf and size of the data has to be mentioned in the
5788  * cfg_buf_sz.
5789  *
5790  * For read/header actions, on successful completion of the
5791  * request with successful ioc_status the data will be copied
5792  * into the cfg_buf limited to a minimum of actual page size and
5793  * cfg_buf_sz
5794  *
5795  *
5796  * Return: 0 on success, non-zero on failure.
5797  */
static int mpi3mr_process_cfg_req(struct mpi3mr_ioc *mrioc,
	struct mpi3_config_request *cfg_req,
	struct mpi3_config_page_header *cfg_hdr, int timeout, u16 *ioc_status,
	void *cfg_buf, u32 cfg_buf_sz)
{
	struct dma_memory_desc mem_desc;
	int retval = -1;
	u8 invalid_action = 0;
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;

	memset(&mem_desc, 0, sizeof(struct dma_memory_desc));

	/*
	 * Header reads transfer a fixed-size header; all other actions
	 * take the transfer length from the previously read page header.
	 */
	if (cfg_req->action == MPI3_CONFIG_ACTION_PAGE_HEADER)
		mem_desc.size = sizeof(struct mpi3_config_page_header);
	else {
		if (!cfg_hdr) {
			ioc_err(mrioc, "null config header passed for config action(%d), page_type(0x%02x), page_num(%d)\n",
			    cfg_req->action, cfg_req->page_type,
			    cfg_req->page_number);
			goto out;
		}
		/* Reject actions the page's attribute does not permit */
		switch (cfg_hdr->page_attribute & MPI3_CONFIG_PAGEATTR_MASK) {
		case MPI3_CONFIG_PAGEATTR_READ_ONLY:
			if (cfg_req->action
			    != MPI3_CONFIG_ACTION_READ_CURRENT)
				invalid_action = 1;
			break;
		case MPI3_CONFIG_PAGEATTR_CHANGEABLE:
			if ((cfg_req->action ==
			     MPI3_CONFIG_ACTION_READ_PERSISTENT) ||
			    (cfg_req->action ==
			     MPI3_CONFIG_ACTION_WRITE_PERSISTENT))
				invalid_action = 1;
			break;
		case MPI3_CONFIG_PAGEATTR_PERSISTENT:
		default:
			break;
		}
		if (invalid_action) {
			ioc_err(mrioc,
			    "config action(%d) is not allowed for page_type(0x%02x), page_num(%d) with page_attribute(0x%02x)\n",
			    cfg_req->action, cfg_req->page_type,
			    cfg_req->page_number, cfg_hdr->page_attribute);
			goto out;
		}
		/* page_length is in units of dwords */
		mem_desc.size = le16_to_cpu(cfg_hdr->page_length) * 4;
		cfg_req->page_length = cfg_hdr->page_length;
		cfg_req->page_version = cfg_hdr->page_version;
	}

	mem_desc.addr = dma_alloc_coherent(&mrioc->pdev->dev,
		mem_desc.size, &mem_desc.dma_addr, GFP_KERNEL);

	if (!mem_desc.addr)
		return retval;

	/* Point the request's SGL at the DMA buffer just allocated */
	mpi3mr_add_sg_single(&cfg_req->sgl, sgl_flags, mem_desc.size,
	    mem_desc.dma_addr);

	if ((cfg_req->action == MPI3_CONFIG_ACTION_WRITE_PERSISTENT) ||
	    (cfg_req->action == MPI3_CONFIG_ACTION_WRITE_CURRENT)) {
		/*
		 * NOTE(review): min_t(u16, ...) truncates lengths above
		 * 64K; presumably config pages never exceed that - confirm.
		 */
		memcpy(mem_desc.addr, cfg_buf, min_t(u16, mem_desc.size,
		    cfg_buf_sz));
		dprint_cfg_info(mrioc, "config buffer to be written\n");
		if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
			dprint_dump(mem_desc.addr, mem_desc.size, "cfg_buf");
	}

	if (mpi3mr_post_cfg_req(mrioc, cfg_req, timeout, ioc_status))
		goto out;

	retval = 0;
	/* For read/header actions, copy the returned data to the caller */
	if ((*ioc_status == MPI3_IOCSTATUS_SUCCESS) &&
	    (cfg_req->action != MPI3_CONFIG_ACTION_WRITE_PERSISTENT) &&
	    (cfg_req->action != MPI3_CONFIG_ACTION_WRITE_CURRENT)) {
		memcpy(cfg_buf, mem_desc.addr, min_t(u16, mem_desc.size,
		    cfg_buf_sz));
		dprint_cfg_info(mrioc, "config buffer read\n");
		if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
			dprint_dump(mem_desc.addr, mem_desc.size, "cfg_buf");
	}

out:
	if (mem_desc.addr) {
		dma_free_coherent(&mrioc->pdev->dev, mem_desc.size,
			mem_desc.addr, mem_desc.dma_addr);
		mem_desc.addr = NULL;
	}

	return retval;
}
5889 
5890 /**
5891  * mpi3mr_cfg_get_dev_pg0 - Read current device page0
5892  * @mrioc: Adapter instance reference
5893  * @ioc_status: Pointer to return ioc status
5894  * @dev_pg0: Pointer to return device page 0
5895  * @pg_sz: Size of the memory allocated to the page pointer
5896  * @form: The form to be used for addressing the page
5897  * @form_spec: Form specific information like device handle
5898  *
5899  * This is handler for config page read for a specific device
5900  * page0. The ioc_status has the controller returned ioc_status.
5901  * This routine doesn't check ioc_status to decide whether the
5902  * page read is success or not and it is the callers
5903  * responsibility.
5904  *
5905  * Return: 0 on success, non-zero on failure.
5906  */
int mpi3mr_cfg_get_dev_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
	struct mpi3_device_page0 *dev_pg0, u16 pg_sz, u32 form, u32 form_spec)
{
	struct mpi3_config_request cfg_req;
	struct mpi3_config_page_header cfg_hdr;
	u32 pgad;

	memset(dev_pg0, 0, pg_sz);
	memset(&cfg_req, 0, sizeof(cfg_req));
	memset(&cfg_hdr, 0, sizeof(cfg_hdr));

	cfg_req.function = MPI3_FUNCTION_CONFIG;
	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DEVICE;
	cfg_req.page_number = 0;
	cfg_req.page_address = 0;

	/* Phase 1: read the page header to learn length and version */
	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
		ioc_err(mrioc, "device page0 header read failed\n");
		return -1;
	}
	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "device page0 header read failed with ioc_status(0x%04x)\n",
		    *ioc_status);
		return -1;
	}

	/* Phase 2: read the current page using the caller's addressing */
	pgad = (form & MPI3_DEVICE_PGAD_FORM_MASK) |
	    (form_spec & MPI3_DEVICE_PGAD_HANDLE_MASK);
	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
	cfg_req.page_address = cpu_to_le32(pgad);
	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, dev_pg0, pg_sz)) {
		ioc_err(mrioc, "device page0 read failed\n");
		return -1;
	}

	return 0;
}
5947 
5948 
5949 /**
5950  * mpi3mr_cfg_get_sas_phy_pg0 - Read current SAS Phy page0
5951  * @mrioc: Adapter instance reference
5952  * @ioc_status: Pointer to return ioc status
5953  * @phy_pg0: Pointer to return SAS Phy page 0
5954  * @pg_sz: Size of the memory allocated to the page pointer
5955  * @form: The form to be used for addressing the page
5956  * @form_spec: Form specific information like phy number
5957  *
5958  * This is handler for config page read for a specific SAS Phy
5959  * page0. The ioc_status has the controller returned ioc_status.
5960  * This routine doesn't check ioc_status to decide whether the
5961  * page read is success or not and it is the callers
5962  * responsibility.
5963  *
5964  * Return: 0 on success, non-zero on failure.
5965  */
int mpi3mr_cfg_get_sas_phy_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
	struct mpi3_sas_phy_page0 *phy_pg0, u16 pg_sz, u32 form,
	u32 form_spec)
{
	struct mpi3_config_request cfg_req;
	struct mpi3_config_page_header cfg_hdr;
	u32 pgad;

	memset(phy_pg0, 0, pg_sz);
	memset(&cfg_req, 0, sizeof(cfg_req));
	memset(&cfg_hdr, 0, sizeof(cfg_hdr));

	cfg_req.function = MPI3_FUNCTION_CONFIG;
	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_PHY;
	cfg_req.page_number = 0;
	cfg_req.page_address = 0;

	/* Phase 1: read the page header to learn length and version */
	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
		ioc_err(mrioc, "sas phy page0 header read failed\n");
		return -1;
	}
	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "sas phy page0 header read failed with ioc_status(0x%04x)\n",
		    *ioc_status);
		return -1;
	}

	/* Phase 2: read the current page addressed by form/phy number */
	pgad = (form & MPI3_SAS_PHY_PGAD_FORM_MASK) |
	    (form_spec & MPI3_SAS_PHY_PGAD_PHY_NUMBER_MASK);
	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
	cfg_req.page_address = cpu_to_le32(pgad);
	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, phy_pg0, pg_sz)) {
		ioc_err(mrioc, "sas phy page0 read failed\n");
		return -1;
	}

	return 0;
}
6007 
6008 /**
6009  * mpi3mr_cfg_get_sas_phy_pg1 - Read current SAS Phy page1
6010  * @mrioc: Adapter instance reference
6011  * @ioc_status: Pointer to return ioc status
6012  * @phy_pg1: Pointer to return SAS Phy page 1
6013  * @pg_sz: Size of the memory allocated to the page pointer
6014  * @form: The form to be used for addressing the page
6015  * @form_spec: Form specific information like phy number
6016  *
6017  * This is handler for config page read for a specific SAS Phy
6018  * page1. The ioc_status has the controller returned ioc_status.
6019  * This routine doesn't check ioc_status to decide whether the
6020  * page read is success or not and it is the callers
6021  * responsibility.
6022  *
6023  * Return: 0 on success, non-zero on failure.
6024  */
int mpi3mr_cfg_get_sas_phy_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
	struct mpi3_sas_phy_page1 *phy_pg1, u16 pg_sz, u32 form,
	u32 form_spec)
{
	struct mpi3_config_request cfg_req;
	struct mpi3_config_page_header cfg_hdr;
	u32 pgad;

	memset(phy_pg1, 0, pg_sz);
	memset(&cfg_req, 0, sizeof(cfg_req));
	memset(&cfg_hdr, 0, sizeof(cfg_hdr));

	cfg_req.function = MPI3_FUNCTION_CONFIG;
	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_PHY;
	cfg_req.page_number = 1;
	cfg_req.page_address = 0;

	/* Phase 1: read the page header to learn length and version */
	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
		ioc_err(mrioc, "sas phy page1 header read failed\n");
		return -1;
	}
	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "sas phy page1 header read failed with ioc_status(0x%04x)\n",
		    *ioc_status);
		return -1;
	}

	/* Phase 2: read the current page addressed by form/phy number */
	pgad = (form & MPI3_SAS_PHY_PGAD_FORM_MASK) |
	    (form_spec & MPI3_SAS_PHY_PGAD_PHY_NUMBER_MASK);
	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
	cfg_req.page_address = cpu_to_le32(pgad);
	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, phy_pg1, pg_sz)) {
		ioc_err(mrioc, "sas phy page1 read failed\n");
		return -1;
	}

	return 0;
}
6066 
6067 
6068 /**
6069  * mpi3mr_cfg_get_sas_exp_pg0 - Read current SAS Expander page0
6070  * @mrioc: Adapter instance reference
6071  * @ioc_status: Pointer to return ioc status
6072  * @exp_pg0: Pointer to return SAS Expander page 0
6073  * @pg_sz: Size of the memory allocated to the page pointer
6074  * @form: The form to be used for addressing the page
6075  * @form_spec: Form specific information like device handle
6076  *
6077  * This is handler for config page read for a specific SAS
6078  * Expander page0. The ioc_status has the controller returned
6079  * ioc_status. This routine doesn't check ioc_status to decide
6080  * whether the page read is success or not and it is the callers
6081  * responsibility.
6082  *
6083  * Return: 0 on success, non-zero on failure.
6084  */
int mpi3mr_cfg_get_sas_exp_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
	struct mpi3_sas_expander_page0 *exp_pg0, u16 pg_sz, u32 form,
	u32 form_spec)
{
	struct mpi3_config_request cfg_req;
	struct mpi3_config_page_header cfg_hdr;
	u32 pgad;

	memset(exp_pg0, 0, pg_sz);
	memset(&cfg_req, 0, sizeof(cfg_req));
	memset(&cfg_hdr, 0, sizeof(cfg_hdr));

	cfg_req.function = MPI3_FUNCTION_CONFIG;
	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_EXPANDER;
	cfg_req.page_number = 0;
	cfg_req.page_address = 0;

	/* Phase 1: read the page header to learn length and version */
	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
		ioc_err(mrioc, "expander page0 header read failed\n");
		return -1;
	}
	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "expander page0 header read failed with ioc_status(0x%04x)\n",
		    *ioc_status);
		return -1;
	}

	/* Phase 2: read the current page addressed by form/phy/handle */
	pgad = (form & MPI3_SAS_EXPAND_PGAD_FORM_MASK) |
	    (form_spec & (MPI3_SAS_EXPAND_PGAD_PHYNUM_MASK |
	    MPI3_SAS_EXPAND_PGAD_HANDLE_MASK));
	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
	cfg_req.page_address = cpu_to_le32(pgad);
	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, exp_pg0, pg_sz)) {
		ioc_err(mrioc, "expander page0 read failed\n");
		return -1;
	}

	return 0;
}
6127 
6128 /**
6129  * mpi3mr_cfg_get_sas_exp_pg1 - Read current SAS Expander page1
6130  * @mrioc: Adapter instance reference
6131  * @ioc_status: Pointer to return ioc status
6132  * @exp_pg1: Pointer to return SAS Expander page 1
6133  * @pg_sz: Size of the memory allocated to the page pointer
6134  * @form: The form to be used for addressing the page
6135  * @form_spec: Form specific information like phy number
6136  *
6137  * This is handler for config page read for a specific SAS
6138  * Expander page1. The ioc_status has the controller returned
6139  * ioc_status. This routine doesn't check ioc_status to decide
6140  * whether the page read is success or not and it is the callers
6141  * responsibility.
6142  *
6143  * Return: 0 on success, non-zero on failure.
6144  */
int mpi3mr_cfg_get_sas_exp_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
	struct mpi3_sas_expander_page1 *exp_pg1, u16 pg_sz, u32 form,
	u32 form_spec)
{
	struct mpi3_config_request cfg_req;
	struct mpi3_config_page_header cfg_hdr;
	u32 pgad;

	memset(exp_pg1, 0, pg_sz);
	memset(&cfg_req, 0, sizeof(cfg_req));
	memset(&cfg_hdr, 0, sizeof(cfg_hdr));

	cfg_req.function = MPI3_FUNCTION_CONFIG;
	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_EXPANDER;
	cfg_req.page_number = 1;
	cfg_req.page_address = 0;

	/* Phase 1: read the page header to learn length and version */
	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
		ioc_err(mrioc, "expander page1 header read failed\n");
		return -1;
	}
	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "expander page1 header read failed with ioc_status(0x%04x)\n",
		    *ioc_status);
		return -1;
	}

	/* Phase 2: read the current page addressed by form/phy/handle */
	pgad = (form & MPI3_SAS_EXPAND_PGAD_FORM_MASK) |
	    (form_spec & (MPI3_SAS_EXPAND_PGAD_PHYNUM_MASK |
	    MPI3_SAS_EXPAND_PGAD_HANDLE_MASK));
	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
	cfg_req.page_address = cpu_to_le32(pgad);
	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, exp_pg1, pg_sz)) {
		ioc_err(mrioc, "expander page1 read failed\n");
		return -1;
	}

	return 0;
}
6187 
6188 /**
6189  * mpi3mr_cfg_get_enclosure_pg0 - Read current Enclosure page0
6190  * @mrioc: Adapter instance reference
6191  * @ioc_status: Pointer to return ioc status
6192  * @encl_pg0: Pointer to return Enclosure page 0
6193  * @pg_sz: Size of the memory allocated to the page pointer
6194  * @form: The form to be used for addressing the page
6195  * @form_spec: Form specific information like device handle
6196  *
6197  * This is handler for config page read for a specific Enclosure
6198  * page0. The ioc_status has the controller returned ioc_status.
6199  * This routine doesn't check ioc_status to decide whether the
6200  * page read is success or not and it is the callers
6201  * responsibility.
6202  *
6203  * Return: 0 on success, non-zero on failure.
6204  */
int mpi3mr_cfg_get_enclosure_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
	struct mpi3_enclosure_page0 *encl_pg0, u16 pg_sz, u32 form,
	u32 form_spec)
{
	struct mpi3_config_request cfg_req;
	struct mpi3_config_page_header cfg_hdr;
	u32 pgad;

	memset(encl_pg0, 0, pg_sz);
	memset(&cfg_req, 0, sizeof(cfg_req));
	memset(&cfg_hdr, 0, sizeof(cfg_hdr));

	cfg_req.function = MPI3_FUNCTION_CONFIG;
	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_ENCLOSURE;
	cfg_req.page_number = 0;
	cfg_req.page_address = 0;

	/* Phase 1: read the page header to learn length and version */
	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
		ioc_err(mrioc, "enclosure page0 header read failed\n");
		return -1;
	}
	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "enclosure page0 header read failed with ioc_status(0x%04x)\n",
		    *ioc_status);
		return -1;
	}

	/* Phase 2: read the current page addressed by form/handle */
	pgad = (form & MPI3_ENCLOS_PGAD_FORM_MASK) |
	    (form_spec & MPI3_ENCLOS_PGAD_HANDLE_MASK);
	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
	cfg_req.page_address = cpu_to_le32(pgad);
	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, encl_pg0, pg_sz)) {
		ioc_err(mrioc, "enclosure page0 read failed\n");
		return -1;
	}

	return 0;
}
6246 
6247 
6248 /**
6249  * mpi3mr_cfg_get_sas_io_unit_pg0 - Read current SASIOUnit page0
6250  * @mrioc: Adapter instance reference
6251  * @sas_io_unit_pg0: Pointer to return SAS IO Unit page 0
6252  * @pg_sz: Size of the memory allocated to the page pointer
6253  *
6254  * This is handler for config page read for the SAS IO Unit
6255  * page0. This routine checks ioc_status to decide whether the
6256  * page read is success or not.
6257  *
6258  * Return: 0 on success, non-zero on failure.
6259  */
mpi3mr_cfg_get_sas_io_unit_pg0(struct mpi3mr_ioc * mrioc,struct mpi3_sas_io_unit_page0 * sas_io_unit_pg0,u16 pg_sz)6260 int mpi3mr_cfg_get_sas_io_unit_pg0(struct mpi3mr_ioc *mrioc,
6261 	struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0, u16 pg_sz)
6262 {
6263 	struct mpi3_config_page_header cfg_hdr;
6264 	struct mpi3_config_request cfg_req;
6265 	u16 ioc_status = 0;
6266 
6267 	memset(sas_io_unit_pg0, 0, pg_sz);
6268 	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
6269 	memset(&cfg_req, 0, sizeof(cfg_req));
6270 
6271 	cfg_req.function = MPI3_FUNCTION_CONFIG;
6272 	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
6273 	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT;
6274 	cfg_req.page_number = 0;
6275 	cfg_req.page_address = 0;
6276 
6277 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
6278 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
6279 		ioc_err(mrioc, "sas io unit page0 header read failed\n");
6280 		goto out_failed;
6281 	}
6282 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6283 		ioc_err(mrioc, "sas io unit page0 header read failed with ioc_status(0x%04x)\n",
6284 		    ioc_status);
6285 		goto out_failed;
6286 	}
6287 	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
6288 
6289 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6290 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg0, pg_sz)) {
6291 		ioc_err(mrioc, "sas io unit page0 read failed\n");
6292 		goto out_failed;
6293 	}
6294 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6295 		ioc_err(mrioc, "sas io unit page0 read failed with ioc_status(0x%04x)\n",
6296 		    ioc_status);
6297 		goto out_failed;
6298 	}
6299 	return 0;
6300 out_failed:
6301 	return -1;
6302 }
6303 
6304 /**
6305  * mpi3mr_cfg_get_sas_io_unit_pg1 - Read current SASIOUnit page1
6306  * @mrioc: Adapter instance reference
6307  * @sas_io_unit_pg1: Pointer to return SAS IO Unit page 1
6308  * @pg_sz: Size of the memory allocated to the page pointer
6309  *
6310  * This is handler for config page read for the SAS IO Unit
6311  * page1. This routine checks ioc_status to decide whether the
6312  * page read is success or not.
6313  *
6314  * Return: 0 on success, non-zero on failure.
6315  */
mpi3mr_cfg_get_sas_io_unit_pg1(struct mpi3mr_ioc * mrioc,struct mpi3_sas_io_unit_page1 * sas_io_unit_pg1,u16 pg_sz)6316 int mpi3mr_cfg_get_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc,
6317 	struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz)
6318 {
6319 	struct mpi3_config_page_header cfg_hdr;
6320 	struct mpi3_config_request cfg_req;
6321 	u16 ioc_status = 0;
6322 
6323 	memset(sas_io_unit_pg1, 0, pg_sz);
6324 	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
6325 	memset(&cfg_req, 0, sizeof(cfg_req));
6326 
6327 	cfg_req.function = MPI3_FUNCTION_CONFIG;
6328 	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
6329 	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT;
6330 	cfg_req.page_number = 1;
6331 	cfg_req.page_address = 0;
6332 
6333 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
6334 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
6335 		ioc_err(mrioc, "sas io unit page1 header read failed\n");
6336 		goto out_failed;
6337 	}
6338 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6339 		ioc_err(mrioc, "sas io unit page1 header read failed with ioc_status(0x%04x)\n",
6340 		    ioc_status);
6341 		goto out_failed;
6342 	}
6343 	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
6344 
6345 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6346 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) {
6347 		ioc_err(mrioc, "sas io unit page1 read failed\n");
6348 		goto out_failed;
6349 	}
6350 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6351 		ioc_err(mrioc, "sas io unit page1 read failed with ioc_status(0x%04x)\n",
6352 		    ioc_status);
6353 		goto out_failed;
6354 	}
6355 	return 0;
6356 out_failed:
6357 	return -1;
6358 }
6359 
6360 /**
6361  * mpi3mr_cfg_set_sas_io_unit_pg1 - Write SASIOUnit page1
6362  * @mrioc: Adapter instance reference
6363  * @sas_io_unit_pg1: Pointer to the SAS IO Unit page 1 to write
6364  * @pg_sz: Size of the memory allocated to the page pointer
6365  *
6366  * This is handler for config page write for the SAS IO Unit
6367  * page1. This routine checks ioc_status to decide whether the
6368  * page read is success or not. This will modify both current
6369  * and persistent page.
6370  *
6371  * Return: 0 on success, non-zero on failure.
6372  */
mpi3mr_cfg_set_sas_io_unit_pg1(struct mpi3mr_ioc * mrioc,struct mpi3_sas_io_unit_page1 * sas_io_unit_pg1,u16 pg_sz)6373 int mpi3mr_cfg_set_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc,
6374 	struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz)
6375 {
6376 	struct mpi3_config_page_header cfg_hdr;
6377 	struct mpi3_config_request cfg_req;
6378 	u16 ioc_status = 0;
6379 
6380 	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
6381 	memset(&cfg_req, 0, sizeof(cfg_req));
6382 
6383 	cfg_req.function = MPI3_FUNCTION_CONFIG;
6384 	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
6385 	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT;
6386 	cfg_req.page_number = 1;
6387 	cfg_req.page_address = 0;
6388 
6389 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
6390 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
6391 		ioc_err(mrioc, "sas io unit page1 header read failed\n");
6392 		goto out_failed;
6393 	}
6394 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6395 		ioc_err(mrioc, "sas io unit page1 header read failed with ioc_status(0x%04x)\n",
6396 		    ioc_status);
6397 		goto out_failed;
6398 	}
6399 	cfg_req.action = MPI3_CONFIG_ACTION_WRITE_CURRENT;
6400 
6401 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6402 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) {
6403 		ioc_err(mrioc, "sas io unit page1 write current failed\n");
6404 		goto out_failed;
6405 	}
6406 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6407 		ioc_err(mrioc, "sas io unit page1 write current failed with ioc_status(0x%04x)\n",
6408 		    ioc_status);
6409 		goto out_failed;
6410 	}
6411 
6412 	cfg_req.action = MPI3_CONFIG_ACTION_WRITE_PERSISTENT;
6413 
6414 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6415 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) {
6416 		ioc_err(mrioc, "sas io unit page1 write persistent failed\n");
6417 		goto out_failed;
6418 	}
6419 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6420 		ioc_err(mrioc, "sas io unit page1 write persistent failed with ioc_status(0x%04x)\n",
6421 		    ioc_status);
6422 		goto out_failed;
6423 	}
6424 	return 0;
6425 out_failed:
6426 	return -1;
6427 }
6428 
6429 /**
6430  * mpi3mr_cfg_get_driver_pg1 - Read current Driver page1
6431  * @mrioc: Adapter instance reference
6432  * @driver_pg1: Pointer to return Driver page 1
6433  * @pg_sz: Size of the memory allocated to the page pointer
6434  *
6435  * This is handler for config page read for the Driver page1.
6436  * This routine checks ioc_status to decide whether the page
6437  * read is success or not.
6438  *
6439  * Return: 0 on success, non-zero on failure.
6440  */
mpi3mr_cfg_get_driver_pg1(struct mpi3mr_ioc * mrioc,struct mpi3_driver_page1 * driver_pg1,u16 pg_sz)6441 int mpi3mr_cfg_get_driver_pg1(struct mpi3mr_ioc *mrioc,
6442 	struct mpi3_driver_page1 *driver_pg1, u16 pg_sz)
6443 {
6444 	struct mpi3_config_page_header cfg_hdr;
6445 	struct mpi3_config_request cfg_req;
6446 	u16 ioc_status = 0;
6447 
6448 	memset(driver_pg1, 0, pg_sz);
6449 	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
6450 	memset(&cfg_req, 0, sizeof(cfg_req));
6451 
6452 	cfg_req.function = MPI3_FUNCTION_CONFIG;
6453 	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
6454 	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DRIVER;
6455 	cfg_req.page_number = 1;
6456 	cfg_req.page_address = 0;
6457 
6458 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
6459 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
6460 		ioc_err(mrioc, "driver page1 header read failed\n");
6461 		goto out_failed;
6462 	}
6463 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6464 		ioc_err(mrioc, "driver page1 header read failed with ioc_status(0x%04x)\n",
6465 		    ioc_status);
6466 		goto out_failed;
6467 	}
6468 	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
6469 
6470 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6471 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, driver_pg1, pg_sz)) {
6472 		ioc_err(mrioc, "driver page1 read failed\n");
6473 		goto out_failed;
6474 	}
6475 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6476 		ioc_err(mrioc, "driver page1 read failed with ioc_status(0x%04x)\n",
6477 		    ioc_status);
6478 		goto out_failed;
6479 	}
6480 	return 0;
6481 out_failed:
6482 	return -1;
6483 }
6484 
6485 /**
6486  * mpi3mr_cfg_get_driver_pg2 - Read current driver page2
6487  * @mrioc: Adapter instance reference
6488  * @driver_pg2: Pointer to return driver page 2
6489  * @pg_sz: Size of the memory allocated to the page pointer
6490  * @page_action: Page action
6491  *
6492  * This is handler for config page read for the driver page2.
6493  * This routine checks ioc_status to decide whether the page
6494  * read is success or not.
6495  *
6496  * Return: 0 on success, non-zero on failure.
6497  */
mpi3mr_cfg_get_driver_pg2(struct mpi3mr_ioc * mrioc,struct mpi3_driver_page2 * driver_pg2,u16 pg_sz,u8 page_action)6498 int mpi3mr_cfg_get_driver_pg2(struct mpi3mr_ioc *mrioc,
6499 	struct mpi3_driver_page2 *driver_pg2, u16 pg_sz, u8 page_action)
6500 {
6501 	struct mpi3_config_page_header cfg_hdr;
6502 	struct mpi3_config_request cfg_req;
6503 	u16 ioc_status = 0;
6504 
6505 	memset(driver_pg2, 0, pg_sz);
6506 	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
6507 	memset(&cfg_req, 0, sizeof(cfg_req));
6508 
6509 	cfg_req.function = MPI3_FUNCTION_CONFIG;
6510 	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
6511 	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DRIVER;
6512 	cfg_req.page_number = 2;
6513 	cfg_req.page_address = 0;
6514 	cfg_req.page_version = MPI3_DRIVER2_PAGEVERSION;
6515 
6516 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
6517 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
6518 		ioc_err(mrioc, "driver page2 header read failed\n");
6519 		goto out_failed;
6520 	}
6521 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6522 		ioc_err(mrioc, "driver page2 header read failed with\n"
6523 			       "ioc_status(0x%04x)\n",
6524 		    ioc_status);
6525 		goto out_failed;
6526 	}
6527 	cfg_req.action = page_action;
6528 
6529 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6530 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, driver_pg2, pg_sz)) {
6531 		ioc_err(mrioc, "driver page2 read failed\n");
6532 		goto out_failed;
6533 	}
6534 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6535 		ioc_err(mrioc, "driver page2 read failed with\n"
6536 			       "ioc_status(0x%04x)\n",
6537 		    ioc_status);
6538 		goto out_failed;
6539 	}
6540 	return 0;
6541 out_failed:
6542 	return -1;
6543 }
6544 
6545