xref: /linux/drivers/scsi/mpi3mr/mpi3mr_fw.c (revision 3b5d535c635cbf88dbb63231cbae265b22e6a5f5)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Driver for Broadcom MPI3 Storage Controllers
4  *
5  * Copyright (C) 2017-2023 Broadcom Inc.
6  *  (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
7  *
8  */
9 
10 #include "mpi3mr.h"
11 #include <linux/io-64-nonatomic-lo-hi.h>
12 
13 static int
14 mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, u16 reset_reason);
15 static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc);
16 static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
17 	struct mpi3_ioc_facts_data *facts_data);
18 static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc,
19 	struct mpi3mr_drv_cmd *drv_cmd);
20 static int mpi3mr_check_op_admin_proc(struct mpi3mr_ioc *mrioc);
/* Number of io_uring poll queues requested at load time (0 = disabled). */
static int poll_queues;
module_param(poll_queues, int, 0444);
MODULE_PARM_DESC(poll_queues, "Number of queues for io_uring poll mode. (Range 1 - 126)");
/*
 * When true, the hard IRQ handler hands long completion bursts off to a
 * threaded IRQ poll routine to avoid CPU lockups (see mpi3mr_isr()).
 */
static bool threaded_isr_poll = true;
module_param(threaded_isr_poll, bool, 0444);
MODULE_PARM_DESC(threaded_isr_poll,
			"Enablement of IRQ polling thread (default=true)");
28 
#if defined(writeq) && defined(CONFIG_64BIT)
/**
 * mpi3mr_writeq - write a 64-bit value to adapter MMIO space
 * @b: 64-bit value to write
 * @addr: ioremapped register address
 * @write_queue_lock: unused here; the native writeq() is a single write
 */
static inline void mpi3mr_writeq(__u64 b, void __iomem *addr,
	spinlock_t *write_queue_lock)
{
	writeq(b, addr);
}
#else
/**
 * mpi3mr_writeq - write a 64-bit value as two 32-bit MMIO writes
 * @b: 64-bit value to write
 * @addr: ioremapped register address (low dword; high dword at @addr + 4)
 * @write_queue_lock: serializes the split write against other CPUs
 *
 * On targets without a native writeq(), emit the datum as two 32-bit
 * writes, low dword first.  The spinlock keeps the pair from
 * interleaving with another CPU's split write to the same register.
 */
static inline void mpi3mr_writeq(__u64 b, void __iomem *addr,
	spinlock_t *write_queue_lock)
{
	__u64 data_out = b;
	unsigned long flags;

	spin_lock_irqsave(write_queue_lock, flags);
	writel((u32)(data_out), addr);
	writel((u32)(data_out >> 32), (addr + 4));
	spin_unlock_irqrestore(write_queue_lock, flags);
}
#endif
48 
49 static inline bool
mpi3mr_check_req_qfull(struct op_req_qinfo * op_req_q)50 mpi3mr_check_req_qfull(struct op_req_qinfo *op_req_q)
51 {
52 	u16 pi, ci, max_entries;
53 	bool is_qfull = false;
54 
55 	pi = op_req_q->pi;
56 	ci = READ_ONCE(op_req_q->ci);
57 	max_entries = op_req_q->num_requests;
58 
59 	if ((ci == (pi + 1)) || ((!ci) && (pi == (max_entries - 1))))
60 		is_qfull = true;
61 
62 	return is_qfull;
63 }
64 
mpi3mr_sync_irqs(struct mpi3mr_ioc * mrioc)65 static void mpi3mr_sync_irqs(struct mpi3mr_ioc *mrioc)
66 {
67 	u16 i, max_vectors;
68 
69 	max_vectors = mrioc->intr_info_count;
70 
71 	for (i = 0; i < max_vectors; i++)
72 		synchronize_irq(pci_irq_vector(mrioc->pdev, i));
73 }
74 
/**
 * mpi3mr_ioc_disable_intr - disable interrupt processing for the adapter
 * @mrioc: Adapter instance reference
 *
 * Clear the software interrupt-enabled flag (mpi3mr_isr_primary() bails
 * out when it is zero), then wait for any ISRs already running.
 */
void mpi3mr_ioc_disable_intr(struct mpi3mr_ioc *mrioc)
{
	mrioc->intr_enabled = 0;
	mpi3mr_sync_irqs(mrioc);
}
80 
/**
 * mpi3mr_ioc_enable_intr - enable interrupt processing for the adapter
 * @mrioc: Adapter instance reference
 *
 * Set the software flag that allows the ISRs to process replies.
 */
void mpi3mr_ioc_enable_intr(struct mpi3mr_ioc *mrioc)
{
	mrioc->intr_enabled = 1;
}
85 
mpi3mr_cleanup_isr(struct mpi3mr_ioc * mrioc)86 static void mpi3mr_cleanup_isr(struct mpi3mr_ioc *mrioc)
87 {
88 	u16 i;
89 
90 	mpi3mr_ioc_disable_intr(mrioc);
91 
92 	if (!mrioc->intr_info)
93 		return;
94 
95 	for (i = 0; i < mrioc->intr_info_count; i++)
96 		free_irq(pci_irq_vector(mrioc->pdev, i),
97 		    (mrioc->intr_info + i));
98 
99 	kfree(mrioc->intr_info);
100 	mrioc->intr_info = NULL;
101 	mrioc->intr_info_count = 0;
102 	mrioc->is_intr_info_set = false;
103 	pci_free_irq_vectors(mrioc->pdev);
104 }
105 
/**
 * mpi3mr_add_sg_single - populate one simple SGE at the given location
 * @paddr: virtual address of the SGE to fill
 * @flags: SGE flags to set
 * @length: data length in bytes
 * @dma_addr: DMA address the SGE should point at
 *
 * Fields are stored in little-endian as required by the MPI3 spec.
 */
void mpi3mr_add_sg_single(void *paddr, u8 flags, u32 length,
	dma_addr_t dma_addr)
{
	struct mpi3_sge_common *sge = paddr;

	sge->flags = flags;
	sge->length = cpu_to_le32(length);
	sge->address = cpu_to_le64(dma_addr);
}
115 
mpi3mr_build_zero_len_sge(void * paddr)116 void mpi3mr_build_zero_len_sge(void *paddr)
117 {
118 	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
119 
120 	mpi3mr_add_sg_single(paddr, sgl_flags, 0, -1);
121 }
122 
/**
 * mpi3mr_get_reply_virt_addr - map a reply frame DMA address to virtual
 * @mrioc: Adapter instance reference
 * @phys_addr: DMA address reported by the controller
 *
 * Return: virtual address inside the reply buffer pool, or NULL when
 * @phys_addr is zero or falls outside the pool's DMA range.
 */
void *mpi3mr_get_reply_virt_addr(struct mpi3mr_ioc *mrioc,
	dma_addr_t phys_addr)
{
	if (!phys_addr)
		return NULL;

	if (phys_addr < mrioc->reply_buf_dma)
		return NULL;

	if (phys_addr > mrioc->reply_buf_dma_max_address)
		return NULL;

	return mrioc->reply_buf + (phys_addr - mrioc->reply_buf_dma);
}
135 
/**
 * mpi3mr_get_sensebuf_virt_addr - map a sense buffer DMA address to virtual
 * @mrioc: Adapter instance reference
 * @phys_addr: DMA address reported by the controller
 *
 * Return: virtual address inside the sense buffer pool, or NULL when
 * @phys_addr is zero. No range check is done here (unlike the reply
 * pool accessor).
 */
void *mpi3mr_get_sensebuf_virt_addr(struct mpi3mr_ioc *mrioc,
	dma_addr_t phys_addr)
{
	return phys_addr ?
	    (mrioc->sense_buf + (phys_addr - mrioc->sense_buf_dma)) : NULL;
}
144 
/**
 * mpi3mr_repost_reply_buf - return a reply frame to the free queue
 * @mrioc: Adapter instance reference
 * @reply_dma: DMA address of the consumed reply frame
 *
 * Place the frame at the current host index of the reply free queue,
 * advance the index with wrap-around and publish it to the controller.
 * The whole sequence runs under reply_free_queue_lock.
 */
static void mpi3mr_repost_reply_buf(struct mpi3mr_ioc *mrioc,
	u64 reply_dma)
{
	unsigned long flags;
	u32 prev_idx;

	spin_lock_irqsave(&mrioc->reply_free_queue_lock, flags);
	prev_idx = mrioc->reply_free_queue_host_index;
	if (prev_idx == (mrioc->reply_free_qsz - 1))
		mrioc->reply_free_queue_host_index = 0;
	else
		mrioc->reply_free_queue_host_index = prev_idx + 1;
	mrioc->reply_free_q[prev_idx] = cpu_to_le64(reply_dma);
	writel(mrioc->reply_free_queue_host_index,
	    &mrioc->sysif_regs->reply_free_host_index);
	spin_unlock_irqrestore(&mrioc->reply_free_queue_lock, flags);
}
162 
/**
 * mpi3mr_repost_sense_buf - return a sense buffer to the free queue
 * @mrioc: Adapter instance reference
 * @sense_buf_dma: DMA address of the consumed sense buffer
 *
 * Place the buffer at the current host index of the sense buffer free
 * queue, advance the index with wrap-around and publish it to the
 * controller. The whole sequence runs under sbq_lock.
 */
void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc,
	u64 sense_buf_dma)
{
	unsigned long flags;
	u32 prev_idx;

	spin_lock_irqsave(&mrioc->sbq_lock, flags);
	prev_idx = mrioc->sbq_host_index;
	if (prev_idx == (mrioc->sense_buf_q_sz - 1))
		mrioc->sbq_host_index = 0;
	else
		mrioc->sbq_host_index = prev_idx + 1;
	mrioc->sense_buf_q[prev_idx] = cpu_to_le64(sense_buf_dma);
	writel(mrioc->sbq_host_index,
	    &mrioc->sysif_regs->sense_buffer_free_host_index);
	spin_unlock_irqrestore(&mrioc->sbq_lock, flags);
}
179 
/**
 * mpi3mr_print_event_data - log a readable description of an event
 * @mrioc: Adapter instance reference
 * @event_reply: event notification reply frame from the controller
 *
 * Does nothing unless MPI3_DEBUG_EVENT logging is enabled. Events
 * carrying decodable payloads (device add/info/status change, SAS
 * discovery, PCIe enumeration) log their details and return directly;
 * all other recognized events log a fixed name. Unknown event codes
 * are silently ignored.
 */
static void mpi3mr_print_event_data(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_notification_reply *event_reply)
{
	char *desc = NULL;
	u16 event;

	/* Event logging is opt-in via the logging_level knob */
	if (!(mrioc->logging_level & MPI3_DEBUG_EVENT))
		return;

	event = event_reply->event;

	switch (event) {
	case MPI3_EVENT_LOG_DATA:
		desc = "Log Data";
		break;
	case MPI3_EVENT_CHANGE:
		desc = "Event Change";
		break;
	case MPI3_EVENT_GPIO_INTERRUPT:
		desc = "GPIO Interrupt";
		break;
	case MPI3_EVENT_CABLE_MGMT:
		desc = "Cable Management";
		break;
	case MPI3_EVENT_ENERGY_PACK_CHANGE:
		desc = "Energy Pack Change";
		break;
	case MPI3_EVENT_DEVICE_ADDED:
	{
		struct mpi3_device_page0 *event_data =
		    (struct mpi3_device_page0 *)event_reply->event_data;
		ioc_info(mrioc, "Device Added: dev=0x%04x Form=0x%x\n",
		    event_data->dev_handle, event_data->device_form);
		return;
	}
	case MPI3_EVENT_DEVICE_INFO_CHANGED:
	{
		struct mpi3_device_page0 *event_data =
		    (struct mpi3_device_page0 *)event_reply->event_data;
		ioc_info(mrioc, "Device Info Changed: dev=0x%04x Form=0x%x\n",
		    event_data->dev_handle, event_data->device_form);
		return;
	}
	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
	{
		struct mpi3_event_data_device_status_change *event_data =
		    (struct mpi3_event_data_device_status_change *)event_reply->event_data;
		ioc_info(mrioc, "Device status Change: dev=0x%04x RC=0x%x\n",
		    event_data->dev_handle, event_data->reason_code);
		return;
	}
	case MPI3_EVENT_SAS_DISCOVERY:
	{
		struct mpi3_event_data_sas_discovery *event_data =
		    (struct mpi3_event_data_sas_discovery *)event_reply->event_data;
		ioc_info(mrioc, "SAS Discovery: (%s) status (0x%08x)\n",
		    (event_data->reason_code == MPI3_EVENT_SAS_DISC_RC_STARTED) ?
		    "start" : "stop",
		    le32_to_cpu(event_data->discovery_status));
		return;
	}
	case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
		desc = "SAS Broadcast Primitive";
		break;
	case MPI3_EVENT_SAS_NOTIFY_PRIMITIVE:
		desc = "SAS Notify Primitive";
		break;
	case MPI3_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
		desc = "SAS Init Device Status Change";
		break;
	case MPI3_EVENT_SAS_INIT_TABLE_OVERFLOW:
		desc = "SAS Init Table Overflow";
		break;
	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		desc = "SAS Topology Change List";
		break;
	case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
		desc = "Enclosure Device Status Change";
		break;
	case MPI3_EVENT_ENCL_DEVICE_ADDED:
		desc = "Enclosure Added";
		break;
	case MPI3_EVENT_HARD_RESET_RECEIVED:
		desc = "Hard Reset Received";
		break;
	case MPI3_EVENT_SAS_PHY_COUNTER:
		desc = "SAS PHY Counter";
		break;
	case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
		desc = "SAS Device Discovery Error";
		break;
	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
		desc = "PCIE Topology Change List";
		break;
	case MPI3_EVENT_PCIE_ENUMERATION:
	{
		struct mpi3_event_data_pcie_enumeration *event_data =
		    (struct mpi3_event_data_pcie_enumeration *)event_reply->event_data;
		ioc_info(mrioc, "PCIE Enumeration: (%s)",
		    (event_data->reason_code ==
		    MPI3_EVENT_PCIE_ENUM_RC_STARTED) ? "start" : "stop");
		if (event_data->enumeration_status)
			ioc_info(mrioc, "enumeration_status(0x%08x)\n",
			    le32_to_cpu(event_data->enumeration_status));
		return;
	}
	case MPI3_EVENT_PREPARE_FOR_RESET:
		desc = "Prepare For Reset";
		break;
	case MPI3_EVENT_DIAGNOSTIC_BUFFER_STATUS_CHANGE:
		desc = "Diagnostic Buffer Status Change";
		break;
	}

	/* Event code not in the table above: nothing to report */
	if (!desc)
		return;

	ioc_info(mrioc, "%s\n", desc);
}
299 
mpi3mr_handle_events(struct mpi3mr_ioc * mrioc,struct mpi3_default_reply * def_reply)300 static void mpi3mr_handle_events(struct mpi3mr_ioc *mrioc,
301 	struct mpi3_default_reply *def_reply)
302 {
303 	struct mpi3_event_notification_reply *event_reply =
304 	    (struct mpi3_event_notification_reply *)def_reply;
305 
306 	mrioc->change_count = le16_to_cpu(event_reply->ioc_change_count);
307 	mpi3mr_print_event_data(mrioc, event_reply);
308 	mpi3mr_os_handle_events(mrioc, event_reply);
309 }
310 
311 static struct mpi3mr_drv_cmd *
mpi3mr_get_drv_cmd(struct mpi3mr_ioc * mrioc,u16 host_tag,struct mpi3_default_reply * def_reply)312 mpi3mr_get_drv_cmd(struct mpi3mr_ioc *mrioc, u16 host_tag,
313 	struct mpi3_default_reply *def_reply)
314 {
315 	u16 idx;
316 
317 	switch (host_tag) {
318 	case MPI3MR_HOSTTAG_INITCMDS:
319 		return &mrioc->init_cmds;
320 	case MPI3MR_HOSTTAG_CFG_CMDS:
321 		return &mrioc->cfg_cmds;
322 	case MPI3MR_HOSTTAG_BSG_CMDS:
323 		return &mrioc->bsg_cmds;
324 	case MPI3MR_HOSTTAG_BLK_TMS:
325 		return &mrioc->host_tm_cmds;
326 	case MPI3MR_HOSTTAG_PEL_ABORT:
327 		return &mrioc->pel_abort_cmd;
328 	case MPI3MR_HOSTTAG_PEL_WAIT:
329 		return &mrioc->pel_cmds;
330 	case MPI3MR_HOSTTAG_TRANSPORT_CMDS:
331 		return &mrioc->transport_cmds;
332 	case MPI3MR_HOSTTAG_INVALID:
333 		if (def_reply && def_reply->function ==
334 		    MPI3_FUNCTION_EVENT_NOTIFICATION)
335 			mpi3mr_handle_events(mrioc, def_reply);
336 		return NULL;
337 	default:
338 		break;
339 	}
340 	if (host_tag >= MPI3MR_HOSTTAG_DEVRMCMD_MIN &&
341 	    host_tag <= MPI3MR_HOSTTAG_DEVRMCMD_MAX) {
342 		idx = host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
343 		return &mrioc->dev_rmhs_cmds[idx];
344 	}
345 
346 	if (host_tag >= MPI3MR_HOSTTAG_EVTACKCMD_MIN &&
347 	    host_tag <= MPI3MR_HOSTTAG_EVTACKCMD_MAX) {
348 		idx = host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
349 		return &mrioc->evtack_cmds[idx];
350 	}
351 
352 	return NULL;
353 }
354 
/**
 * mpi3mr_process_admin_reply_desc - process one admin reply descriptor
 * @mrioc: Adapter instance reference
 * @reply_desc: reply descriptor consumed from the admin reply queue
 * @reply_dma: out parameter; set to the DMA address of the reply frame
 *	for address replies (caller reposts it), 0 otherwise
 *
 * Decode the descriptor by type to recover the host tag, IOC status and
 * log info, then resolve the host tag to a driver command tracker and
 * complete it: copy the reply frame and sense data into the tracker,
 * wake a waiter or invoke the tracker's callback. A consumed sense
 * buffer is reposted to the controller before returning.
 */
static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
	struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma)
{
	u16 reply_desc_type, host_tag = 0;
	u16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
	u16 masked_ioc_status = MPI3_IOCSTATUS_SUCCESS;
	u32 ioc_loginfo = 0, sense_count = 0;
	struct mpi3_status_reply_descriptor *status_desc;
	struct mpi3_address_reply_descriptor *addr_desc;
	struct mpi3_success_reply_descriptor *success_desc;
	struct mpi3_default_reply *def_reply = NULL;
	struct mpi3mr_drv_cmd *cmdptr = NULL;
	struct mpi3_scsi_io_reply *scsi_reply;
	struct scsi_sense_hdr sshdr;
	u8 *sense_buf = NULL;

	*reply_dma = 0;
	reply_desc_type = le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
	switch (reply_desc_type) {
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
		/* Status carried inline in the descriptor itself */
		status_desc = (struct mpi3_status_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(status_desc->host_tag);
		ioc_status = le16_to_cpu(status_desc->ioc_status);
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
		masked_ioc_status = ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
		mpi3mr_reply_trigger(mrioc, masked_ioc_status, ioc_loginfo);
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
		/* Descriptor points at a full reply frame in host memory */
		addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc;
		*reply_dma = le64_to_cpu(addr_desc->reply_frame_address);
		def_reply = mpi3mr_get_reply_virt_addr(mrioc, *reply_dma);
		if (!def_reply)
			goto out;
		host_tag = le16_to_cpu(def_reply->host_tag);
		ioc_status = le16_to_cpu(def_reply->ioc_status);
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(def_reply->ioc_log_info);
		masked_ioc_status = ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
		if (def_reply->function == MPI3_FUNCTION_SCSI_IO) {
			scsi_reply = (struct mpi3_scsi_io_reply *)def_reply;
			sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc,
			    le64_to_cpu(scsi_reply->sense_data_buffer_address));
			sense_count = le32_to_cpu(scsi_reply->sense_count);
			if (sense_buf) {
				scsi_normalize_sense(sense_buf, sense_count,
				    &sshdr);
				mpi3mr_scsisense_trigger(mrioc, sshdr.sense_key,
				    sshdr.asc, sshdr.ascq);
			}
		}
		mpi3mr_reply_trigger(mrioc, masked_ioc_status, ioc_loginfo);
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
		/* Success descriptors carry only the host tag */
		success_desc = (struct mpi3_success_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(success_desc->host_tag);
		break;
	default:
		break;
	}

	cmdptr = mpi3mr_get_drv_cmd(mrioc, host_tag, def_reply);
	if (cmdptr) {
		if (cmdptr->state & MPI3MR_CMD_PENDING) {
			cmdptr->state |= MPI3MR_CMD_COMPLETE;
			cmdptr->ioc_loginfo = ioc_loginfo;
			/* BSG consumers want the full (unmasked) status */
			if (host_tag == MPI3MR_HOSTTAG_BSG_CMDS)
				cmdptr->ioc_status = ioc_status;
			else
				cmdptr->ioc_status = masked_ioc_status;
			cmdptr->state &= ~MPI3MR_CMD_PENDING;
			if (def_reply) {
				cmdptr->state |= MPI3MR_CMD_REPLY_VALID;
				memcpy((u8 *)cmdptr->reply, (u8 *)def_reply,
				    mrioc->reply_sz);
			}
			if (sense_buf && cmdptr->sensebuf) {
				cmdptr->is_sense = 1;
				memcpy(cmdptr->sensebuf, sense_buf,
				       MPI3MR_SENSE_BUF_SZ);
			}
			if (cmdptr->is_waiting) {
				cmdptr->is_waiting = 0;
				complete(&cmdptr->done);
			} else if (cmdptr->callback)
				cmdptr->callback(mrioc, cmdptr);
		}
	}
out:
	/* sense_buf is only set on the SCSI IO address-reply path, where
	 * scsi_reply was also assigned */
	if (sense_buf)
		mpi3mr_repost_sense_buf(mrioc,
		    le64_to_cpu(scsi_reply->sense_data_buffer_address));
}
451 
/**
 * mpi3mr_process_admin_reply_q - drain the admin reply queue
 * @mrioc: Adapter instance reference
 *
 * Consume reply descriptors whose phase bit matches the expected
 * phase, processing each and reposting its reply frame. The consumer
 * index register is updated every MPI3MR_THRESHOLD_REPLY_COUNT
 * completions and once more at the end. Reentrancy is prevented with
 * the admin_reply_q_in_use atomic; a contending caller records a
 * pending ISR and returns immediately.
 *
 * Return: number of reply descriptors processed; 0 when the queue was
 * empty or already being drained by another context.
 */
int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
{
	u32 exp_phase = mrioc->admin_reply_ephase;
	u32 admin_reply_ci = mrioc->admin_reply_ci;
	u32 num_admin_replies = 0;
	u64 reply_dma = 0;
	u16 threshold_comps = 0;
	struct mpi3_default_reply_descriptor *reply_desc;

	/* Single-drainer guard: note the lost wakeup for later */
	if (!atomic_add_unless(&mrioc->admin_reply_q_in_use, 1, 1)) {
		atomic_inc(&mrioc->admin_pend_isr);
		return 0;
	}

	atomic_set(&mrioc->admin_pend_isr, 0);
	reply_desc = (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
	    admin_reply_ci;

	/* Phase bit mismatch means the controller has not posted here yet */
	if ((le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
		atomic_dec(&mrioc->admin_reply_q_in_use);
		return 0;
	}

	do {
		if (mrioc->unrecoverable || mrioc->io_admin_reset_sync)
			break;

		mrioc->admin_req_ci = le16_to_cpu(reply_desc->request_queue_ci);
		mpi3mr_process_admin_reply_desc(mrioc, reply_desc, &reply_dma);
		if (reply_dma)
			mpi3mr_repost_reply_buf(mrioc, reply_dma);
		num_admin_replies++;
		threshold_comps++;
		/* Wrap the CI and flip the expected phase at queue end */
		if (++admin_reply_ci == mrioc->num_admin_replies) {
			admin_reply_ci = 0;
			exp_phase ^= 1;
		}
		reply_desc =
		    (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
		    admin_reply_ci;
		if ((le16_to_cpu(reply_desc->reply_flags) &
		    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
			break;
		/* Publish progress periodically so the IOC can reuse slots */
		if (threshold_comps == MPI3MR_THRESHOLD_REPLY_COUNT) {
			writel(admin_reply_ci,
			    &mrioc->sysif_regs->admin_reply_queue_ci);
			threshold_comps = 0;
		}
	} while (1);

	writel(admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
	mrioc->admin_reply_ci = admin_reply_ci;
	mrioc->admin_reply_ephase = exp_phase;
	atomic_dec(&mrioc->admin_reply_q_in_use);

	return num_admin_replies;
}
510 
511 /**
512  * mpi3mr_get_reply_desc - get reply descriptor frame corresponding to
513  *	queue's consumer index from operational reply descriptor queue.
514  * @op_reply_q: op_reply_qinfo object
515  * @reply_ci: operational reply descriptor's queue consumer index
516  *
517  * Returns: reply descriptor frame address
518  */
519 static inline struct mpi3_default_reply_descriptor *
mpi3mr_get_reply_desc(struct op_reply_qinfo * op_reply_q,u32 reply_ci)520 mpi3mr_get_reply_desc(struct op_reply_qinfo *op_reply_q, u32 reply_ci)
521 {
522 	void *segment_base_addr;
523 	struct segments *segments = op_reply_q->q_segments;
524 	struct mpi3_default_reply_descriptor *reply_desc = NULL;
525 
526 	segment_base_addr =
527 	    segments[reply_ci / op_reply_q->segment_qd].segment;
528 	reply_desc = (struct mpi3_default_reply_descriptor *)segment_base_addr +
529 	    (reply_ci % op_reply_q->segment_qd);
530 	return reply_desc;
531 }
532 
/**
 * mpi3mr_process_op_reply_q - Operational reply queue handler
 * @mrioc: Adapter instance reference
 * @op_reply_q: Operational reply queue info
 *
 * Checks the specific operational reply queue and drains the
 * reply queue entries until the queue is empty and process the
 * individual reply descriptors.
 *
 * The consumer index register is updated every
 * MPI3MR_THRESHOLD_REPLY_COUNT completions and once at the end.
 * On non-RT kernels the loop bails out after more than max_host_ios
 * completions and defers the rest to the threaded ISR poller.
 *
 * Return: 0 if queue is already processed,or number of reply
 *	    descriptors processed.
 */
int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
	struct op_reply_qinfo *op_reply_q)
{
	struct op_req_qinfo *op_req_q;
	u32 exp_phase;
	u32 reply_ci;
	u32 num_op_reply = 0;
	u64 reply_dma = 0;
	struct mpi3_default_reply_descriptor *reply_desc;
	u16 req_q_idx = 0, reply_qidx, threshold_comps = 0;

	reply_qidx = op_reply_q->qid - 1;

	/* Only one context drains a given queue at a time */
	if (!atomic_add_unless(&op_reply_q->in_use, 1, 1))
		return 0;

	exp_phase = op_reply_q->ephase;
	reply_ci = op_reply_q->ci;

	reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);
	/* Phase bit mismatch means no new descriptor has been posted */
	if ((le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
		atomic_dec(&op_reply_q->in_use);
		return 0;
	}

	do {
		if (mrioc->unrecoverable || mrioc->io_admin_reset_sync)
			break;

		req_q_idx = le16_to_cpu(reply_desc->request_queue_id) - 1;
		op_req_q = &mrioc->req_qinfo[req_q_idx];

		/* Propagate the request queue CI reported by the IOC */
		WRITE_ONCE(op_req_q->ci, le16_to_cpu(reply_desc->request_queue_ci));
		mpi3mr_process_op_reply_desc(mrioc, reply_desc, &reply_dma,
		    reply_qidx);

		if (reply_dma)
			mpi3mr_repost_reply_buf(mrioc, reply_dma);
		num_op_reply++;
		threshold_comps++;

		/* Wrap the CI and flip the expected phase at queue end */
		if (++reply_ci == op_reply_q->num_replies) {
			reply_ci = 0;
			exp_phase ^= 1;
		}

		reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);

		if ((le16_to_cpu(reply_desc->reply_flags) &
		    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
			break;
#ifndef CONFIG_PREEMPT_RT
		/*
		 * Exit completion loop to avoid CPU lockup
		 * Ensure remaining completion happens from threaded ISR.
		 */
		if ((num_op_reply > mrioc->max_host_ios) &&
			(threaded_isr_poll == true)) {
			op_reply_q->enable_irq_poll = true;
			break;
		}
#endif
		/* Publish progress periodically so the IOC can reuse slots */
		if (threshold_comps == MPI3MR_THRESHOLD_REPLY_COUNT) {
			writel(reply_ci,
			    &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].consumer_index);
			atomic_sub(threshold_comps, &op_reply_q->pend_ios);
			threshold_comps = 0;
		}
	} while (1);

	writel(reply_ci,
	    &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].consumer_index);
	op_reply_q->ci = reply_ci;
	op_reply_q->ephase = exp_phase;
	atomic_sub(threshold_comps, &op_reply_q->pend_ios);
	atomic_dec(&op_reply_q->in_use);
	return num_op_reply;
}
624 
625 /**
626  * mpi3mr_blk_mq_poll - Operational reply queue handler
627  * @shost: SCSI Host reference
628  * @queue_num: Request queue number (w.r.t OS it is hardware context number)
629  *
630  * Checks the specific operational reply queue and drains the
631  * reply queue entries until the queue is empty and process the
632  * individual reply descriptors.
633  *
634  * Return: 0 if queue is already processed,or number of reply
635  *	    descriptors processed.
636  */
mpi3mr_blk_mq_poll(struct Scsi_Host * shost,unsigned int queue_num)637 int mpi3mr_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
638 {
639 	int num_entries = 0;
640 	struct mpi3mr_ioc *mrioc;
641 
642 	mrioc = (struct mpi3mr_ioc *)shost->hostdata;
643 
644 	if ((mrioc->reset_in_progress || mrioc->prepare_for_reset ||
645 	    mrioc->unrecoverable || mrioc->pci_err_recovery))
646 		return 0;
647 
648 	num_entries = mpi3mr_process_op_reply_q(mrioc,
649 			&mrioc->op_reply_qinfo[queue_num]);
650 
651 	return num_entries;
652 }
653 
mpi3mr_isr_primary(int irq,void * privdata)654 static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata)
655 {
656 	struct mpi3mr_intr_info *intr_info = privdata;
657 	struct mpi3mr_ioc *mrioc;
658 	u16 midx;
659 	u32 num_admin_replies = 0, num_op_reply = 0;
660 
661 	if (!intr_info)
662 		return IRQ_NONE;
663 
664 	mrioc = intr_info->mrioc;
665 
666 	if (!mrioc->intr_enabled)
667 		return IRQ_NONE;
668 
669 	midx = intr_info->msix_index;
670 
671 	if (!midx)
672 		num_admin_replies = mpi3mr_process_admin_reply_q(mrioc);
673 	if (intr_info->op_reply_q)
674 		num_op_reply = mpi3mr_process_op_reply_q(mrioc,
675 		    intr_info->op_reply_q);
676 
677 	if (num_admin_replies || num_op_reply)
678 		return IRQ_HANDLED;
679 	else
680 		return IRQ_NONE;
681 }
682 
683 #ifndef CONFIG_PREEMPT_RT
684 
mpi3mr_isr(int irq,void * privdata)685 static irqreturn_t mpi3mr_isr(int irq, void *privdata)
686 {
687 	struct mpi3mr_intr_info *intr_info = privdata;
688 	int ret;
689 
690 	if (!intr_info)
691 		return IRQ_NONE;
692 
693 	/* Call primary ISR routine */
694 	ret = mpi3mr_isr_primary(irq, privdata);
695 
696 	/*
697 	 * If more IOs are expected, schedule IRQ polling thread.
698 	 * Otherwise exit from ISR.
699 	 */
700 	if ((threaded_isr_poll == false) || !intr_info->op_reply_q)
701 		return ret;
702 
703 	if (!intr_info->op_reply_q->enable_irq_poll ||
704 	    !atomic_read(&intr_info->op_reply_q->pend_ios))
705 		return ret;
706 
707 	disable_irq_nosync(intr_info->os_irq);
708 
709 	return IRQ_WAKE_THREAD;
710 }
711 
/**
 * mpi3mr_isr_poll - Reply queue polling routine
 * @irq: IRQ
 * @privdata: Interrupt info
 *
 * poll for pending I/O completions in a loop until pending I/Os
 * present or controller queue depth I/Os are processed.
 *
 * Runs as the threaded half of the IRQ (woken via IRQ_WAKE_THREAD
 * from mpi3mr_isr() with the hard IRQ masked); it re-enables the
 * vector before returning.
 *
 * Return: IRQ_NONE or IRQ_HANDLED
 */
static irqreturn_t mpi3mr_isr_poll(int irq, void *privdata)
{
	struct mpi3mr_intr_info *intr_info = privdata;
	struct mpi3mr_ioc *mrioc;
	u16 midx;
	u32 num_op_reply = 0;

	if (!intr_info || !intr_info->op_reply_q)
		return IRQ_NONE;

	mrioc = intr_info->mrioc;
	midx = intr_info->msix_index;

	/* Poll for pending IOs completions */
	do {
		if (!mrioc->intr_enabled || mrioc->unrecoverable)
			break;

		/* Vector 0 also services the admin reply queue */
		if (!midx)
			mpi3mr_process_admin_reply_q(mrioc);
		if (intr_info->op_reply_q)
			num_op_reply +=
			    mpi3mr_process_op_reply_q(mrioc,
				intr_info->op_reply_q);

		/* Brief sleep between passes to avoid hogging the CPU */
		usleep_range(MPI3MR_IRQ_POLL_SLEEP, MPI3MR_IRQ_POLL_SLEEP + 1);

	} while (atomic_read(&intr_info->op_reply_q->pend_ios) &&
	    (num_op_reply < mrioc->max_host_ios));

	/* Disarm polling and unmask the vector masked by mpi3mr_isr() */
	intr_info->op_reply_q->enable_irq_poll = false;
	enable_irq(intr_info->os_irq);

	return IRQ_HANDLED;
}
757 
758 #endif
759 
/**
 * mpi3mr_request_irq - Request IRQ and register ISR
 * @mrioc: Adapter instance reference
 * @index: IRQ vector index
 *
 * Request threaded ISR with primary ISR and secondary
 * (polling) handler on non-RT kernels; on PREEMPT_RT only the
 * primary handler is registered (it runs in a thread anyway).
 *
 * Return: 0 on success and non zero on failures.
 */
static inline int mpi3mr_request_irq(struct mpi3mr_ioc *mrioc, u16 index)
{
	struct pci_dev *pdev = mrioc->pdev;
	struct mpi3mr_intr_info *intr_info = mrioc->intr_info + index;
	int retval = 0;

	intr_info->mrioc = mrioc;
	intr_info->msix_index = index;
	intr_info->op_reply_q = NULL;

	/* e.g. "mpi3mr0-msix3": shows up in /proc/interrupts */
	scnprintf(intr_info->name, MPI3MR_NAME_LENGTH,
	    "%.32s%d-msix%u", mrioc->driver_name, mrioc->id, index);

#ifndef CONFIG_PREEMPT_RT
	retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr,
	    mpi3mr_isr_poll, IRQF_SHARED, intr_info->name, intr_info);
#else
	retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr_primary,
	    NULL, IRQF_SHARED, intr_info->name, intr_info);
#endif
	if (retval) {
		ioc_err(mrioc, "%s: Unable to allocate interrupt %d!\n",
		    intr_info->name, pci_irq_vector(pdev, index));
		return retval;
	}

	/* Cache the Linux IRQ number for disable_irq_nosync()/enable_irq() */
	intr_info->os_irq = pci_irq_vector(pdev, index);
	return retval;
}
798 
/**
 * mpi3mr_calc_poll_queues - validate the requested poll queue count
 * @mrioc: Adapter instance reference
 * @max_vectors: number of MSI-X vectors available
 *
 * Poll queues are only kept when enough vectors remain after
 * reserving one for the admin queue and one for the default queue;
 * otherwise the request is logged and dropped to zero.
 */
static void mpi3mr_calc_poll_queues(struct mpi3mr_ioc *mrioc, u16 max_vectors)
{
	u16 requested = mrioc->requested_poll_qcount;

	if (!requested)
		return;

	/* Reserved for Admin and Default Queue */
	if ((max_vectors > 2) && (requested < (max_vectors - 2))) {
		ioc_info(mrioc,
		    "enabled polled queues (%d) msix (%d)\n",
		    requested, max_vectors);
	} else {
		ioc_info(mrioc,
		    "disabled polled queues (%d) msix (%d) because of no resources for default queue\n",
		    requested, max_vectors);
		mrioc->requested_poll_qcount = 0;
	}
}
817 
818 /**
819  * mpi3mr_setup_isr - Setup ISR for the controller
820  * @mrioc: Adapter instance reference
821  * @setup_one: Request one IRQ or more
822  *
823  * Allocate IRQ vectors and call mpi3mr_request_irq to setup ISR
824  *
825  * Return: 0 on success and non zero on failures.
826  */
mpi3mr_setup_isr(struct mpi3mr_ioc * mrioc,u8 setup_one)827 static int mpi3mr_setup_isr(struct mpi3mr_ioc *mrioc, u8 setup_one)
828 {
829 	unsigned int irq_flags = PCI_IRQ_MSIX;
830 	int max_vectors, min_vec;
831 	int retval;
832 	int i;
833 	struct irq_affinity desc = { .pre_vectors =  1, .post_vectors = 1 };
834 
835 	if (mrioc->is_intr_info_set)
836 		return 0;
837 
838 	mpi3mr_cleanup_isr(mrioc);
839 
840 	if (setup_one || reset_devices) {
841 		max_vectors = 1;
842 		retval = pci_alloc_irq_vectors(mrioc->pdev,
843 		    1, max_vectors, irq_flags);
844 		if (retval < 0) {
845 			ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n",
846 			    retval);
847 			goto out_failed;
848 		}
849 	} else {
850 		max_vectors =
851 		    min_t(int, mrioc->cpu_count + 1 +
852 			mrioc->requested_poll_qcount, mrioc->msix_count);
853 
854 		mpi3mr_calc_poll_queues(mrioc, max_vectors);
855 
856 		ioc_info(mrioc,
857 		    "MSI-X vectors supported: %d, no of cores: %d,",
858 		    mrioc->msix_count, mrioc->cpu_count);
859 		ioc_info(mrioc,
860 		    "MSI-x vectors requested: %d poll_queues %d\n",
861 		    max_vectors, mrioc->requested_poll_qcount);
862 
863 		desc.post_vectors = mrioc->requested_poll_qcount;
864 		min_vec = desc.pre_vectors + desc.post_vectors;
865 		irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES;
866 
867 		retval = pci_alloc_irq_vectors_affinity(mrioc->pdev,
868 			min_vec, max_vectors, irq_flags, &desc);
869 
870 		if (retval < 0) {
871 			ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n",
872 			    retval);
873 			goto out_failed;
874 		}
875 
876 
877 		/*
878 		 * If only one MSI-x is allocated, then MSI-x 0 will be shared
879 		 * between Admin queue and operational queue
880 		 */
881 		if (retval == min_vec)
882 			mrioc->op_reply_q_offset = 0;
883 		else if (retval != (max_vectors)) {
884 			ioc_info(mrioc,
885 			    "allocated vectors (%d) are less than configured (%d)\n",
886 			    retval, max_vectors);
887 		}
888 
889 		max_vectors = retval;
890 		mrioc->op_reply_q_offset = (max_vectors > 1) ? 1 : 0;
891 
892 		mpi3mr_calc_poll_queues(mrioc, max_vectors);
893 
894 	}
895 
896 	mrioc->intr_info = kzalloc(sizeof(struct mpi3mr_intr_info) * max_vectors,
897 	    GFP_KERNEL);
898 	if (!mrioc->intr_info) {
899 		retval = -ENOMEM;
900 		pci_free_irq_vectors(mrioc->pdev);
901 		goto out_failed;
902 	}
903 	for (i = 0; i < max_vectors; i++) {
904 		retval = mpi3mr_request_irq(mrioc, i);
905 		if (retval) {
906 			mrioc->intr_info_count = i;
907 			goto out_failed;
908 		}
909 	}
910 	if (reset_devices || !setup_one)
911 		mrioc->is_intr_info_set = true;
912 	mrioc->intr_info_count = max_vectors;
913 	mpi3mr_ioc_enable_intr(mrioc);
914 	return 0;
915 
916 out_failed:
917 	mpi3mr_cleanup_isr(mrioc);
918 
919 	return retval;
920 }
921 
/* IOC state to name mapper structure, consumed by mpi3mr_iocstate_name() */
static const struct {
	enum mpi3mr_iocstate value;
	char *name;
} mrioc_states[] = {
	{ MRIOC_STATE_READY, "ready" },
	{ MRIOC_STATE_FAULT, "fault" },
	{ MRIOC_STATE_RESET, "reset" },
	{ MRIOC_STATE_BECOMING_READY, "becoming ready" },
	{ MRIOC_STATE_RESET_REQUESTED, "reset requested" },
	{ MRIOC_STATE_UNRECOVERABLE, "unrecoverable error" },
};
933 
mpi3mr_iocstate_name(enum mpi3mr_iocstate mrioc_state)934 static const char *mpi3mr_iocstate_name(enum mpi3mr_iocstate mrioc_state)
935 {
936 	int i;
937 	char *name = NULL;
938 
939 	for (i = 0; i < ARRAY_SIZE(mrioc_states); i++) {
940 		if (mrioc_states[i].value == mrioc_state) {
941 			name = mrioc_states[i].name;
942 			break;
943 		}
944 	}
945 	return name;
946 }
947 
948 /* Reset reason to name mapper structure*/
949 static const struct {
950 	enum mpi3mr_reset_reason value;
951 	char *name;
952 } mpi3mr_reset_reason_codes[] = {
953 	{ MPI3MR_RESET_FROM_BRINGUP, "timeout in bringup" },
954 	{ MPI3MR_RESET_FROM_FAULT_WATCH, "fault" },
955 	{ MPI3MR_RESET_FROM_APP, "application invocation" },
956 	{ MPI3MR_RESET_FROM_EH_HOS, "error handling" },
957 	{ MPI3MR_RESET_FROM_TM_TIMEOUT, "TM timeout" },
958 	{ MPI3MR_RESET_FROM_APP_TIMEOUT, "application command timeout" },
959 	{ MPI3MR_RESET_FROM_MUR_FAILURE, "MUR failure" },
960 	{ MPI3MR_RESET_FROM_CTLR_CLEANUP, "timeout in controller cleanup" },
961 	{ MPI3MR_RESET_FROM_CIACTIV_FAULT, "component image activation fault" },
962 	{ MPI3MR_RESET_FROM_PE_TIMEOUT, "port enable timeout" },
963 	{ MPI3MR_RESET_FROM_TSU_TIMEOUT, "time stamp update timeout" },
964 	{ MPI3MR_RESET_FROM_DELREQQ_TIMEOUT, "delete request queue timeout" },
965 	{ MPI3MR_RESET_FROM_DELREPQ_TIMEOUT, "delete reply queue timeout" },
966 	{
967 		MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT,
968 		"create request queue timeout"
969 	},
970 	{
971 		MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT,
972 		"create reply queue timeout"
973 	},
974 	{ MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT, "IOC facts timeout" },
975 	{ MPI3MR_RESET_FROM_IOCINIT_TIMEOUT, "IOC init timeout" },
976 	{ MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT, "event notify timeout" },
977 	{ MPI3MR_RESET_FROM_EVTACK_TIMEOUT, "event acknowledgment timeout" },
978 	{
979 		MPI3MR_RESET_FROM_CIACTVRST_TIMER,
980 		"component image activation timeout"
981 	},
982 	{
983 		MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT,
984 		"get package version timeout"
985 	},
986 	{ MPI3MR_RESET_FROM_SYSFS, "sysfs invocation" },
987 	{ MPI3MR_RESET_FROM_SYSFS_TIMEOUT, "sysfs TM timeout" },
988 	{
989 		MPI3MR_RESET_FROM_DIAG_BUFFER_POST_TIMEOUT,
990 		"diagnostic buffer post timeout"
991 	},
992 	{
993 		MPI3MR_RESET_FROM_DIAG_BUFFER_RELEASE_TIMEOUT,
994 		"diagnostic buffer release timeout"
995 	},
996 	{ MPI3MR_RESET_FROM_FIRMWARE, "firmware asynchronous reset" },
997 	{ MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT, "configuration request timeout"},
998 	{ MPI3MR_RESET_FROM_SAS_TRANSPORT_TIMEOUT, "timeout of a SAS transport layer request" },
999 };
1000 
1001 /**
1002  * mpi3mr_reset_rc_name - get reset reason code name
1003  * @reason_code: reset reason code value
1004  *
1005  * Map reset reason to an NULL terminated ASCII string
1006  *
1007  * Return: name corresponding to reset reason value or NULL.
1008  */
mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code)1009 static const char *mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code)
1010 {
1011 	int i;
1012 	char *name = NULL;
1013 
1014 	for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_reason_codes); i++) {
1015 		if (mpi3mr_reset_reason_codes[i].value == reason_code) {
1016 			name = mpi3mr_reset_reason_codes[i].name;
1017 			break;
1018 		}
1019 	}
1020 	return name;
1021 }
1022 
/* Reset type to name mapper structure */
static const struct {
	u16 reset_type;
	char *name;
} mpi3mr_reset_types[] = {
	{ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, "soft" },
	{ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, "diag fault" },
};
1031 
1032 /**
1033  * mpi3mr_reset_type_name - get reset type name
1034  * @reset_type: reset type value
1035  *
1036  * Map reset type to an NULL terminated ASCII string
1037  *
1038  * Return: name corresponding to reset type value or NULL.
1039  */
mpi3mr_reset_type_name(u16 reset_type)1040 static const char *mpi3mr_reset_type_name(u16 reset_type)
1041 {
1042 	int i;
1043 	char *name = NULL;
1044 
1045 	for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_types); i++) {
1046 		if (mpi3mr_reset_types[i].reset_type == reset_type) {
1047 			name = mpi3mr_reset_types[i].name;
1048 			break;
1049 		}
1050 	}
1051 	return name;
1052 }
1053 
1054 /**
1055  * mpi3mr_is_fault_recoverable - Read fault code and decide
1056  * whether the controller can be recoverable
1057  * @mrioc: Adapter instance reference
1058  * Return: true if fault is recoverable, false otherwise.
1059  */
mpi3mr_is_fault_recoverable(struct mpi3mr_ioc * mrioc)1060 static inline bool mpi3mr_is_fault_recoverable(struct mpi3mr_ioc *mrioc)
1061 {
1062 	u32 fault;
1063 
1064 	fault = (readl(&mrioc->sysif_regs->fault) &
1065 		      MPI3_SYSIF_FAULT_CODE_MASK);
1066 
1067 	switch (fault) {
1068 	case MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED:
1069 	case MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED:
1070 		ioc_warn(mrioc,
1071 		    "controller requires system power cycle, marking controller as unrecoverable\n");
1072 		return false;
1073 	case MPI3_SYSIF_FAULT_CODE_INSUFFICIENT_PCI_SLOT_POWER:
1074 		ioc_warn(mrioc,
1075 		    "controller faulted due to insufficient power,\n"
1076 		    " try by connecting it to a different slot\n");
1077 		return false;
1078 	default:
1079 		break;
1080 	}
1081 	return true;
1082 }
1083 
1084 /**
1085  * mpi3mr_print_fault_info - Display fault information
1086  * @mrioc: Adapter instance reference
1087  *
1088  * Display the controller fault information if there is a
1089  * controller fault.
1090  *
1091  * Return: Nothing.
1092  */
mpi3mr_print_fault_info(struct mpi3mr_ioc * mrioc)1093 void mpi3mr_print_fault_info(struct mpi3mr_ioc *mrioc)
1094 {
1095 	u32 ioc_status, code, code1, code2, code3;
1096 
1097 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1098 
1099 	if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
1100 		code = readl(&mrioc->sysif_regs->fault);
1101 		code1 = readl(&mrioc->sysif_regs->fault_info[0]);
1102 		code2 = readl(&mrioc->sysif_regs->fault_info[1]);
1103 		code3 = readl(&mrioc->sysif_regs->fault_info[2]);
1104 
1105 		ioc_info(mrioc,
1106 		    "fault code(0x%08X): Additional code: (0x%08X:0x%08X:0x%08X)\n",
1107 		    code, code1, code2, code3);
1108 	}
1109 }
1110 
1111 /**
1112  * mpi3mr_save_fault_info - Save fault information
1113  * @mrioc: Adapter instance reference
1114  *
1115  * Save the controller fault information if there is a
1116  * controller fault.
1117  *
1118  * Return: Nothing.
1119  */
mpi3mr_save_fault_info(struct mpi3mr_ioc * mrioc)1120 static void mpi3mr_save_fault_info(struct mpi3mr_ioc *mrioc)
1121 {
1122 	u32 ioc_status, i;
1123 
1124 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1125 
1126 	if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
1127 		mrioc->saved_fault_code = readl(&mrioc->sysif_regs->fault) &
1128 		    MPI3_SYSIF_FAULT_CODE_MASK;
1129 		for (i = 0; i < 3; i++) {
1130 			mrioc->saved_fault_info[i] =
1131 			readl(&mrioc->sysif_regs->fault_info[i]);
1132 		}
1133 	}
1134 }
1135 
1136 /**
1137  * mpi3mr_get_iocstate - Get IOC State
1138  * @mrioc: Adapter instance reference
1139  *
1140  * Return a proper IOC state enum based on the IOC status and
1141  * IOC configuration and unrcoverable state of the controller.
1142  *
1143  * Return: Current IOC state.
1144  */
mpi3mr_get_iocstate(struct mpi3mr_ioc * mrioc)1145 enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_ioc *mrioc)
1146 {
1147 	u32 ioc_status, ioc_config;
1148 	u8 ready, enabled;
1149 
1150 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1151 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1152 
1153 	if (mrioc->unrecoverable)
1154 		return MRIOC_STATE_UNRECOVERABLE;
1155 	if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)
1156 		return MRIOC_STATE_FAULT;
1157 
1158 	ready = (ioc_status & MPI3_SYSIF_IOC_STATUS_READY);
1159 	enabled = (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC);
1160 
1161 	if (ready && enabled)
1162 		return MRIOC_STATE_READY;
1163 	if ((!ready) && (!enabled))
1164 		return MRIOC_STATE_RESET;
1165 	if ((!ready) && (enabled))
1166 		return MRIOC_STATE_BECOMING_READY;
1167 
1168 	return MRIOC_STATE_RESET_REQUESTED;
1169 }
1170 
1171 /**
1172  * mpi3mr_free_ioctl_dma_memory - free memory for ioctl dma
1173  * @mrioc: Adapter instance reference
1174  *
1175  * Free the DMA memory allocated for IOCTL handling purpose.
1176  *
1177  * Return: None
1178  */
mpi3mr_free_ioctl_dma_memory(struct mpi3mr_ioc * mrioc)1179 static void mpi3mr_free_ioctl_dma_memory(struct mpi3mr_ioc *mrioc)
1180 {
1181 	struct dma_memory_desc *mem_desc;
1182 	u16 i;
1183 
1184 	if (!mrioc->ioctl_dma_pool)
1185 		return;
1186 
1187 	for (i = 0; i < MPI3MR_NUM_IOCTL_SGE; i++) {
1188 		mem_desc = &mrioc->ioctl_sge[i];
1189 		if (mem_desc->addr) {
1190 			dma_pool_free(mrioc->ioctl_dma_pool,
1191 				      mem_desc->addr,
1192 				      mem_desc->dma_addr);
1193 			mem_desc->addr = NULL;
1194 		}
1195 	}
1196 	dma_pool_destroy(mrioc->ioctl_dma_pool);
1197 	mrioc->ioctl_dma_pool = NULL;
1198 	mem_desc = &mrioc->ioctl_chain_sge;
1199 
1200 	if (mem_desc->addr) {
1201 		dma_free_coherent(&mrioc->pdev->dev, mem_desc->size,
1202 				  mem_desc->addr, mem_desc->dma_addr);
1203 		mem_desc->addr = NULL;
1204 	}
1205 	mem_desc = &mrioc->ioctl_resp_sge;
1206 	if (mem_desc->addr) {
1207 		dma_free_coherent(&mrioc->pdev->dev, mem_desc->size,
1208 				  mem_desc->addr, mem_desc->dma_addr);
1209 		mem_desc->addr = NULL;
1210 	}
1211 
1212 	mrioc->ioctl_sges_allocated = false;
1213 }
1214 
1215 /**
1216  * mpi3mr_alloc_ioctl_dma_memory - Alloc memory for ioctl dma
1217  * @mrioc: Adapter instance reference
1218  *
1219  * This function allocates dmaable memory required to handle the
1220  * application issued MPI3 IOCTL requests.
1221  *
1222  * Return: None
1223  */
mpi3mr_alloc_ioctl_dma_memory(struct mpi3mr_ioc * mrioc)1224 static void mpi3mr_alloc_ioctl_dma_memory(struct mpi3mr_ioc *mrioc)
1225 
1226 {
1227 	struct dma_memory_desc *mem_desc;
1228 	u16 i;
1229 
1230 	mrioc->ioctl_dma_pool = dma_pool_create("ioctl dma pool",
1231 						&mrioc->pdev->dev,
1232 						MPI3MR_IOCTL_SGE_SIZE,
1233 						MPI3MR_PAGE_SIZE_4K, 0);
1234 
1235 	if (!mrioc->ioctl_dma_pool) {
1236 		ioc_err(mrioc, "ioctl_dma_pool: dma_pool_create failed\n");
1237 		goto out_failed;
1238 	}
1239 
1240 	for (i = 0; i < MPI3MR_NUM_IOCTL_SGE; i++) {
1241 		mem_desc = &mrioc->ioctl_sge[i];
1242 		mem_desc->size = MPI3MR_IOCTL_SGE_SIZE;
1243 		mem_desc->addr = dma_pool_zalloc(mrioc->ioctl_dma_pool,
1244 						 GFP_KERNEL,
1245 						 &mem_desc->dma_addr);
1246 		if (!mem_desc->addr)
1247 			goto out_failed;
1248 	}
1249 
1250 	mem_desc = &mrioc->ioctl_chain_sge;
1251 	mem_desc->size = MPI3MR_PAGE_SIZE_4K;
1252 	mem_desc->addr = dma_alloc_coherent(&mrioc->pdev->dev,
1253 					    mem_desc->size,
1254 					    &mem_desc->dma_addr,
1255 					    GFP_KERNEL);
1256 	if (!mem_desc->addr)
1257 		goto out_failed;
1258 
1259 	mem_desc = &mrioc->ioctl_resp_sge;
1260 	mem_desc->size = MPI3MR_PAGE_SIZE_4K;
1261 	mem_desc->addr = dma_alloc_coherent(&mrioc->pdev->dev,
1262 					    mem_desc->size,
1263 					    &mem_desc->dma_addr,
1264 					    GFP_KERNEL);
1265 	if (!mem_desc->addr)
1266 		goto out_failed;
1267 
1268 	mrioc->ioctl_sges_allocated = true;
1269 
1270 	return;
1271 out_failed:
1272 	ioc_warn(mrioc, "cannot allocate DMA memory for the mpt commands\n"
1273 		 "from the applications, application interface for MPT command is disabled\n");
1274 	mpi3mr_free_ioctl_dma_memory(mrioc);
1275 }
1276 
1277 /**
1278  * mpi3mr_fault_uevent_emit - Emit uevent for any controller
1279  * fault
1280  * @mrioc: Pointer to the mpi3mr_ioc structure for the controller instance
1281  *
1282  * This function is invoked when the controller undergoes any
1283  * type of fault.
1284  */
1285 
mpi3mr_fault_uevent_emit(struct mpi3mr_ioc * mrioc)1286 static void mpi3mr_fault_uevent_emit(struct mpi3mr_ioc *mrioc)
1287 {
1288 	struct kobj_uevent_env *env;
1289 	int ret;
1290 
1291 	env = kzalloc_obj(*env);
1292 	if (!env)
1293 		return;
1294 
1295 	ret = add_uevent_var(env, "DRIVER=%s", mrioc->driver_name);
1296 	if (ret)
1297 		goto out_free;
1298 
1299 	ret = add_uevent_var(env, "IOC_ID=%u", mrioc->id);
1300 	if (ret)
1301 		goto out_free;
1302 
1303 	ret = add_uevent_var(env, "FAULT_CODE=0x%08x",
1304 			    mrioc->saved_fault_code);
1305 	if (ret)
1306 		goto out_free;
1307 
1308 	ret = add_uevent_var(env, "FAULT_INFO0=0x%08x",
1309 			     mrioc->saved_fault_info[0]);
1310 	if (ret)
1311 		goto out_free;
1312 
1313 	ret = add_uevent_var(env, "FAULT_INFO1=0x%08x",
1314 			    mrioc->saved_fault_info[1]);
1315 	if (ret)
1316 		goto out_free;
1317 
1318 	ret = add_uevent_var(env, "FAULT_INFO2=0x%08x",
1319 			    mrioc->saved_fault_info[2]);
1320 	if (ret)
1321 		goto out_free;
1322 
1323 	kobject_uevent_env(&mrioc->shost->shost_gendev.kobj,
1324 			KOBJ_CHANGE, env->envp);
1325 
1326 out_free:
1327 	kfree(env);
1328 
1329 }
1330 
1331 /**
1332  * mpi3mr_clear_reset_history - clear reset history
1333  * @mrioc: Adapter instance reference
1334  *
1335  * Write the reset history bit in IOC status to clear the bit,
1336  * if it is already set.
1337  *
1338  * Return: Nothing.
1339  */
mpi3mr_clear_reset_history(struct mpi3mr_ioc * mrioc)1340 static inline void mpi3mr_clear_reset_history(struct mpi3mr_ioc *mrioc)
1341 {
1342 	u32 ioc_status;
1343 
1344 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1345 	if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
1346 		writel(ioc_status, &mrioc->sysif_regs->ioc_status);
1347 }
1348 
1349 /**
1350  * mpi3mr_issue_and_process_mur - Message unit Reset handler
1351  * @mrioc: Adapter instance reference
1352  * @reset_reason: Reset reason code
1353  *
1354  * Issue Message unit Reset to the controller and wait for it to
1355  * be complete.
1356  *
1357  * Return: 0 on success, -1 on failure.
1358  */
mpi3mr_issue_and_process_mur(struct mpi3mr_ioc * mrioc,u32 reset_reason)1359 static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc,
1360 	u32 reset_reason)
1361 {
1362 	u32 ioc_config, timeout, ioc_status, scratch_pad0;
1363 	int retval = -1;
1364 
1365 	ioc_info(mrioc, "Issuing Message unit Reset(MUR)\n");
1366 	if (mrioc->unrecoverable) {
1367 		ioc_info(mrioc, "IOC is unrecoverable MUR not issued\n");
1368 		return retval;
1369 	}
1370 	mpi3mr_clear_reset_history(mrioc);
1371 	scratch_pad0 = ((MPI3MR_RESET_REASON_OSTYPE_LINUX <<
1372 			 MPI3MR_RESET_REASON_OSTYPE_SHIFT) |
1373 			(mrioc->facts.ioc_num <<
1374 			 MPI3MR_RESET_REASON_IOCNUM_SHIFT) | reset_reason);
1375 	writel(scratch_pad0, &mrioc->sysif_regs->scratchpad[0]);
1376 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1377 	ioc_config &= ~MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
1378 	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
1379 
1380 	timeout = MPI3MR_MUR_TIMEOUT * 10;
1381 	do {
1382 		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1383 		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)) {
1384 			mpi3mr_clear_reset_history(mrioc);
1385 			break;
1386 		}
1387 		if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
1388 			mpi3mr_print_fault_info(mrioc);
1389 			break;
1390 		}
1391 		msleep(100);
1392 	} while (--timeout);
1393 
1394 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1395 	if (timeout && !((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
1396 	      (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
1397 	      (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
1398 		retval = 0;
1399 
1400 	ioc_info(mrioc, "Base IOC Sts/Config after %s MUR is (0x%08x)/(0x%08x)\n",
1401 	    (!retval) ? "successful" : "failed", ioc_status, ioc_config);
1402 	return retval;
1403 }
1404 
1405 /**
1406  * mpi3mr_revalidate_factsdata - validate IOCFacts parameters
1407  * during reset/resume
1408  * @mrioc: Adapter instance reference
1409  *
1410  * Return: zero if the new IOCFacts parameters value is compatible with
1411  * older values else return -EPERM
1412  */
1413 static int
mpi3mr_revalidate_factsdata(struct mpi3mr_ioc * mrioc)1414 mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc)
1415 {
1416 	unsigned long *removepend_bitmap;
1417 
1418 	if (mrioc->facts.reply_sz > mrioc->reply_sz) {
1419 		ioc_err(mrioc,
1420 		    "cannot increase reply size from %d to %d\n",
1421 		    mrioc->reply_sz, mrioc->facts.reply_sz);
1422 		return -EPERM;
1423 	}
1424 
1425 	if (mrioc->facts.max_op_reply_q < mrioc->num_op_reply_q) {
1426 		ioc_err(mrioc,
1427 		    "cannot reduce number of operational reply queues from %d to %d\n",
1428 		    mrioc->num_op_reply_q,
1429 		    mrioc->facts.max_op_reply_q);
1430 		return -EPERM;
1431 	}
1432 
1433 	if (mrioc->facts.max_op_req_q < mrioc->num_op_req_q) {
1434 		ioc_err(mrioc,
1435 		    "cannot reduce number of operational request queues from %d to %d\n",
1436 		    mrioc->num_op_req_q, mrioc->facts.max_op_req_q);
1437 		return -EPERM;
1438 	}
1439 
1440 	if (mrioc->shost->max_sectors != (mrioc->facts.max_data_length / 512))
1441 		ioc_err(mrioc, "Warning: The maximum data transfer length\n"
1442 			    "\tchanged after reset: previous(%d), new(%d),\n"
1443 			    "the driver cannot change this at run time\n",
1444 			    mrioc->shost->max_sectors * 512, mrioc->facts.max_data_length);
1445 
1446 	if ((mrioc->sas_transport_enabled) && (mrioc->facts.ioc_capabilities &
1447 	    MPI3_IOCFACTS_CAPABILITY_MULTIPATH_SUPPORTED))
1448 		ioc_err(mrioc,
1449 		    "critical error: multipath capability is enabled at the\n"
1450 		    "\tcontroller while sas transport support is enabled at the\n"
1451 		    "\tdriver, please reboot the system or reload the driver\n");
1452 
1453 	if (mrioc->seg_tb_support) {
1454 		if (!(mrioc->facts.ioc_capabilities &
1455 		     MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_TRACE_SUPPORTED)) {
1456 			ioc_err(mrioc,
1457 			    "critical error: previously enabled segmented trace\n"
1458 			    " buffer capability is disabled after reset. Please\n"
1459 			    " update the firmware or reboot the system or\n"
1460 			    " reload the driver to enable trace diag buffer\n");
1461 			mrioc->diag_buffers[0].disabled_after_reset = true;
1462 		} else
1463 			mrioc->diag_buffers[0].disabled_after_reset = false;
1464 	}
1465 
1466 	if (mrioc->facts.max_devhandle > mrioc->dev_handle_bitmap_bits) {
1467 		removepend_bitmap = bitmap_zalloc(mrioc->facts.max_devhandle,
1468 						  GFP_KERNEL);
1469 		if (!removepend_bitmap) {
1470 			ioc_err(mrioc,
1471 				"failed to increase removepend_bitmap bits from %d to %d\n",
1472 				mrioc->dev_handle_bitmap_bits,
1473 				mrioc->facts.max_devhandle);
1474 			return -EPERM;
1475 		}
1476 		bitmap_free(mrioc->removepend_bitmap);
1477 		mrioc->removepend_bitmap = removepend_bitmap;
1478 		ioc_info(mrioc,
1479 			 "increased bits of dev_handle_bitmap from %d to %d\n",
1480 			 mrioc->dev_handle_bitmap_bits,
1481 			 mrioc->facts.max_devhandle);
1482 		mrioc->dev_handle_bitmap_bits = mrioc->facts.max_devhandle;
1483 	}
1484 
1485 	return 0;
1486 }
1487 
1488 /**
1489  * mpi3mr_bring_ioc_ready - Bring controller to ready state
1490  * @mrioc: Adapter instance reference
1491  *
1492  * Set Enable IOC bit in IOC configuration register and wait for
1493  * the controller to become ready.
1494  *
1495  * Return: 0 on success, appropriate error on failure.
1496  */
mpi3mr_bring_ioc_ready(struct mpi3mr_ioc * mrioc)1497 static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
1498 {
1499 	u32 ioc_config, ioc_status, timeout, host_diagnostic;
1500 	int retval = 0;
1501 	enum mpi3mr_iocstate ioc_state;
1502 	u64 base_info;
1503 	u8 retry = 0;
1504 	u64 start_time, elapsed_time_sec;
1505 
1506 retry_bring_ioc_ready:
1507 
1508 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1509 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1510 	base_info = lo_hi_readq(&mrioc->sysif_regs->ioc_information);
1511 	ioc_info(mrioc, "ioc_status(0x%08x), ioc_config(0x%08x), ioc_info(0x%016llx) at the bringup\n",
1512 	    ioc_status, ioc_config, base_info);
1513 
1514 	if (!mpi3mr_is_fault_recoverable(mrioc)) {
1515 		mrioc->unrecoverable = 1;
1516 		goto out_device_not_present;
1517 	}
1518 
1519 	/*The timeout value is in 2sec unit, changing it to seconds*/
1520 	mrioc->ready_timeout =
1521 	    ((base_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >>
1522 	    MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT) * 2;
1523 
1524 	ioc_info(mrioc, "ready timeout: %d seconds\n", mrioc->ready_timeout);
1525 
1526 	ioc_state = mpi3mr_get_iocstate(mrioc);
1527 	ioc_info(mrioc, "controller is in %s state during detection\n",
1528 	    mpi3mr_iocstate_name(ioc_state));
1529 
1530 	timeout = mrioc->ready_timeout * 10;
1531 
1532 	do {
1533 		ioc_state = mpi3mr_get_iocstate(mrioc);
1534 
1535 		if (ioc_state != MRIOC_STATE_BECOMING_READY &&
1536 		    ioc_state != MRIOC_STATE_RESET_REQUESTED)
1537 			break;
1538 
1539 		if (!pci_device_is_present(mrioc->pdev)) {
1540 			mrioc->unrecoverable = 1;
1541 			ioc_err(mrioc, "controller is not present while waiting to reset\n");
1542 			goto out_device_not_present;
1543 		}
1544 
1545 		msleep(100);
1546 	} while (--timeout);
1547 
1548 	if (ioc_state == MRIOC_STATE_READY) {
1549 		ioc_info(mrioc, "issuing message unit reset (MUR) to bring to reset state\n");
1550 		retval = mpi3mr_issue_and_process_mur(mrioc,
1551 		    MPI3MR_RESET_FROM_BRINGUP);
1552 		ioc_state = mpi3mr_get_iocstate(mrioc);
1553 		if (retval)
1554 			ioc_err(mrioc,
1555 			    "message unit reset failed with error %d current state %s\n",
1556 			    retval, mpi3mr_iocstate_name(ioc_state));
1557 	}
1558 	if (ioc_state != MRIOC_STATE_RESET) {
1559 		if (ioc_state == MRIOC_STATE_FAULT) {
1560 			timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
1561 			mpi3mr_print_fault_info(mrioc);
1562 			mpi3mr_save_fault_info(mrioc);
1563 			mrioc->fault_during_init = 1;
1564 			mrioc->fwfault_counter++;
1565 
1566 			do {
1567 				host_diagnostic =
1568 					readl(&mrioc->sysif_regs->host_diagnostic);
1569 				if (!(host_diagnostic &
1570 				      MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
1571 					break;
1572 				if (!pci_device_is_present(mrioc->pdev)) {
1573 					mrioc->unrecoverable = 1;
1574 					ioc_err(mrioc, "controller is not present at the bringup\n");
1575 					goto out_device_not_present;
1576 				}
1577 				msleep(100);
1578 			} while (--timeout);
1579 		}
1580 		mpi3mr_print_fault_info(mrioc);
1581 		ioc_info(mrioc, "issuing soft reset to bring to reset state\n");
1582 		retval = mpi3mr_issue_reset(mrioc,
1583 		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
1584 		    MPI3MR_RESET_FROM_BRINGUP);
1585 		if (retval) {
1586 			ioc_err(mrioc,
1587 			    "soft reset failed with error %d\n", retval);
1588 			goto out_failed;
1589 		}
1590 	}
1591 	ioc_state = mpi3mr_get_iocstate(mrioc);
1592 	if (ioc_state != MRIOC_STATE_RESET) {
1593 		ioc_err(mrioc,
1594 		    "cannot bring controller to reset state, current state: %s\n",
1595 		    mpi3mr_iocstate_name(ioc_state));
1596 		goto out_failed;
1597 	}
1598 	mpi3mr_clear_reset_history(mrioc);
1599 	retval = mpi3mr_setup_admin_qpair(mrioc);
1600 	if (retval) {
1601 		ioc_err(mrioc, "failed to setup admin queues: error %d\n",
1602 		    retval);
1603 		goto out_failed;
1604 	}
1605 
1606 	ioc_info(mrioc, "bringing controller to ready state\n");
1607 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1608 	ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
1609 	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
1610 
1611 	if (retry == 0)
1612 		start_time = jiffies;
1613 
1614 	timeout = mrioc->ready_timeout * 10;
1615 	do {
1616 		ioc_state = mpi3mr_get_iocstate(mrioc);
1617 		if (ioc_state == MRIOC_STATE_READY) {
1618 			ioc_info(mrioc,
1619 			    "successfully transitioned to %s state\n",
1620 			    mpi3mr_iocstate_name(ioc_state));
1621 			mpi3mr_clear_reset_history(mrioc);
1622 			return 0;
1623 		}
1624 		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1625 		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
1626 		    (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
1627 			mpi3mr_print_fault_info(mrioc);
1628 			goto out_failed;
1629 		}
1630 		if (!pci_device_is_present(mrioc->pdev)) {
1631 			mrioc->unrecoverable = 1;
1632 			ioc_err(mrioc,
1633 			    "controller is not present at the bringup\n");
1634 			retval = -1;
1635 			goto out_device_not_present;
1636 		}
1637 		msleep(100);
1638 		elapsed_time_sec = jiffies_to_msecs(jiffies - start_time)/1000;
1639 	} while (elapsed_time_sec < mrioc->ready_timeout);
1640 
1641 	ioc_state = mpi3mr_get_iocstate(mrioc);
1642 	if (ioc_state == MRIOC_STATE_READY) {
1643 		ioc_info(mrioc,
1644 		    "successfully transitioned to %s state after %llu seconds\n",
1645 		    mpi3mr_iocstate_name(ioc_state), elapsed_time_sec);
1646 		mpi3mr_clear_reset_history(mrioc);
1647 		return 0;
1648 	}
1649 
1650 out_failed:
1651 	elapsed_time_sec = jiffies_to_msecs(jiffies - start_time)/1000;
1652 	if ((retry < 2) && (elapsed_time_sec < (mrioc->ready_timeout - 60))) {
1653 		retry++;
1654 
1655 		ioc_warn(mrioc, "retrying to bring IOC ready, retry_count:%d\n"
1656 				" elapsed time =%llu\n", retry, elapsed_time_sec);
1657 
1658 		goto retry_bring_ioc_ready;
1659 	}
1660 	ioc_state = mpi3mr_get_iocstate(mrioc);
1661 	ioc_err(mrioc,
1662 	    "failed to bring to ready state,  current state: %s\n",
1663 	    mpi3mr_iocstate_name(ioc_state));
1664 out_device_not_present:
1665 	return retval;
1666 }
1667 
1668 /**
1669  * mpi3mr_soft_reset_success - Check softreset is success or not
1670  * @ioc_status: IOC status register value
1671  * @ioc_config: IOC config register value
1672  *
1673  * Check whether the soft reset is successful or not based on
1674  * IOC status and IOC config register values.
1675  *
1676  * Return: True when the soft reset is success, false otherwise.
1677  */
1678 static inline bool
mpi3mr_soft_reset_success(u32 ioc_status,u32 ioc_config)1679 mpi3mr_soft_reset_success(u32 ioc_status, u32 ioc_config)
1680 {
1681 	if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
1682 	    (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
1683 		return true;
1684 	return false;
1685 }
1686 
1687 /**
1688  * mpi3mr_diagfault_success - Check diag fault is success or not
1689  * @mrioc: Adapter reference
1690  * @ioc_status: IOC status register value
1691  *
1692  * Check whether the controller hit diag reset fault code.
1693  *
1694  * Return: True when there is diag fault, false otherwise.
1695  */
mpi3mr_diagfault_success(struct mpi3mr_ioc * mrioc,u32 ioc_status)1696 static inline bool mpi3mr_diagfault_success(struct mpi3mr_ioc *mrioc,
1697 	u32 ioc_status)
1698 {
1699 	u32 fault;
1700 
1701 	if (!(ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT))
1702 		return false;
1703 	fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK;
1704 	if (fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET) {
1705 		mpi3mr_print_fault_info(mrioc);
1706 		return true;
1707 	}
1708 	return false;
1709 }
1710 
1711 /**
1712  * mpi3mr_set_diagsave - Set diag save bit for snapdump
1713  * @mrioc: Adapter reference
1714  *
1715  * Set diag save bit in IOC configuration register to enable
1716  * snapdump.
1717  *
1718  * Return: Nothing.
1719  */
mpi3mr_set_diagsave(struct mpi3mr_ioc * mrioc)1720 static inline void mpi3mr_set_diagsave(struct mpi3mr_ioc *mrioc)
1721 {
1722 	u32 ioc_config;
1723 
1724 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1725 	ioc_config |= MPI3_SYSIF_IOC_CONFIG_DIAG_SAVE;
1726 	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
1727 }
1728 
1729 /**
1730  * mpi3mr_issue_reset - Issue reset to the controller
1731  * @mrioc: Adapter reference
1732  * @reset_type: Reset type
1733  * @reset_reason: Reset reason code
1734  *
1735  * Unlock the host diagnostic registers and write the specific
1736  * reset type to that, wait for reset acknowledgment from the
1737  * controller, if the reset is not successful retry for the
1738  * predefined number of times.
1739  *
1740  * Return: 0 on success, non-zero on failure.
1741  */
mpi3mr_issue_reset(struct mpi3mr_ioc * mrioc,u16 reset_type,u16 reset_reason)1742 static int mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type,
1743 	u16 reset_reason)
1744 {
1745 	int retval = -1;
1746 	u8 unlock_retry_count = 0;
1747 	u32 host_diagnostic, ioc_status, ioc_config, scratch_pad0;
1748 	u32 timeout = MPI3MR_RESET_ACK_TIMEOUT * 10;
1749 
1750 	if ((reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) &&
1751 	    (reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT))
1752 		return retval;
1753 	if (mrioc->unrecoverable)
1754 		return retval;
1755 	if (reset_reason == MPI3MR_RESET_FROM_FIRMWARE) {
1756 		retval = 0;
1757 		return retval;
1758 	}
1759 
1760 	ioc_info(mrioc, "%s reset due to %s(0x%x)\n",
1761 	    mpi3mr_reset_type_name(reset_type),
1762 	    mpi3mr_reset_rc_name(reset_reason), reset_reason);
1763 
1764 	mpi3mr_clear_reset_history(mrioc);
1765 	do {
1766 		ioc_info(mrioc,
1767 		    "Write magic sequence to unlock host diag register (retry=%d)\n",
1768 		    ++unlock_retry_count);
1769 		if (unlock_retry_count >= MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT) {
1770 			ioc_err(mrioc,
1771 			    "%s reset failed due to unlock failure, host_diagnostic(0x%08x)\n",
1772 			    mpi3mr_reset_type_name(reset_type),
1773 			    host_diagnostic);
1774 			mrioc->unrecoverable = 1;
1775 			return retval;
1776 		}
1777 
1778 		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH,
1779 		    &mrioc->sysif_regs->write_sequence);
1780 		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_1ST,
1781 		    &mrioc->sysif_regs->write_sequence);
1782 		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
1783 		    &mrioc->sysif_regs->write_sequence);
1784 		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_3RD,
1785 		    &mrioc->sysif_regs->write_sequence);
1786 		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_4TH,
1787 		    &mrioc->sysif_regs->write_sequence);
1788 		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_5TH,
1789 		    &mrioc->sysif_regs->write_sequence);
1790 		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_6TH,
1791 		    &mrioc->sysif_regs->write_sequence);
1792 		usleep_range(1000, 1100);
1793 		host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
1794 		ioc_info(mrioc,
1795 		    "wrote magic sequence: retry_count(%d), host_diagnostic(0x%08x)\n",
1796 		    unlock_retry_count, host_diagnostic);
1797 	} while (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_DIAG_WRITE_ENABLE));
1798 
1799 	scratch_pad0 = ((MPI3MR_RESET_REASON_OSTYPE_LINUX <<
1800 	    MPI3MR_RESET_REASON_OSTYPE_SHIFT) | (mrioc->facts.ioc_num <<
1801 	    MPI3MR_RESET_REASON_IOCNUM_SHIFT) | reset_reason);
1802 	writel(scratch_pad0, &mrioc->sysif_regs->scratchpad[0]);
1803 	if (reset_type == MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT)
1804 		mpi3mr_set_diagsave(mrioc);
1805 	writel(host_diagnostic | reset_type,
1806 	    &mrioc->sysif_regs->host_diagnostic);
1807 	switch (reset_type) {
1808 	case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET:
1809 		do {
1810 			ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1811 			ioc_config =
1812 			    readl(&mrioc->sysif_regs->ioc_configuration);
1813 			if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
1814 			    && mpi3mr_soft_reset_success(ioc_status, ioc_config)
1815 			    ) {
1816 				mpi3mr_clear_reset_history(mrioc);
1817 				retval = 0;
1818 				break;
1819 			}
1820 			msleep(100);
1821 		} while (--timeout);
1822 		mpi3mr_print_fault_info(mrioc);
1823 		break;
1824 	case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT:
1825 		do {
1826 			ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1827 			if (mpi3mr_diagfault_success(mrioc, ioc_status)) {
1828 				retval = 0;
1829 				break;
1830 			}
1831 			msleep(100);
1832 		} while (--timeout);
1833 		break;
1834 	default:
1835 		break;
1836 	}
1837 
1838 	writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
1839 	    &mrioc->sysif_regs->write_sequence);
1840 
1841 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1842 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1843 	ioc_info(mrioc,
1844 	    "ioc_status/ioc_config after %s reset is (0x%08x)/(0x%08x)\n",
1845 	    (!retval)?"successful":"failed", ioc_status,
1846 	    ioc_config);
1847 	if (retval)
1848 		mrioc->unrecoverable = 1;
1849 	return retval;
1850 }
1851 
1852 /**
1853  * mpi3mr_admin_request_post - Post request to admin queue
1854  * @mrioc: Adapter reference
1855  * @admin_req: MPI3 request
1856  * @admin_req_sz: Request size
1857  * @ignore_reset: Ignore reset in process
1858  *
1859  * Post the MPI3 request into admin request queue and
1860  * inform the controller, if the queue is full return
1861  * appropriate error.
1862  *
1863  * Return: 0 on success, non-zero on failure.
1864  */
int mpi3mr_admin_request_post(struct mpi3mr_ioc *mrioc, void *admin_req,
	u16 admin_req_sz, u8 ignore_reset)
{
	u16 pi, ci, num_entries;
	int retval = 0;
	unsigned long flags;
	u8 *slot;

	/* A dead controller can never consume the request. */
	if (mrioc->unrecoverable) {
		ioc_err(mrioc, "%s : Unrecoverable controller\n", __func__);
		return -EFAULT;
	}

	spin_lock_irqsave(&mrioc->admin_req_lock, flags);
	pi = mrioc->admin_req_pi;
	ci = mrioc->admin_req_ci;
	num_entries = mrioc->num_admin_req;

	/*
	 * Circular queue full test: the producer index may never catch
	 * up to the consumer index (one slot is always left empty).
	 */
	if ((ci == (pi + 1)) || ((!ci) && (pi == (num_entries - 1)))) {
		ioc_err(mrioc, "AdminReqQ full condition detected\n");
		retval = -EAGAIN;
		goto out;
	}

	/* Reset-time internal commands may post with ignore_reset set. */
	if (!ignore_reset && mrioc->reset_in_progress) {
		ioc_err(mrioc, "AdminReqQ submit reset in progress\n");
		retval = -EAGAIN;
		goto out;
	}

	if (mrioc->pci_err_recovery) {
		ioc_err(mrioc, "admin request queue submission failed due to pci error recovery in progress\n");
		retval = -EAGAIN;
		goto out;
	}

	/* Copy the caller's request into the next free fixed-size frame,
	 * zero-padding the remainder of the frame.
	 */
	slot = (u8 *)mrioc->admin_req_base +
	    (pi * MPI3MR_ADMIN_REQ_FRAME_SZ);
	memset(slot, 0, MPI3MR_ADMIN_REQ_FRAME_SZ);
	memcpy(slot, (u8 *)admin_req, admin_req_sz);

	/* Advance the producer index with wrap-around. */
	pi++;
	if (pi == num_entries)
		pi = 0;
	mrioc->admin_req_pi = pi;

	/* Ring the doorbell so the controller picks up the new entry. */
	writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi);

out:
	spin_unlock_irqrestore(&mrioc->admin_req_lock, flags);

	return retval;
}
1915 
1916 /**
1917  * mpi3mr_free_op_req_q_segments - free request memory segments
1918  * @mrioc: Adapter instance reference
1919  * @q_idx: operational request queue index
1920  *
1921  * Free memory segments allocated for operational request queue
1922  *
1923  * Return: Nothing.
1924  */
static void mpi3mr_free_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)
{
	struct op_req_qinfo *op_req_q = &mrioc->req_qinfo[q_idx];
	struct segments *segments = op_req_q->q_segments;
	int seg_size;
	u16 i;

	/* Nothing was ever allocated for this queue. */
	if (!segments)
		return;

	if (mrioc->enable_segqueue) {
		/* Segmented mode: fixed-size segments plus a segment list. */
		seg_size = MPI3MR_OP_REQ_Q_SEG_SIZE;
		if (op_req_q->q_segment_list) {
			dma_free_coherent(&mrioc->pdev->dev,
			    MPI3MR_MAX_SEG_LIST_SIZE,
			    op_req_q->q_segment_list,
			    op_req_q->q_segment_list_dma);
			op_req_q->q_segment_list = NULL;
		}
	} else {
		/* Contiguous mode: one segment holding the whole queue. */
		seg_size = op_req_q->segment_qd * mrioc->facts.op_req_sz;
	}

	for (i = 0; i < op_req_q->num_segments; i++) {
		if (segments[i].segment) {
			dma_free_coherent(&mrioc->pdev->dev, seg_size,
			    segments[i].segment, segments[i].segment_dma);
			segments[i].segment = NULL;
		}
	}

	kfree(op_req_q->q_segments);
	op_req_q->q_segments = NULL;
	/* qid 0 marks the queue as not created. */
	op_req_q->qid = 0;
}
1959 
1960 /**
1961  * mpi3mr_free_op_reply_q_segments - free reply memory segments
1962  * @mrioc: Adapter instance reference
1963  * @q_idx: operational reply queue index
1964  *
1965  * Free memory segments allocated for operational reply queue
1966  *
1967  * Return: Nothing.
1968  */
static void mpi3mr_free_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)
{
	struct op_reply_qinfo *op_reply_q = &mrioc->op_reply_qinfo[q_idx];
	struct segments *segments = op_reply_q->q_segments;
	int seg_size;
	u16 i;

	/* Queue was never allocated. */
	if (!segments)
		return;

	if (mrioc->enable_segqueue) {
		/* Segmented mode: fixed-size segments plus a segment list. */
		seg_size = MPI3MR_OP_REP_Q_SEG_SIZE;
		if (op_reply_q->q_segment_list) {
			dma_free_coherent(&mrioc->pdev->dev,
			    MPI3MR_MAX_SEG_LIST_SIZE,
			    op_reply_q->q_segment_list,
			    op_reply_q->q_segment_list_dma);
			op_reply_q->q_segment_list = NULL;
		}
	} else {
		/* Contiguous mode: one segment holding every descriptor. */
		seg_size = op_reply_q->segment_qd * mrioc->op_reply_desc_sz;
	}

	for (i = 0; i < op_reply_q->num_segments; i++) {
		if (segments[i].segment) {
			dma_free_coherent(&mrioc->pdev->dev, seg_size,
			    segments[i].segment, segments[i].segment_dma);
			segments[i].segment = NULL;
		}
	}

	kfree(op_reply_q->q_segments);
	op_reply_q->q_segments = NULL;
	/* qid 0 marks the queue as not created. */
	op_reply_q->qid = 0;
}
2004 
2005 /**
2006  * mpi3mr_delete_op_reply_q - delete operational reply queue
2007  * @mrioc: Adapter instance reference
2008  * @qidx: operational reply queue index
2009  *
2010  * Delete operatinal reply queue by issuing MPI request
2011  * through admin queue.
2012  *
2013  * Return:  0 on success, non-zero on failure.
2014  */
static int mpi3mr_delete_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
{
	struct mpi3_delete_reply_queue_request delq_req;
	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
	int retval = 0;
	u16 reply_qid = 0, midx;

	reply_qid = op_reply_q->qid;

	/* MSI-X index serving this reply queue; used below to detach the
	 * queue from its interrupt vector once the delete succeeds.
	 */
	midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);

	/* qid 0 means the queue was never created. */
	if (!reply_qid)	{
		retval = -1;
		ioc_err(mrioc, "Issue DelRepQ: called with invalid ReqQID\n");
		goto out;
	}

	/* Keep the default/poll queue counters in sync with the queue
	 * that is about to be removed.
	 */
	(op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) ? mrioc->default_qcount-- :
	    mrioc->active_poll_qcount--;

	memset(&delq_req, 0, sizeof(delq_req));
	/* init_cmds is a single shared slot for internal admin commands;
	 * it must be free before this request can be issued.
	 */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue DelRepQ: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	delq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	delq_req.function = MPI3_FUNCTION_DELETE_REPLY_QUEUE;
	delq_req.queue_id = cpu_to_le16(reply_qid);

	init_completion(&mrioc->init_cmds.done);
	/* ignore_reset=1: queue deletion is part of reset/teardown flows. */
	retval = mpi3mr_admin_request_post(mrioc, &delq_req, sizeof(delq_req),
	    1);
	if (retval) {
		ioc_err(mrioc, "Issue DelRepQ: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		/* No completion within the timeout: fault the controller
		 * (or pick up an async reset) via the rh-fault helper.
		 */
		ioc_err(mrioc, "delete reply queue timed out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_DELREPQ_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue DelRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	/* Detach the queue from its MSI-X vector and release its memory
	 * only after the firmware confirmed the deletion.
	 */
	mrioc->intr_info[midx].op_reply_q = NULL;

	mpi3mr_free_op_reply_q_segments(mrioc, qidx);
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:

	return retval;
}
2085 
2086 /**
2087  * mpi3mr_alloc_op_reply_q_segments -Alloc segmented reply pool
2088  * @mrioc: Adapter instance reference
2089  * @qidx: request queue index
2090  *
2091  * Allocate segmented memory pools for operational reply
2092  * queue.
2093  *
2094  * Return: 0 on success, non-zero on failure.
2095  */
static int mpi3mr_alloc_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx)
{
	struct op_reply_qinfo *op_reply_q = &mrioc->op_reply_qinfo[qidx];
	u64 *seg_list = NULL;
	struct segments *segments;
	int seg_size;
	int i;

	if (mrioc->enable_segqueue) {
		/* Segmented mode: carve the queue into fixed-size DMA
		 * segments and record their addresses in a segment list
		 * that is handed to the firmware.
		 */
		op_reply_q->segment_qd =
		    MPI3MR_OP_REP_Q_SEG_SIZE / mrioc->op_reply_desc_sz;
		seg_size = MPI3MR_OP_REP_Q_SEG_SIZE;

		op_reply_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev,
		    MPI3MR_MAX_SEG_LIST_SIZE, &op_reply_q->q_segment_list_dma,
		    GFP_KERNEL);
		if (!op_reply_q->q_segment_list)
			return -ENOMEM;
		seg_list = (u64 *)op_reply_q->q_segment_list;
	} else {
		/* Contiguous mode: one segment holds the whole queue. */
		op_reply_q->segment_qd = op_reply_q->num_replies;
		seg_size = op_reply_q->num_replies * mrioc->op_reply_desc_sz;
	}

	op_reply_q->num_segments = DIV_ROUND_UP(op_reply_q->num_replies,
	    op_reply_q->segment_qd);

	op_reply_q->q_segments = kzalloc_objs(struct segments,
					      op_reply_q->num_segments);
	if (!op_reply_q->q_segments)
		return -ENOMEM;

	/* On partial failure the caller unwinds via
	 * mpi3mr_free_op_reply_q_segments().
	 */
	segments = op_reply_q->q_segments;
	for (i = 0; i < op_reply_q->num_segments; i++) {
		segments[i].segment = dma_alloc_coherent(&mrioc->pdev->dev,
		    seg_size, &segments[i].segment_dma, GFP_KERNEL);
		if (!segments[i].segment)
			return -ENOMEM;
		if (mrioc->enable_segqueue)
			seg_list[i] = (unsigned long)segments[i].segment_dma;
	}

	return 0;
}
2142 
2143 /**
2144  * mpi3mr_alloc_op_req_q_segments - Alloc segmented req pool.
2145  * @mrioc: Adapter instance reference
2146  * @qidx: request queue index
2147  *
2148  * Allocate segmented memory pools for operational request
2149  * queue.
2150  *
2151  * Return: 0 on success, non-zero on failure.
2152  */
static int mpi3mr_alloc_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx)
{
	struct op_req_qinfo *op_req_q = &mrioc->req_qinfo[qidx];
	u64 *seg_list = NULL;
	struct segments *segments;
	int seg_size;
	int i;

	if (mrioc->enable_segqueue) {
		/* Segmented mode: carve the queue into fixed-size DMA
		 * segments and publish their addresses through a segment
		 * list consumed by the firmware.
		 */
		op_req_q->segment_qd =
		    MPI3MR_OP_REQ_Q_SEG_SIZE / mrioc->facts.op_req_sz;
		seg_size = MPI3MR_OP_REQ_Q_SEG_SIZE;

		op_req_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev,
		    MPI3MR_MAX_SEG_LIST_SIZE, &op_req_q->q_segment_list_dma,
		    GFP_KERNEL);
		if (!op_req_q->q_segment_list)
			return -ENOMEM;
		seg_list = (u64 *)op_req_q->q_segment_list;
	} else {
		/* Contiguous mode: one segment holds every request frame. */
		op_req_q->segment_qd = op_req_q->num_requests;
		seg_size = op_req_q->num_requests * mrioc->facts.op_req_sz;
	}

	op_req_q->num_segments = DIV_ROUND_UP(op_req_q->num_requests,
	    op_req_q->segment_qd);

	op_req_q->q_segments = kzalloc_objs(struct segments,
					    op_req_q->num_segments);
	if (!op_req_q->q_segments)
		return -ENOMEM;

	/* On partial failure the caller unwinds via
	 * mpi3mr_free_op_req_q_segments().
	 */
	segments = op_req_q->q_segments;
	for (i = 0; i < op_req_q->num_segments; i++) {
		segments[i].segment = dma_alloc_coherent(&mrioc->pdev->dev,
		    seg_size, &segments[i].segment_dma, GFP_KERNEL);
		if (!segments[i].segment)
			return -ENOMEM;
		if (mrioc->enable_segqueue)
			seg_list[i] = (unsigned long)segments[i].segment_dma;
	}

	return 0;
}
2200 
2201 /**
2202  * mpi3mr_create_op_reply_q - create operational reply queue
2203  * @mrioc: Adapter instance reference
2204  * @qidx: operational reply queue index
2205  *
2206  * Create operatinal reply queue by issuing MPI request
2207  * through admin queue.
2208  *
2209  * Return:  0 on success, non-zero on failure.
2210  */
static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
{
	struct mpi3_create_reply_queue_request create_req;
	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
	int retval = 0;
	u16 reply_qid = 0, midx;

	reply_qid = op_reply_q->qid;

	/* MSI-X index that will service this reply queue. */
	midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);

	/* Non-zero qid means this queue already exists. */
	if (reply_qid) {
		retval = -1;
		ioc_err(mrioc, "CreateRepQ: called for duplicate qid %d\n",
		    reply_qid);

		return retval;
	}

	/* Firmware queue IDs are 1-based. */
	reply_qid = qidx + 1;

	/* Queue depth is controller dependent; SAS4116 A0 (revision 0)
	 * parts get the 4K depth, later revisions the standard depth,
	 * all other devices the 2K depth.
	 */
	if (mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) {
		if (mrioc->pdev->revision)
			op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;
		else
			op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD4K;
	} else
		op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD2K;

	op_reply_q->ci = 0;
	op_reply_q->ephase = 1;
	atomic_set(&op_reply_q->pend_ios, 0);
	atomic_set(&op_reply_q->in_use, 0);
	op_reply_q->enable_irq_poll = false;
	/* High-water mark used by the submit path to push back I/O
	 * before the reply queue can overflow.
	 */
	op_reply_q->qfull_watermark =
		op_reply_q->num_replies - (MPI3MR_THRESHOLD_REPLY_COUNT * 2);

	if (!op_reply_q->q_segments) {
		retval = mpi3mr_alloc_op_reply_q_segments(mrioc, qidx);
		if (retval) {
			/* Unwind any partially allocated segments. */
			mpi3mr_free_op_reply_q_segments(mrioc, qidx);
			goto out;
		}
	}

	memset(&create_req, 0, sizeof(create_req));
	/* init_cmds is the single shared slot for internal admin commands. */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "CreateRepQ: Init command is in use\n");
		goto out_unlock;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	create_req.function = MPI3_FUNCTION_CREATE_REPLY_QUEUE;
	create_req.queue_id = cpu_to_le16(reply_qid);

	/* The first (intr_info_count - requested_poll_qcount) vectors are
	 * interrupt driven; the remainder are io_uring poll queues.
	 */
	if (midx < (mrioc->intr_info_count - mrioc->requested_poll_qcount))
		op_reply_q->qtype = MPI3MR_DEFAULT_QUEUE;
	else
		op_reply_q->qtype = MPI3MR_POLL_QUEUE;

	if (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) {
		create_req.flags =
			MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE;
		create_req.msix_index =
			cpu_to_le16(mrioc->intr_info[midx].msix_index);
	} else {
		/* Poll queues share the last MSI-X vector with its IRQ
		 * disabled once the first poll queue is created.
		 */
		create_req.msix_index = cpu_to_le16(mrioc->intr_info_count - 1);
		ioc_info(mrioc, "create reply queue(polled): for qid(%d), midx(%d)\n",
			reply_qid, midx);
		if (!mrioc->active_poll_qcount)
			disable_irq_nosync(pci_irq_vector(mrioc->pdev,
			    mrioc->intr_info_count - 1));
	}

	if (mrioc->enable_segqueue) {
		/* Fix: use the reply-queue segmented flag here; the
		 * request-queue macro used previously happens to share
		 * the same value but named the wrong message type.
		 */
		create_req.flags |=
		    MPI3_CREATE_REPLY_QUEUE_FLAGS_SEGMENTED_SEGMENTED;
		create_req.base_address = cpu_to_le64(
		    op_reply_q->q_segment_list_dma);
	} else
		create_req.base_address = cpu_to_le64(
		    op_reply_q->q_segments[0].segment_dma);

	create_req.size = cpu_to_le16(op_reply_q->num_replies);

	init_completion(&mrioc->init_cmds.done);
	/* ignore_reset=1: queue creation runs during init/reset recovery. */
	retval = mpi3mr_admin_request_post(mrioc, &create_req,
	    sizeof(create_req), 1);
	if (retval) {
		ioc_err(mrioc, "CreateRepQ: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "create reply queue timed out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "CreateRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	/* Publish the queue only after the firmware confirmed creation. */
	op_reply_q->qid = reply_qid;
	if (midx < mrioc->intr_info_count)
		mrioc->intr_info[midx].op_reply_q = op_reply_q;

	(op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) ? mrioc->default_qcount++ :
	    mrioc->active_poll_qcount++;

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:

	return retval;
}
2339 
2340 /**
2341  * mpi3mr_create_op_req_q - create operational request queue
2342  * @mrioc: Adapter instance reference
2343  * @idx: operational request queue index
2344  * @reply_qid: Reply queue ID
2345  *
2346  * Create operatinal request queue by issuing MPI request
2347  * through admin queue.
2348  *
2349  * Return:  0 on success, non-zero on failure.
2350  */
static int mpi3mr_create_op_req_q(struct mpi3mr_ioc *mrioc, u16 idx,
	u16 reply_qid)
{
	struct mpi3_create_request_queue_request create_req;
	struct op_req_qinfo *op_req_q = mrioc->req_qinfo + idx;
	int retval = 0;
	u16 req_qid = 0;

	req_qid = op_req_q->qid;

	/* Non-zero qid means this queue already exists. */
	if (req_qid) {
		retval = -1;
		ioc_err(mrioc, "CreateReqQ: called for duplicate qid %d\n",
		    req_qid);

		return retval;
	}
	/* Firmware queue IDs are 1-based. */
	req_qid = idx + 1;

	op_req_q->num_requests = MPI3MR_OP_REQ_Q_QD;
	op_req_q->ci = 0;
	op_req_q->pi = 0;
	/* Completions for this request queue arrive on reply_qid. */
	op_req_q->reply_qid = reply_qid;
	spin_lock_init(&op_req_q->q_lock);

	if (!op_req_q->q_segments) {
		retval = mpi3mr_alloc_op_req_q_segments(mrioc, idx);
		if (retval) {
			/* Unwind any partially allocated segments. */
			mpi3mr_free_op_req_q_segments(mrioc, idx);
			goto out;
		}
	}

	memset(&create_req, 0, sizeof(create_req));
	/* init_cmds is the single shared slot for internal admin commands. */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "CreateReqQ: Init command is in use\n");
		goto out_unlock;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	create_req.function = MPI3_FUNCTION_CREATE_REQUEST_QUEUE;
	create_req.queue_id = cpu_to_le16(req_qid);
	/* Base address is either the segment list (segmented mode) or the
	 * single contiguous segment.
	 */
	if (mrioc->enable_segqueue) {
		create_req.flags =
		    MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED;
		create_req.base_address = cpu_to_le64(
		    op_req_q->q_segment_list_dma);
	} else
		create_req.base_address = cpu_to_le64(
		    op_req_q->q_segments[0].segment_dma);
	create_req.reply_queue_id = cpu_to_le16(reply_qid);
	create_req.size = cpu_to_le16(op_req_q->num_requests);

	init_completion(&mrioc->init_cmds.done);
	/* ignore_reset=1: queue creation runs during init/reset recovery. */
	retval = mpi3mr_admin_request_post(mrioc, &create_req,
	    sizeof(create_req), 1);
	if (retval) {
		ioc_err(mrioc, "CreateReqQ: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		/* No completion within the timeout: fault the controller
		 * (or pick up an async reset) via the rh-fault helper.
		 */
		ioc_err(mrioc, "create request queue timed out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "CreateReqQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	/* Publish the queue only after the firmware confirmed creation. */
	op_req_q->qid = req_qid;

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:

	return retval;
}
2442 
2443 /**
2444  * mpi3mr_create_op_queues - create operational queue pairs
2445  * @mrioc: Adapter instance reference
2446  *
2447  * Allocate memory for operational queue meta data and call
2448  * create request and reply queue functions.
2449  *
2450  * Return: 0 on success, non-zero on failures.
2451  */
mpi3mr_create_op_queues(struct mpi3mr_ioc * mrioc)2452 static int mpi3mr_create_op_queues(struct mpi3mr_ioc *mrioc)
2453 {
2454 	int retval = 0;
2455 	u16 num_queues = 0, i = 0, msix_count_op_q = 1;
2456 	u32 ioc_status;
2457 	enum mpi3mr_iocstate ioc_state;
2458 
2459 	num_queues = min_t(int, mrioc->facts.max_op_reply_q,
2460 	    mrioc->facts.max_op_req_q);
2461 
2462 	msix_count_op_q =
2463 	    mrioc->intr_info_count - mrioc->op_reply_q_offset;
2464 	if (!mrioc->num_queues)
2465 		mrioc->num_queues = min_t(int, num_queues, msix_count_op_q);
2466 	/*
2467 	 * During reset set the num_queues to the number of queues
2468 	 * that was set before the reset.
2469 	 */
2470 	num_queues = mrioc->num_op_reply_q ?
2471 	    mrioc->num_op_reply_q : mrioc->num_queues;
2472 	ioc_info(mrioc, "trying to create %d operational queue pairs\n",
2473 	    num_queues);
2474 
2475 	if (!mrioc->req_qinfo) {
2476 		mrioc->req_qinfo = kzalloc_objs(struct op_req_qinfo, num_queues);
2477 		if (!mrioc->req_qinfo) {
2478 			retval = -1;
2479 			goto out_failed;
2480 		}
2481 
2482 		mrioc->op_reply_qinfo = kzalloc(sizeof(struct op_reply_qinfo) *
2483 		    num_queues, GFP_KERNEL);
2484 		if (!mrioc->op_reply_qinfo) {
2485 			retval = -1;
2486 			goto out_failed;
2487 		}
2488 	}
2489 
2490 	if (mrioc->enable_segqueue)
2491 		ioc_info(mrioc,
2492 		    "allocating operational queues through segmented queues\n");
2493 
2494 	for (i = 0; i < num_queues; i++) {
2495 		if (mpi3mr_create_op_reply_q(mrioc, i)) {
2496 			ioc_err(mrioc, "Cannot create OP RepQ %d\n", i);
2497 			break;
2498 		}
2499 		if (mpi3mr_create_op_req_q(mrioc, i,
2500 		    mrioc->op_reply_qinfo[i].qid)) {
2501 			ioc_err(mrioc, "Cannot create OP ReqQ %d\n", i);
2502 			mpi3mr_delete_op_reply_q(mrioc, i);
2503 			break;
2504 		}
2505 	}
2506 
2507 	if (i == 0) {
2508 		/* Not even one queue is created successfully*/
2509 		retval = -1;
2510 		goto out_failed;
2511 	}
2512 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
2513 	ioc_state = mpi3mr_get_iocstate(mrioc);
2514 	if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
2515 	    ioc_state != MRIOC_STATE_READY) {
2516 		mpi3mr_print_fault_info(mrioc);
2517 		retval = -1;
2518 		goto out_failed;
2519 	}
2520 	mrioc->num_op_reply_q = mrioc->num_op_req_q = i;
2521 	ioc_info(mrioc,
2522 	    "successfully created %d operational queue pairs(default/polled) queue = (%d/%d)\n",
2523 	    mrioc->num_op_reply_q, mrioc->default_qcount,
2524 	    mrioc->active_poll_qcount);
2525 
2526 	return retval;
2527 out_failed:
2528 	kfree(mrioc->req_qinfo);
2529 	mrioc->req_qinfo = NULL;
2530 
2531 	kfree(mrioc->op_reply_qinfo);
2532 	mrioc->op_reply_qinfo = NULL;
2533 
2534 	return retval;
2535 }
2536 
2537 /**
2538  * mpi3mr_op_request_post - Post request to operational queue
2539  * @mrioc: Adapter reference
2540  * @op_req_q: Operational request queue info
2541  * @req: MPI3 request
2542  *
2543  * Post the MPI3 request into operational request queue and
2544  * inform the controller, if the queue is full return
2545  * appropriate error.
2546  *
2547  * Return: 0 on success, non-zero on failure.
2548  */
int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
	struct op_req_qinfo *op_req_q, u8 *req)
{
	u16 pi = 0, max_entries, reply_qidx = 0, midx;
	int retval = 0;
	unsigned long flags;
	u8 *req_entry;
	void *segment_base_addr;
	u16 req_sz = mrioc->facts.op_req_sz;
	struct segments *segments = op_req_q->q_segments;
	struct op_reply_qinfo *op_reply_q = NULL;

	/* Queue IDs are 1-based; derive the paired reply queue index. */
	reply_qidx = op_req_q->reply_qid - 1;
	op_reply_q = mrioc->op_reply_qinfo + reply_qidx;

	if (mrioc->unrecoverable)
		return -EFAULT;

	spin_lock_irqsave(&op_req_q->q_lock, flags);
	pi = op_req_q->pi;
	max_entries = op_req_q->num_requests;

	if (mpi3mr_check_req_qfull(op_req_q)) {
		/* Request queue full: drain the paired reply queue once to
		 * let the consumer index advance, then re-check.
		 */
		midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(
		    reply_qidx, mrioc->op_reply_q_offset);
		mpi3mr_process_op_reply_q(mrioc, mrioc->intr_info[midx].op_reply_q);

		if (mpi3mr_check_req_qfull(op_req_q)) {
			retval = -EAGAIN;
			goto out;
		}
	}

	if (mrioc->reset_in_progress) {
		ioc_err(mrioc, "OpReqQ submit reset in progress\n");
		retval = -EAGAIN;
		goto out;
	}
	if (mrioc->pci_err_recovery) {
		ioc_err(mrioc, "operational request queue submission failed due to pci error recovery in progress\n");
		retval = -EAGAIN;
		goto out;
	}

	/* Reply queue is nearing to get full, push back IOs to SML */
	if ((mrioc->prevent_reply_qfull == true) &&
		(atomic_read(&op_reply_q->pend_ios) >
	     (op_reply_q->qfull_watermark))) {
		atomic_inc(&mrioc->reply_qfull_count);
		retval = -EAGAIN;
		goto out;
	}

	/* Locate the frame for the producer index inside its segment. */
	segment_base_addr = segments[pi / op_req_q->segment_qd].segment;
	req_entry = (u8 *)segment_base_addr +
	    ((pi % op_req_q->segment_qd) * req_sz);

	/* NOTE(review): only MPI3MR_ADMIN_REQ_FRAME_SZ bytes are copied from
	 * the caller while the full req_sz frame is zeroed - presumably
	 * callers never build requests larger than the admin frame size;
	 * confirm against the submit paths.
	 */
	memset(req_entry, 0, req_sz);
	memcpy(req_entry, req, MPI3MR_ADMIN_REQ_FRAME_SZ);

	if (++pi == max_entries)
		pi = 0;
	op_req_q->pi = pi;

#ifndef CONFIG_PREEMPT_RT
	/* Track outstanding I/Os; past the trigger count the reply queue
	 * switches to irq-poll mode (not used on PREEMPT_RT).
	 */
	if (atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios)
	    > MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT)
		mrioc->op_reply_qinfo[reply_qidx].enable_irq_poll = true;
#else
	atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios);
#endif

	/* Ring the per-queue doorbell with the new producer index. */
	writel(op_req_q->pi,
	    &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].producer_index);

out:
	spin_unlock_irqrestore(&op_req_q->q_lock, flags);
	return retval;
}
2628 
2629 /**
2630  * mpi3mr_check_rh_fault_ioc - check reset history and fault
2631  * controller
2632  * @mrioc: Adapter instance reference
2633  * @reason_code: reason code for the fault.
2634  *
2635  * This routine will save snapdump and fault the controller with
2636  * the given reason code if it is not already in the fault or
2637  * not asynchronosuly reset. This will be used to handle
2638  * initilaization time faults/resets/timeout as in those cases
2639  * immediate soft reset invocation is not required.
2640  *
2641  * Return:  None.
2642  */
void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code)
{
	u32 ioc_status, host_diagnostic, timeout;
	union mpi3mr_trigger_data trigger_data;

	if (mrioc->unrecoverable) {
		ioc_err(mrioc, "controller is unrecoverable\n");
		return;
	}

	/* A surprise-removed device cannot be faulted or recovered. */
	if (!pci_device_is_present(mrioc->pdev)) {
		mrioc->unrecoverable = 1;
		ioc_err(mrioc, "controller is not present\n");
		return;
	}
	memset(&trigger_data, 0, sizeof(trigger_data));
	ioc_status = readl(&mrioc->sysif_regs->ioc_status);

	if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
		/* Controller was already reset asynchronously; just release
		 * the host diag buffers - no new fault needs to be raised.
		 */
		mpi3mr_set_trigger_data_in_all_hdb(mrioc,
		    MPI3MR_HDB_TRIGGER_TYPE_FW_RELEASED, NULL, 0);
		return;
	} else if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
		/* Controller already faulted on its own: record that fault
		 * instead of issuing a new one.
		 */
		trigger_data.fault = (readl(&mrioc->sysif_regs->fault) &
		      MPI3_SYSIF_FAULT_CODE_MASK);

		mpi3mr_set_trigger_data_in_all_hdb(mrioc,
		    MPI3MR_HDB_TRIGGER_TYPE_FAULT, &trigger_data, 0);
		mpi3mr_print_fault_info(mrioc);
		mpi3mr_save_fault_info(mrioc);
		mrioc->fault_during_init = 1;
		mrioc->fwfault_counter++;
		return;
	}

	/* Neither reset nor fault pending: fault the controller ourselves
	 * with the caller's reason code, preserving diagnostic data.
	 */
	mpi3mr_set_diagsave(mrioc);
	mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
	    reason_code);
	trigger_data.fault = (readl(&mrioc->sysif_regs->fault) &
		      MPI3_SYSIF_FAULT_CODE_MASK);
	mpi3mr_set_trigger_data_in_all_hdb(mrioc, MPI3MR_HDB_TRIGGER_TYPE_FAULT,
	    &trigger_data, 0);
	/* Poll (100ms granularity) until the firmware finishes saving its
	 * diagnostic snapshot or the timeout expires.
	 */
	timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
	do {
		host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
		if (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
			break;
		msleep(100);
	} while (--timeout);

	mpi3mr_save_fault_info(mrioc);
	mrioc->fault_during_init = 1;
	mrioc->fwfault_counter++;
}
2697 
2698 /**
2699  * mpi3mr_sync_timestamp - Issue time stamp sync request
2700  * @mrioc: Adapter reference
2701  *
2702  * Issue IO unit control MPI request to synchornize firmware
2703  * timestamp with host time.
2704  *
2705  * Return: 0 on success, non-zero on failure.
2706  */
static int mpi3mr_sync_timestamp(struct mpi3mr_ioc *mrioc)
{
	ktime_t current_time;
	struct mpi3_iounit_control_request iou_ctrl;
	int retval = 0;

	memset(&iou_ctrl, 0, sizeof(iou_ctrl));
	/* init_cmds is the single shared slot for internal admin commands;
	 * it must be free before this request can be issued.
	 */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue IOUCTL time_stamp: command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	iou_ctrl.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL;
	iou_ctrl.operation = MPI3_CTRL_OP_UPDATE_TIMESTAMP;
	/* Firmware expects host wall-clock time in milliseconds. */
	current_time = ktime_get_real();
	iou_ctrl.param64[0] = cpu_to_le64(ktime_to_ms(current_time));

	init_completion(&mrioc->init_cmds.done);
	/* ignore_reset=0: skip the timestamp sync while a reset is active. */
	retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl,
	    sizeof(iou_ctrl), 0);
	if (retval) {
		ioc_err(mrioc, "Issue IOUCTL time_stamp: Admin Post failed\n");
		goto out_unlock;
	}

	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "Issue IOUCTL time_stamp: command timed out\n");
		mrioc->init_cmds.is_waiting = 0;
		/* Only fault the controller if the command was not already
		 * flushed out by a reset.
		 */
		if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
			mpi3mr_check_rh_fault_ioc(mrioc,
			    MPI3MR_RESET_FROM_TSU_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue IOUCTL time_stamp: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	return retval;
}
2766 
2767 /**
2768  * mpi3mr_print_pkg_ver - display controller fw package version
2769  * @mrioc: Adapter reference
2770  *
2771  * Retrieve firmware package version from the component image
2772  * header of the controller flash and display it.
2773  *
2774  * Return: 0 on success and non-zero on failure.
2775  */
static int mpi3mr_print_pkg_ver(struct mpi3mr_ioc *mrioc)
{
	struct mpi3_ci_upload_request ci_upload;
	int retval = -1;
	void *data = NULL;
	dma_addr_t data_dma;
	struct mpi3_ci_manifest_mpi *manifest;
	u32 data_len = sizeof(struct mpi3_ci_manifest_mpi);
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;

	/* DMA-able buffer that receives the uploaded manifest segment */
	data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
	    GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	memset(&ci_upload, 0, sizeof(ci_upload));
	/* init_cmds is a single shared command slot; serialize via its mutex */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		ioc_err(mrioc, "sending get package version failed due to command in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	ci_upload.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	ci_upload.function = MPI3_FUNCTION_CI_UPLOAD;
	/* Pull the manifest from the primary flash image location */
	ci_upload.msg_flags = MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_PRIMARY;
	ci_upload.signature1 = cpu_to_le32(MPI3_IMAGE_HEADER_SIGNATURE1_MANIFEST);
	ci_upload.image_offset = cpu_to_le32(MPI3_IMAGE_HEADER_SIZE);
	ci_upload.segment_size = cpu_to_le32(data_len);

	mpi3mr_add_sg_single(&ci_upload.sgl, sgl_flags, data_len,
	    data_dma);
	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &ci_upload,
	    sizeof(ci_upload), 1);
	if (retval) {
		ioc_err(mrioc, "posting get package version failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "get package version timed out\n");
		/* Timed out: hand off to the fault/reset escalation path */
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    == MPI3_IOCSTATUS_SUCCESS) {
		manifest = (struct mpi3_ci_manifest_mpi *) data;
		if (manifest->manifest_type == MPI3_CI_MANIFEST_TYPE_MPI) {
			ioc_info(mrioc,
			    "firmware package version(%d.%d.%d.%d.%05d-%05d)\n",
			    manifest->package_version.gen_major,
			    manifest->package_version.gen_minor,
			    manifest->package_version.phase_major,
			    manifest->package_version.phase_minor,
			    manifest->package_version.customer_id,
			    manifest->package_version.build_num);
		}
	}
	/*
	 * NOTE(review): retval becomes 0 here even when ioc_status is not
	 * SUCCESS - printing the package version appears intentionally
	 * best-effort; confirm before tightening this to an error return.
	 */
	retval = 0;
out_unlock:
	/* Release the shared command slot before dropping the mutex */
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	if (data)
		dma_free_coherent(&mrioc->pdev->dev, data_len, data,
		    data_dma);
	return retval;
}
2851 
2852 /**
2853  * mpi3mr_watchdog_work - watchdog thread to monitor faults
2854  * @work: work struct
2855  *
2856  * Watch dog work periodically executed (1 second interval) to
2857  * monitor firmware fault and to issue periodic timer sync to
2858  * the firmware.
2859  *
2860  * Return: Nothing.
2861  */
static void mpi3mr_watchdog_work(struct work_struct *work)
{
	struct mpi3mr_ioc *mrioc =
	    container_of(work, struct mpi3mr_ioc, watchdog_work.work);
	unsigned long flags;
	enum mpi3mr_iocstate ioc_state;
	u32 host_diagnostic, ioc_status;
	union mpi3mr_trigger_data trigger_data;
	u16 reset_reason = MPI3MR_RESET_FROM_FAULT_WATCH;

	/* Report (once) a fault that was observed during initialization */
	if (mrioc->fault_during_init) {
		mpi3mr_fault_uevent_emit(mrioc);
		mrioc->fault_during_init = 0;
	}

	/* Do not interfere while a reset or PCI error recovery is running */
	if (mrioc->reset_in_progress || mrioc->pci_err_recovery)
		return;

	if (!mrioc->unrecoverable && !pci_device_is_present(mrioc->pdev)) {
		ioc_err(mrioc, "watchdog could not detect the controller\n");
		mrioc->unrecoverable = 1;
	}

	/* Unrecoverable: fail outstanding commands and stop rescheduling */
	if (mrioc->unrecoverable) {
		ioc_err(mrioc,
		    "flush pending commands for unrecoverable controller\n");
		mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
		return;
	}

	/* Drain admin replies left behind by an unprocessed ISR instance */
	if (atomic_read(&mrioc->admin_pend_isr)) {
		ioc_err(mrioc, "Unprocessed admin ISR instance found\n"
				"flush admin replies\n");
		mpi3mr_process_admin_reply_q(mrioc);
	}

	/* Periodic timestamp sync, skipped for non-supervisor IOCs */
	if (!(mrioc->facts.ioc_capabilities &
		MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_IOC) &&
		(mrioc->ts_update_counter++ >= mrioc->ts_update_interval)) {

		mrioc->ts_update_counter = 0;
		mpi3mr_sync_timestamp(mrioc);
	}

	/* Firmware asked to prepare for reset but never followed through */
	if ((mrioc->prepare_for_reset) &&
	    ((mrioc->prepare_for_reset_timeout_counter++) >=
	     MPI3MR_PREPARE_FOR_RESET_TIMEOUT)) {
		mpi3mr_soft_reset_handler(mrioc,
		    MPI3MR_RESET_FROM_CIACTVRST_TIMER, 1);
		return;
	}

	memset(&trigger_data, 0, sizeof(trigger_data));
	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	/* RESET_HISTORY set means firmware reset the controller on its own */
	if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
		mpi3mr_set_trigger_data_in_all_hdb(mrioc,
		    MPI3MR_HDB_TRIGGER_TYPE_FW_RELEASED, NULL, 0);
		mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_FIRMWARE, 0);
		return;
	}

	/*Check for fault state every one second and issue Soft reset*/
	ioc_state = mpi3mr_get_iocstate(mrioc);
	if (ioc_state != MRIOC_STATE_FAULT)
		goto schedule_work;

	trigger_data.fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK;
	mpi3mr_set_trigger_data_in_all_hdb(mrioc,
	    MPI3MR_HDB_TRIGGER_TYPE_FAULT, &trigger_data, 0);
	host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
	/* Let an in-progress firmware diag save finish before resetting */
	if (host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS) {
		if (!mrioc->diagsave_timeout) {
			mpi3mr_print_fault_info(mrioc);
			ioc_warn(mrioc, "diag save in progress\n");
		}
		if ((mrioc->diagsave_timeout++) <= MPI3_SYSIF_DIAG_SAVE_TIMEOUT)
			goto schedule_work;
	}

	mpi3mr_print_fault_info(mrioc);
	mrioc->diagsave_timeout = 0;

	if (!mpi3mr_is_fault_recoverable(mrioc)) {
		mrioc->unrecoverable = 1;
		goto schedule_work;
	}

	mpi3mr_save_fault_info(mrioc);
	mpi3mr_fault_uevent_emit(mrioc);
	mrioc->fwfault_counter++;

	/* Pick the recovery action based on the reported fault code */
	switch (trigger_data.fault) {
	case MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED:
	case MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED:
		ioc_warn(mrioc,
		    "controller requires system power cycle, marking controller as unrecoverable\n");
		mrioc->unrecoverable = 1;
		goto schedule_work;
	case MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS:
		/* A reset is already underway; just keep watching */
		goto schedule_work;
	case MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET:
		reset_reason = MPI3MR_RESET_FROM_CIACTIV_FAULT;
		break;
	default:
		break;
	}
	mpi3mr_soft_reset_handler(mrioc, reset_reason, 0);
	return;

schedule_work:
	/* Requeue under the lock so a concurrent stop cannot race with us */
	spin_lock_irqsave(&mrioc->watchdog_lock, flags);
	if (mrioc->watchdog_work_q)
		queue_delayed_work(mrioc->watchdog_work_q,
		    &mrioc->watchdog_work,
		    msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
	spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
	return;
}
2980 
2981 /**
2982  * mpi3mr_start_watchdog - Start watchdog
2983  * @mrioc: Adapter instance reference
2984  *
2985  * Create and start the watchdog thread to monitor controller
2986  * faults.
2987  *
2988  * Return: Nothing.
2989  */
mpi3mr_start_watchdog(struct mpi3mr_ioc * mrioc)2990 void mpi3mr_start_watchdog(struct mpi3mr_ioc *mrioc)
2991 {
2992 	if (mrioc->watchdog_work_q)
2993 		return;
2994 
2995 	INIT_DELAYED_WORK(&mrioc->watchdog_work, mpi3mr_watchdog_work);
2996 	mrioc->watchdog_work_q = alloc_ordered_workqueue(
2997 		"watchdog_%s%d", WQ_MEM_RECLAIM, mrioc->name, mrioc->id);
2998 	if (!mrioc->watchdog_work_q) {
2999 		ioc_err(mrioc, "%s: failed (line=%d)\n", __func__, __LINE__);
3000 		return;
3001 	}
3002 
3003 	if (mrioc->watchdog_work_q)
3004 		queue_delayed_work(mrioc->watchdog_work_q,
3005 		    &mrioc->watchdog_work,
3006 		    msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
3007 }
3008 
3009 /**
3010  * mpi3mr_stop_watchdog - Stop watchdog
3011  * @mrioc: Adapter instance reference
3012  *
3013  * Stop the watchdog thread created to monitor controller
3014  * faults.
3015  *
3016  * Return: Nothing.
3017  */
mpi3mr_stop_watchdog(struct mpi3mr_ioc * mrioc)3018 void mpi3mr_stop_watchdog(struct mpi3mr_ioc *mrioc)
3019 {
3020 	unsigned long flags;
3021 	struct workqueue_struct *wq;
3022 
3023 	spin_lock_irqsave(&mrioc->watchdog_lock, flags);
3024 	wq = mrioc->watchdog_work_q;
3025 	mrioc->watchdog_work_q = NULL;
3026 	spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
3027 	if (wq) {
3028 		if (!cancel_delayed_work_sync(&mrioc->watchdog_work))
3029 			flush_workqueue(wq);
3030 		destroy_workqueue(wq);
3031 	}
3032 }
3033 
3034 /**
3035  * mpi3mr_setup_admin_qpair - Setup admin queue pair
3036  * @mrioc: Adapter instance reference
3037  *
3038  * Allocate memory for admin queue pair if required and register
3039  * the admin queue with the controller.
3040  *
3041  * Return: 0 on success, non-zero on failures.
3042  */
static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u32 num_admin_entries = 0;

	/* Fixed-size admin request queue; depth derived from frame size */
	mrioc->admin_req_q_sz = MPI3MR_ADMIN_REQ_Q_SIZE;
	mrioc->num_admin_req = mrioc->admin_req_q_sz /
	    MPI3MR_ADMIN_REQ_FRAME_SZ;
	mrioc->admin_req_ci = mrioc->admin_req_pi = 0;

	mrioc->admin_reply_q_sz = MPI3MR_ADMIN_REPLY_Q_SIZE;
	mrioc->num_admin_replies = mrioc->admin_reply_q_sz /
	    MPI3MR_ADMIN_REPLY_FRAME_SZ;
	mrioc->admin_reply_ci = 0;
	/* Expected phase bit starts at 1 for a freshly initialized queue */
	mrioc->admin_reply_ephase = 1;
	atomic_set(&mrioc->admin_reply_q_in_use, 0);
	atomic_set(&mrioc->admin_pend_isr, 0);

	/* Allocate only on first call; re-init paths reuse the buffers */
	if (!mrioc->admin_req_base) {
		mrioc->admin_req_base = dma_alloc_coherent(&mrioc->pdev->dev,
		    mrioc->admin_req_q_sz, &mrioc->admin_req_dma, GFP_KERNEL);

		if (!mrioc->admin_req_base) {
			retval = -1;
			goto out_failed;
		}

		mrioc->admin_reply_base = dma_alloc_coherent(&mrioc->pdev->dev,
		    mrioc->admin_reply_q_sz, &mrioc->admin_reply_dma,
		    GFP_KERNEL);

		if (!mrioc->admin_reply_base) {
			retval = -1;
			goto out_failed;
		}
	}

	/* Program queue depths (replies in the upper 16 bits) and the
	 * queue base addresses into the controller's system interface
	 * registers, then publish the initial PI/CI values.
	 */
	num_admin_entries = (mrioc->num_admin_replies << 16) |
	    (mrioc->num_admin_req);
	writel(num_admin_entries, &mrioc->sysif_regs->admin_queue_num_entries);
	mpi3mr_writeq(mrioc->admin_req_dma,
		&mrioc->sysif_regs->admin_request_queue_address,
		&mrioc->adm_req_q_bar_writeq_lock);
	mpi3mr_writeq(mrioc->admin_reply_dma,
		&mrioc->sysif_regs->admin_reply_queue_address,
		&mrioc->adm_reply_q_bar_writeq_lock);
	writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi);
	writel(mrioc->admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
	return retval;

out_failed:

	/* Undo whichever allocations succeeded before the failure */
	if (mrioc->admin_reply_base) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
		    mrioc->admin_reply_base, mrioc->admin_reply_dma);
		mrioc->admin_reply_base = NULL;
	}
	if (mrioc->admin_req_base) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
		    mrioc->admin_req_base, mrioc->admin_req_dma);
		mrioc->admin_req_base = NULL;
	}
	return retval;
}
3107 
3108 /**
3109  * mpi3mr_issue_iocfacts - Send IOC Facts
3110  * @mrioc: Adapter instance reference
3111  * @facts_data: Cached IOC facts data
3112  *
3113  * Issue IOC Facts MPI request through admin queue and wait for
3114  * the completion of it or time out.
3115  *
3116  * Return: 0 on success, non-zero on failures.
3117  */
static int mpi3mr_issue_iocfacts(struct mpi3mr_ioc *mrioc,
	struct mpi3_ioc_facts_data *facts_data)
{
	struct mpi3_ioc_facts_request iocfacts_req;
	void *data = NULL;
	dma_addr_t data_dma;
	u32 data_len = sizeof(*facts_data);
	int retval = 0;
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;

	/* DMA-able buffer the controller fills with the facts payload */
	data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
	    GFP_KERNEL);

	if (!data) {
		retval = -1;
		goto out;
	}

	memset(&iocfacts_req, 0, sizeof(iocfacts_req));
	/* init_cmds is a single shared command slot; serialize via mutex */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue IOCFacts: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	iocfacts_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	iocfacts_req.function = MPI3_FUNCTION_IOC_FACTS;

	mpi3mr_add_sg_single(&iocfacts_req.sgl, sgl_flags, data_len,
	    data_dma);

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &iocfacts_req,
	    sizeof(iocfacts_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue IOCFacts: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "ioc_facts timed out\n");
		/* Timed out: hand off to the fault/reset escalation path */
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue IOCFacts: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	/* Copy out of the DMA buffer, then decode/cache into mrioc->facts */
	memcpy(facts_data, (u8 *)data, data_len);
	mpi3mr_process_factsdata(mrioc, facts_data);
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	if (data)
		dma_free_coherent(&mrioc->pdev->dev, data_len, data, data_dma);

	return retval;
}
3190 
/**
 * mpi3mr_check_reset_dma_mask - Check and reset the DMA mask
 * @mrioc: Adapter instance reference
 *
 * Check whether the new DMA mask requested through IOCFacts by
 * firmware needs to be set, if so set it.
 *
 * Return: 0 on success, non-zero on failure.
 */
mpi3mr_check_reset_dma_mask(struct mpi3mr_ioc * mrioc)3200 static inline int mpi3mr_check_reset_dma_mask(struct mpi3mr_ioc *mrioc)
3201 {
3202 	struct pci_dev *pdev = mrioc->pdev;
3203 	int r;
3204 	u64 facts_dma_mask = DMA_BIT_MASK(mrioc->facts.dma_mask);
3205 
3206 	if (!mrioc->facts.dma_mask || (mrioc->dma_mask <= facts_dma_mask))
3207 		return 0;
3208 
3209 	ioc_info(mrioc, "Changing DMA mask from 0x%016llx to 0x%016llx\n",
3210 	    mrioc->dma_mask, facts_dma_mask);
3211 
3212 	r = dma_set_mask_and_coherent(&pdev->dev, facts_dma_mask);
3213 	if (r) {
3214 		ioc_err(mrioc, "Setting DMA mask to 0x%016llx failed: %d\n",
3215 		    facts_dma_mask, r);
3216 		return r;
3217 	}
3218 	mrioc->dma_mask = facts_dma_mask;
3219 	return r;
3220 }
3221 
3222 /**
3223  * mpi3mr_process_factsdata - Process IOC facts data
3224  * @mrioc: Adapter instance reference
3225  * @facts_data: Cached IOC facts data
3226  *
3227  * Convert IOC facts data into cpu endianness and cache it in
3228  * the driver .
3229  *
3230  * Return: Nothing.
3231  */
mpi3mr_process_factsdata(struct mpi3mr_ioc * mrioc,struct mpi3_ioc_facts_data * facts_data)3232 static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
3233 	struct mpi3_ioc_facts_data *facts_data)
3234 {
3235 	u32 ioc_config, req_sz, facts_flags;
3236 
3237 	if ((le16_to_cpu(facts_data->ioc_facts_data_length)) !=
3238 	    (sizeof(*facts_data) / 4)) {
3239 		ioc_warn(mrioc,
3240 		    "IOCFactsdata length mismatch driver_sz(%zu) firmware_sz(%d)\n",
3241 		    sizeof(*facts_data),
3242 		    le16_to_cpu(facts_data->ioc_facts_data_length) * 4);
3243 	}
3244 
3245 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
3246 	req_sz = 1 << ((ioc_config & MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ) >>
3247 	    MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ_SHIFT);
3248 	if (le16_to_cpu(facts_data->ioc_request_frame_size) != (req_sz / 4)) {
3249 		ioc_err(mrioc,
3250 		    "IOCFacts data reqFrameSize mismatch hw_size(%d) firmware_sz(%d)\n",
3251 		    req_sz / 4, le16_to_cpu(facts_data->ioc_request_frame_size));
3252 	}
3253 
3254 	memset(&mrioc->facts, 0, sizeof(mrioc->facts));
3255 
3256 	facts_flags = le32_to_cpu(facts_data->flags);
3257 	mrioc->facts.op_req_sz = req_sz;
3258 	mrioc->op_reply_desc_sz = 1 << ((ioc_config &
3259 	    MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ) >>
3260 	    MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT);
3261 
3262 	mrioc->facts.ioc_num = facts_data->ioc_number;
3263 	mrioc->facts.who_init = facts_data->who_init;
3264 	mrioc->facts.max_msix_vectors = le16_to_cpu(facts_data->max_msix_vectors);
3265 	mrioc->facts.personality = (facts_flags &
3266 	    MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK);
3267 	mrioc->facts.dma_mask = (facts_flags &
3268 	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
3269 	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
3270 	mrioc->facts.dma_mask = (facts_flags &
3271 	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
3272 	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
3273 	mrioc->facts.max_req_limit = (facts_flags &
3274 			MPI3_IOCFACTS_FLAGS_MAX_REQ_PER_REPLY_QUEUE_LIMIT);
3275 	mrioc->facts.protocol_flags = facts_data->protocol_flags;
3276 	mrioc->facts.mpi_version = le32_to_cpu(facts_data->mpi_version.word);
3277 	mrioc->facts.max_reqs = le16_to_cpu(facts_data->max_outstanding_requests);
3278 	mrioc->facts.product_id = le16_to_cpu(facts_data->product_id);
3279 	mrioc->facts.reply_sz = le16_to_cpu(facts_data->reply_frame_size) * 4;
3280 	mrioc->facts.exceptions = le16_to_cpu(facts_data->ioc_exceptions);
3281 	mrioc->facts.max_perids = le16_to_cpu(facts_data->max_persistent_id);
3282 	mrioc->facts.max_vds = le16_to_cpu(facts_data->max_vds);
3283 	mrioc->facts.max_hpds = le16_to_cpu(facts_data->max_host_pds);
3284 	mrioc->facts.max_advhpds = le16_to_cpu(facts_data->max_adv_host_pds);
3285 	mrioc->facts.max_raid_pds = le16_to_cpu(facts_data->max_raid_pds);
3286 	mrioc->facts.max_nvme = le16_to_cpu(facts_data->max_nvme);
3287 	mrioc->facts.max_pcie_switches =
3288 	    le16_to_cpu(facts_data->max_pcie_switches);
3289 	mrioc->facts.max_sasexpanders =
3290 	    le16_to_cpu(facts_data->max_sas_expanders);
3291 	mrioc->facts.max_data_length = le16_to_cpu(facts_data->max_data_length);
3292 	mrioc->facts.max_sasinitiators =
3293 	    le16_to_cpu(facts_data->max_sas_initiators);
3294 	mrioc->facts.max_enclosures = le16_to_cpu(facts_data->max_enclosures);
3295 	mrioc->facts.min_devhandle = le16_to_cpu(facts_data->min_dev_handle);
3296 	mrioc->facts.max_devhandle = le16_to_cpu(facts_data->max_dev_handle);
3297 	mrioc->facts.max_op_req_q =
3298 	    le16_to_cpu(facts_data->max_operational_request_queues);
3299 	mrioc->facts.max_op_reply_q =
3300 	    le16_to_cpu(facts_data->max_operational_reply_queues);
3301 	mrioc->facts.ioc_capabilities =
3302 	    le32_to_cpu(facts_data->ioc_capabilities);
3303 	mrioc->facts.fw_ver.build_num =
3304 	    le16_to_cpu(facts_data->fw_version.build_num);
3305 	mrioc->facts.fw_ver.cust_id =
3306 	    le16_to_cpu(facts_data->fw_version.customer_id);
3307 	mrioc->facts.fw_ver.ph_minor = facts_data->fw_version.phase_minor;
3308 	mrioc->facts.fw_ver.ph_major = facts_data->fw_version.phase_major;
3309 	mrioc->facts.fw_ver.gen_minor = facts_data->fw_version.gen_minor;
3310 	mrioc->facts.fw_ver.gen_major = facts_data->fw_version.gen_major;
3311 	mrioc->msix_count = min_t(int, mrioc->msix_count,
3312 	    mrioc->facts.max_msix_vectors);
3313 	mrioc->facts.sge_mod_mask = facts_data->sge_modifier_mask;
3314 	mrioc->facts.sge_mod_value = facts_data->sge_modifier_value;
3315 	mrioc->facts.sge_mod_shift = facts_data->sge_modifier_shift;
3316 	mrioc->facts.shutdown_timeout =
3317 	    le16_to_cpu(facts_data->shutdown_timeout);
3318 	mrioc->facts.diag_trace_sz =
3319 	    le32_to_cpu(facts_data->diag_trace_size);
3320 	mrioc->facts.diag_fw_sz =
3321 	    le32_to_cpu(facts_data->diag_fw_size);
3322 	mrioc->facts.diag_drvr_sz = le32_to_cpu(facts_data->diag_driver_size);
3323 	mrioc->facts.max_dev_per_tg =
3324 	    facts_data->max_devices_per_throttle_group;
3325 	mrioc->facts.io_throttle_data_length =
3326 	    le16_to_cpu(facts_data->io_throttle_data_length);
3327 	mrioc->facts.max_io_throttle_group =
3328 	    le16_to_cpu(facts_data->max_io_throttle_group);
3329 	mrioc->facts.io_throttle_low = le16_to_cpu(facts_data->io_throttle_low);
3330 	mrioc->facts.io_throttle_high =
3331 	    le16_to_cpu(facts_data->io_throttle_high);
3332 
3333 	if (mrioc->facts.max_data_length ==
3334 	    MPI3_IOCFACTS_MAX_DATA_LENGTH_NOT_REPORTED)
3335 		mrioc->facts.max_data_length = MPI3MR_DEFAULT_MAX_IO_SIZE;
3336 	else
3337 		mrioc->facts.max_data_length *= MPI3MR_PAGE_SIZE_4K;
3338 	/* Store in 512b block count */
3339 	if (mrioc->facts.io_throttle_data_length)
3340 		mrioc->io_throttle_data_length =
3341 		    (mrioc->facts.io_throttle_data_length * 2 * 4);
3342 	else
3343 		/* set the length to 1MB + 1K to disable throttle */
3344 		mrioc->io_throttle_data_length = (mrioc->facts.max_data_length / 512) + 2;
3345 
3346 	mrioc->io_throttle_high = (mrioc->facts.io_throttle_high * 2 * 1024);
3347 	mrioc->io_throttle_low = (mrioc->facts.io_throttle_low * 2 * 1024);
3348 
3349 	ioc_info(mrioc, "ioc_num(%d), maxopQ(%d), maxopRepQ(%d), maxdh(%d),",
3350 	    mrioc->facts.ioc_num, mrioc->facts.max_op_req_q,
3351 	    mrioc->facts.max_op_reply_q, mrioc->facts.max_devhandle);
3352 	ioc_info(mrioc,
3353 	    "maxreqs(%d), mindh(%d) maxvectors(%d) maxperids(%d)\n",
3354 	    mrioc->facts.max_reqs, mrioc->facts.min_devhandle,
3355 	    mrioc->facts.max_msix_vectors, mrioc->facts.max_perids);
3356 	ioc_info(mrioc, "SGEModMask 0x%x SGEModVal 0x%x SGEModShift 0x%x ",
3357 	    mrioc->facts.sge_mod_mask, mrioc->facts.sge_mod_value,
3358 	    mrioc->facts.sge_mod_shift);
3359 	ioc_info(mrioc, "DMA mask %d InitialPE status 0x%x max_data_len (%d)\n",
3360 	    mrioc->facts.dma_mask, (facts_flags &
3361 	    MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK), mrioc->facts.max_data_length);
3362 	ioc_info(mrioc,
3363 	    "max_dev_per_throttle_group(%d), max_throttle_groups(%d)\n",
3364 	    mrioc->facts.max_dev_per_tg, mrioc->facts.max_io_throttle_group);
3365 	ioc_info(mrioc,
3366 	   "io_throttle_data_len(%dKiB), io_throttle_high(%dMiB), io_throttle_low(%dMiB)\n",
3367 	   mrioc->facts.io_throttle_data_length * 4,
3368 	   mrioc->facts.io_throttle_high, mrioc->facts.io_throttle_low);
3369 }
3370 
/**
 * mpi3mr_alloc_reply_sense_bufs - Allocate reply and sense buffers
 * @mrioc: Adapter instance reference
 *
 * Allocate and initialize the reply free buffers, sense
 * buffers, reply free queue and sense buffer queue.
 *
 * Return: 0 on success, non-zero on failures.
 */
static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u32 sz, i;

	/* First reply buffer doubles as the "already allocated" marker:
	 * re-init paths reuse all buffers allocated on the first call.
	 */
	if (mrioc->init_cmds.reply)
		return retval;

	mrioc->init_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->init_cmds.reply)
		goto out_failed;

	mrioc->bsg_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->bsg_cmds.reply)
		goto out_failed;

	mrioc->transport_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->transport_cmds.reply)
		goto out_failed;

	/* Per-command reply buffers for device-removal handshake commands */
	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
		mrioc->dev_rmhs_cmds[i].reply = kzalloc(mrioc->reply_sz,
		    GFP_KERNEL);
		if (!mrioc->dev_rmhs_cmds[i].reply)
			goto out_failed;
	}

	/* Per-command reply buffers for event acknowledgment commands */
	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
		mrioc->evtack_cmds[i].reply = kzalloc(mrioc->reply_sz,
		    GFP_KERNEL);
		if (!mrioc->evtack_cmds[i].reply)
			goto out_failed;
	}

	mrioc->host_tm_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->host_tm_cmds.reply)
		goto out_failed;

	mrioc->pel_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->pel_cmds.reply)
		goto out_failed;

	mrioc->pel_abort_cmd.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->pel_abort_cmd.reply)
		goto out_failed;

	/* Tracking bitmaps sized by device-handle/command-slot counts */
	mrioc->dev_handle_bitmap_bits = mrioc->facts.max_devhandle;
	mrioc->removepend_bitmap = bitmap_zalloc(mrioc->dev_handle_bitmap_bits,
						 GFP_KERNEL);
	if (!mrioc->removepend_bitmap)
		goto out_failed;

	mrioc->devrem_bitmap = bitmap_zalloc(MPI3MR_NUM_DEVRMCMD, GFP_KERNEL);
	if (!mrioc->devrem_bitmap)
		goto out_failed;

	mrioc->evtack_cmds_bitmap = bitmap_zalloc(MPI3MR_NUM_EVTACKCMD,
						  GFP_KERNEL);
	if (!mrioc->evtack_cmds_bitmap)
		goto out_failed;

	/* Queue depths: one extra slot keeps the circular queues from
	 * appearing empty when completely full.
	 */
	mrioc->num_reply_bufs = mrioc->facts.max_reqs + MPI3MR_NUM_EVT_REPLIES;
	mrioc->reply_free_qsz = mrioc->num_reply_bufs + 1;
	mrioc->num_sense_bufs = mrioc->facts.max_reqs / MPI3MR_SENSEBUF_FACTOR;
	mrioc->sense_buf_q_sz = mrioc->num_sense_bufs + 1;

	/* reply buffer pool, 16 byte align */
	sz = mrioc->num_reply_bufs * mrioc->reply_sz;
	mrioc->reply_buf_pool = dma_pool_create("reply_buf pool",
	    &mrioc->pdev->dev, sz, 16, 0);
	if (!mrioc->reply_buf_pool) {
		ioc_err(mrioc, "reply buf pool: dma_pool_create failed\n");
		goto out_failed;
	}

	mrioc->reply_buf = dma_pool_zalloc(mrioc->reply_buf_pool, GFP_KERNEL,
	    &mrioc->reply_buf_dma);
	if (!mrioc->reply_buf)
		goto out_failed;

	/* Upper bound used to validate reply addresses from the IOC */
	mrioc->reply_buf_dma_max_address = mrioc->reply_buf_dma + sz;

	/* reply free queue, 8 byte align */
	sz = mrioc->reply_free_qsz * 8;
	mrioc->reply_free_q_pool = dma_pool_create("reply_free_q pool",
	    &mrioc->pdev->dev, sz, 8, 0);
	if (!mrioc->reply_free_q_pool) {
		ioc_err(mrioc, "reply_free_q pool: dma_pool_create failed\n");
		goto out_failed;
	}
	mrioc->reply_free_q = dma_pool_zalloc(mrioc->reply_free_q_pool,
	    GFP_KERNEL, &mrioc->reply_free_q_dma);
	if (!mrioc->reply_free_q)
		goto out_failed;

	/* sense buffer pool,  4 byte align */
	sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
	mrioc->sense_buf_pool = dma_pool_create("sense_buf pool",
	    &mrioc->pdev->dev, sz, 4, 0);
	if (!mrioc->sense_buf_pool) {
		ioc_err(mrioc, "sense_buf pool: dma_pool_create failed\n");
		goto out_failed;
	}
	mrioc->sense_buf = dma_pool_zalloc(mrioc->sense_buf_pool, GFP_KERNEL,
	    &mrioc->sense_buf_dma);
	if (!mrioc->sense_buf)
		goto out_failed;

	/* sense buffer queue, 8 byte align */
	sz = mrioc->sense_buf_q_sz * 8;
	mrioc->sense_buf_q_pool = dma_pool_create("sense_buf_q pool",
	    &mrioc->pdev->dev, sz, 8, 0);
	if (!mrioc->sense_buf_q_pool) {
		ioc_err(mrioc, "sense_buf_q pool: dma_pool_create failed\n");
		goto out_failed;
	}
	mrioc->sense_buf_q = dma_pool_zalloc(mrioc->sense_buf_q_pool,
	    GFP_KERNEL, &mrioc->sense_buf_q_dma);
	if (!mrioc->sense_buf_q)
		goto out_failed;

	return retval;

out_failed:
	/*
	 * NOTE(review): partial allocations are not freed here -
	 * presumably the caller's teardown path releases them; confirm
	 * against the driver's free-memory routine.
	 */
	retval = -1;
	return retval;
}
3507 
3508 /**
3509  * mpimr_initialize_reply_sbuf_queues - initialize reply sense
3510  * buffers
3511  * @mrioc: Adapter instance reference
3512  *
3513  * Helper function to initialize reply and sense buffers along
3514  * with some debug prints.
3515  *
3516  * Return:  None.
3517  */
mpimr_initialize_reply_sbuf_queues(struct mpi3mr_ioc * mrioc)3518 static void mpimr_initialize_reply_sbuf_queues(struct mpi3mr_ioc *mrioc)
3519 {
3520 	u32 sz, i;
3521 	dma_addr_t phy_addr;
3522 
3523 	sz = mrioc->num_reply_bufs * mrioc->reply_sz;
3524 	ioc_info(mrioc,
3525 	    "reply buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
3526 	    mrioc->reply_buf, mrioc->num_reply_bufs, mrioc->reply_sz,
3527 	    (sz / 1024), (unsigned long long)mrioc->reply_buf_dma);
3528 	sz = mrioc->reply_free_qsz * 8;
3529 	ioc_info(mrioc,
3530 	    "reply_free_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
3531 	    mrioc->reply_free_q, mrioc->reply_free_qsz, 8, (sz / 1024),
3532 	    (unsigned long long)mrioc->reply_free_q_dma);
3533 	sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
3534 	ioc_info(mrioc,
3535 	    "sense_buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
3536 	    mrioc->sense_buf, mrioc->num_sense_bufs, MPI3MR_SENSE_BUF_SZ,
3537 	    (sz / 1024), (unsigned long long)mrioc->sense_buf_dma);
3538 	sz = mrioc->sense_buf_q_sz * 8;
3539 	ioc_info(mrioc,
3540 	    "sense_buf_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
3541 	    mrioc->sense_buf_q, mrioc->sense_buf_q_sz, 8, (sz / 1024),
3542 	    (unsigned long long)mrioc->sense_buf_q_dma);
3543 
3544 	/* initialize Reply buffer Queue */
3545 	for (i = 0, phy_addr = mrioc->reply_buf_dma;
3546 	    i < mrioc->num_reply_bufs; i++, phy_addr += mrioc->reply_sz)
3547 		mrioc->reply_free_q[i] = cpu_to_le64(phy_addr);
3548 	mrioc->reply_free_q[i] = cpu_to_le64(0);
3549 
3550 	/* initialize Sense Buffer Queue */
3551 	for (i = 0, phy_addr = mrioc->sense_buf_dma;
3552 	    i < mrioc->num_sense_bufs; i++, phy_addr += MPI3MR_SENSE_BUF_SZ)
3553 		mrioc->sense_buf_q[i] = cpu_to_le64(phy_addr);
3554 	mrioc->sense_buf_q[i] = cpu_to_le64(0);
3555 }
3556 
3557 /**
3558  * mpi3mr_issue_iocinit - Send IOC Init
3559  * @mrioc: Adapter instance reference
3560  *
3561  * Issue IOC Init MPI request through admin queue and wait for
3562  * the completion of it or time out.
3563  *
3564  * Return: 0 on success, non-zero on failures.
3565  */
static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc)
{
	struct mpi3_ioc_init_request iocinit_req;
	struct mpi3_driver_info_layout *drv_info;
	dma_addr_t data_dma;
	u32 data_len = sizeof(*drv_info);
	int retval = 0;
	ktime_t current_time;

	/* DMA-coherent buffer describing the driver; the IOC reads it via
	 * the driver_information_address field of the request. */
	drv_info = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
	    GFP_KERNEL);
	if (!drv_info) {
		retval = -1;
		goto out;
	}
	/* (Re)fill the reply free and sense buffer free queues before the
	 * IOC is told about them below. */
	mpimr_initialize_reply_sbuf_queues(mrioc);

	drv_info->information_length = cpu_to_le32(data_len);
	strscpy(drv_info->driver_signature, "Broadcom", sizeof(drv_info->driver_signature));
	strscpy(drv_info->os_name, utsname()->sysname, sizeof(drv_info->os_name));
	strscpy(drv_info->os_version, utsname()->release, sizeof(drv_info->os_version));
	strscpy(drv_info->driver_name, MPI3MR_DRIVER_NAME, sizeof(drv_info->driver_name));
	strscpy(drv_info->driver_version, MPI3MR_DRIVER_VERSION, sizeof(drv_info->driver_version));
	strscpy(drv_info->driver_release_date, MPI3MR_DRIVER_RELDATE,
	    sizeof(drv_info->driver_release_date));
	drv_info->driver_capabilities = 0;
	/* Keep a host-side copy of what was reported to the firmware */
	memcpy((u8 *)&mrioc->driver_info, (u8 *)drv_info,
	    sizeof(mrioc->driver_info));

	memset(&iocinit_req, 0, sizeof(iocinit_req));
	/* init_cmds is a single shared command tracker; serialize its use */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue IOCInit: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	iocinit_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	iocinit_req.function = MPI3_FUNCTION_IOC_INIT;
	/* Advertise the MPI 3.0 version level this driver implements */
	iocinit_req.mpi_version.mpi3_version.dev = MPI3_VERSION_DEV;
	iocinit_req.mpi_version.mpi3_version.unit = MPI3_VERSION_UNIT;
	iocinit_req.mpi_version.mpi3_version.major = MPI3_VERSION_MAJOR;
	iocinit_req.mpi_version.mpi3_version.minor = MPI3_VERSION_MINOR;
	iocinit_req.who_init = MPI3_WHOINIT_HOST_DRIVER;
	/* Hand the pre-populated reply free and sense free queues to the IOC */
	iocinit_req.reply_free_queue_depth = cpu_to_le16(mrioc->reply_free_qsz);
	iocinit_req.reply_free_queue_address =
	    cpu_to_le64(mrioc->reply_free_q_dma);
	iocinit_req.sense_buffer_length = cpu_to_le16(MPI3MR_SENSE_BUF_SZ);
	iocinit_req.sense_buffer_free_queue_depth =
	    cpu_to_le16(mrioc->sense_buf_q_sz);
	iocinit_req.sense_buffer_free_queue_address =
	    cpu_to_le64(mrioc->sense_buf_q_dma);
	iocinit_req.driver_information_address = cpu_to_le64(data_dma);

	/* Wall-clock time in milliseconds for the IOC's time stamp */
	current_time = ktime_get_real();
	iocinit_req.time_stamp = cpu_to_le64(ktime_to_ms(current_time));

	iocinit_req.msg_flags |=
	    MPI3_IOCINIT_MSGFLAGS_SCSIIOSTATUSREPLY_SUPPORTED;
	iocinit_req.msg_flags |=
		MPI3_IOCINIT_MSGFLAGS_WRITESAMEDIVERT_SUPPORTED;

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &iocinit_req,
	    sizeof(iocinit_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue IOCInit: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		/* Timeout: trigger fault handling/reset for recovery */
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_IOCINIT_TIMEOUT);
		ioc_err(mrioc, "ioc_init timed out\n");
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue IOCInit: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

	/* IOC init succeeded: publish the producer indices of the fully
	 * populated free queues to the controller registers. */
	mrioc->reply_free_queue_host_index = mrioc->num_reply_bufs;
	writel(mrioc->reply_free_queue_host_index,
	    &mrioc->sysif_regs->reply_free_host_index);

	mrioc->sbq_host_index = mrioc->num_sense_bufs;
	writel(mrioc->sbq_host_index,
	    &mrioc->sysif_regs->sense_buffer_free_host_index);
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	if (drv_info)
		dma_free_coherent(&mrioc->pdev->dev, data_len, drv_info,
		    data_dma);

	return retval;
}
3675 
3676 /**
3677  * mpi3mr_unmask_events - Unmask events in event mask bitmap
3678  * @mrioc: Adapter instance reference
3679  * @event: MPI event ID
3680  *
3681  * Un mask the specific event by resetting the event_mask
3682  * bitmap.
3683  *
3684  * Return: 0 on success, non-zero on failures.
3685  */
static void mpi3mr_unmask_events(struct mpi3mr_ioc *mrioc, u16 event)
{
	/*
	 * Clear the mask bit for @event so that the IOC will report it.
	 * Events are tracked as 128 bits spread over 32-bit mask words;
	 * anything out of that range is silently ignored.
	 */
	if (event < 128) {
		u8 word = event / 32;
		u32 bit = 1 << (event % 32);

		mrioc->event_masks[word] &= ~bit;
	}
}
3699 
3700 /**
3701  * mpi3mr_issue_event_notification - Send event notification
3702  * @mrioc: Adapter instance reference
3703  *
3704  * Issue event notification MPI request through admin queue and
3705  * wait for the completion of it or time out.
3706  *
3707  * Return: 0 on success, non-zero on failures.
3708  */
static int mpi3mr_issue_event_notification(struct mpi3mr_ioc *mrioc)
{
	struct mpi3_event_notification_request evtnotify_req;
	int retval = 0;
	u8 i;

	memset(&evtnotify_req, 0, sizeof(evtnotify_req));
	/* init_cmds is a single shared command tracker; serialize its use */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue EvtNotify: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	evtnotify_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	evtnotify_req.function = MPI3_FUNCTION_EVENT_NOTIFICATION;
	/* Pass the driver's current event mask words to the IOC; a set bit
	 * masks (suppresses) the corresponding event. */
	for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
		evtnotify_req.event_masks[i] =
		    cpu_to_le32(mrioc->event_masks[i]);
	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &evtnotify_req,
	    sizeof(evtnotify_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue EvtNotify: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "event notification timed out\n");
		/* Timeout: trigger fault handling/reset for recovery */
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue EvtNotify: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:
	return retval;
}
3763 
3764 /**
3765  * mpi3mr_process_event_ack - Process event acknowledgment
3766  * @mrioc: Adapter instance reference
3767  * @event: MPI3 event ID
3768  * @event_ctx: event context
3769  *
3770  * Send event acknowledgment through admin queue and wait for
3771  * it to complete.
3772  *
3773  * Return: 0 on success, non-zero on failures.
3774  */
int mpi3mr_process_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
	u32 event_ctx)
{
	struct mpi3_event_ack_request evtack_req;
	int retval = 0;

	memset(&evtack_req, 0, sizeof(evtack_req));
	/* init_cmds is a single shared command tracker; serialize its use */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Send EvtAck: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	evtack_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	evtack_req.function = MPI3_FUNCTION_EVENT_ACK;
	evtack_req.event = event;
	evtack_req.event_context = cpu_to_le32(event_ctx);

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &evtack_req,
	    sizeof(evtack_req), 1);
	if (retval) {
		ioc_err(mrioc, "Send EvtAck: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		/*
		 * Fixed copy-paste defect: this message previously read
		 * "Issue EvtNotify: command timed out", mislabeling
		 * event-ack timeouts in the kernel log. All other messages
		 * in this function use the "Send EvtAck" prefix.
		 */
		ioc_err(mrioc, "Send EvtAck: command timed out\n");
		/* Skip the fault handler if a reset already reaped us */
		if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
			mpi3mr_check_rh_fault_ioc(mrioc,
			    MPI3MR_RESET_FROM_EVTACK_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Send EvtAck: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:
	return retval;
}
3830 
3831 /**
3832  * mpi3mr_alloc_chain_bufs - Allocate chain buffers
3833  * @mrioc: Adapter instance reference
3834  *
3835  * Allocate chain buffers and set a bitmap to indicate free
3836  * chain buffers. Chain buffers are used to pass the SGE
3837  * information along with MPI3 SCSI IO requests for host I/O.
3838  *
3839  * Return: 0 on success, non-zero on failure
3840  */
static int mpi3mr_alloc_chain_bufs(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u32 sz, i;
	u16 num_chains;

	/* Already allocated (e.g. a re-initialization path); nothing to do */
	if (mrioc->chain_sgl_list)
		return retval;

	num_chains = mrioc->max_host_ios / MPI3MR_CHAINBUF_FACTOR;

	/* Reserve additional chain frames when DIX protection is enabled */
	if (prot_mask & (SHOST_DIX_TYPE0_PROTECTION
	    | SHOST_DIX_TYPE1_PROTECTION
	    | SHOST_DIX_TYPE2_PROTECTION
	    | SHOST_DIX_TYPE3_PROTECTION))
		num_chains += (num_chains / MPI3MR_CHAINBUFDIX_FACTOR);

	mrioc->chain_buf_count = num_chains;
	sz = sizeof(struct chain_element) * num_chains;
	mrioc->chain_sgl_list = kzalloc(sz, GFP_KERNEL);
	if (!mrioc->chain_sgl_list)
		goto out_failed;

	/* Cap SGL entries so a chain (of 4K pages) cannot describe more
	 * than the controller-reported max_data_length */
	if (mrioc->max_sgl_entries > (mrioc->facts.max_data_length /
		MPI3MR_PAGE_SIZE_4K))
		mrioc->max_sgl_entries = mrioc->facts.max_data_length /
			MPI3MR_PAGE_SIZE_4K;
	sz = mrioc->max_sgl_entries * sizeof(struct mpi3_sge_common);
	ioc_info(mrioc, "number of sgl entries=%d chain buffer size=%dKB\n",
			mrioc->max_sgl_entries, sz/1024);

	/* All chain frames come from one DMA pool, 16-byte aligned */
	mrioc->chain_buf_pool = dma_pool_create("chain_buf pool",
	    &mrioc->pdev->dev, sz, 16, 0);
	if (!mrioc->chain_buf_pool) {
		ioc_err(mrioc, "chain buf pool: dma_pool_create failed\n");
		goto out_failed;
	}

	for (i = 0; i < num_chains; i++) {
		mrioc->chain_sgl_list[i].addr =
		    dma_pool_zalloc(mrioc->chain_buf_pool, GFP_KERNEL,
		    &mrioc->chain_sgl_list[i].dma_addr);

		if (!mrioc->chain_sgl_list[i].addr)
			goto out_failed;
	}
	/* One bit per chain buffer tracks its free/in-use state */
	mrioc->chain_bitmap = bitmap_zalloc(num_chains, GFP_KERNEL);
	if (!mrioc->chain_bitmap)
		goto out_failed;
	return retval;
out_failed:
	/*
	 * NOTE(review): partially allocated list/pool entries are not freed
	 * here; presumably the caller's teardown path releases them via the
	 * pointers left in mrioc — confirm against the driver's free path.
	 */
	retval = -1;
	return retval;
}
3895 
3896 /**
3897  * mpi3mr_port_enable_complete - Mark port enable complete
3898  * @mrioc: Adapter instance reference
3899  * @drv_cmd: Internal command tracker
3900  *
3901  * Call back for asynchronous port enable request sets the
3902  * driver command to indicate port enable request is complete.
3903  *
3904  * Return: Nothing
3905  */
mpi3mr_port_enable_complete(struct mpi3mr_ioc * mrioc,struct mpi3mr_drv_cmd * drv_cmd)3906 static void mpi3mr_port_enable_complete(struct mpi3mr_ioc *mrioc,
3907 	struct mpi3mr_drv_cmd *drv_cmd)
3908 {
3909 	drv_cmd->callback = NULL;
3910 	mrioc->scan_started = 0;
3911 	if (drv_cmd->state & MPI3MR_CMD_RESET)
3912 		mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
3913 	else
3914 		mrioc->scan_failed = drv_cmd->ioc_status;
3915 	drv_cmd->state = MPI3MR_CMD_NOTUSED;
3916 }
3917 
3918 /**
3919  * mpi3mr_issue_port_enable - Issue Port Enable
3920  * @mrioc: Adapter instance reference
3921  * @async: Flag to wait for completion or not
3922  *
3923  * Issue Port Enable MPI request through admin queue and if the
3924  * async flag is not set wait for the completion of the port
3925  * enable or time out.
3926  *
3927  * Return: 0 on success, non-zero on failures.
3928  */
int mpi3mr_issue_port_enable(struct mpi3mr_ioc *mrioc, u8 async)
{
	struct mpi3_port_enable_request pe_req;
	int retval = 0;
	u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT;

	memset(&pe_req, 0, sizeof(pe_req));
	/* init_cmds is a single shared command tracker; serialize its use */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue PortEnable: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	/* Async: completion arrives via callback; sync: wait on 'done' */
	if (async) {
		mrioc->init_cmds.is_waiting = 0;
		mrioc->init_cmds.callback = mpi3mr_port_enable_complete;
	} else {
		mrioc->init_cmds.is_waiting = 1;
		mrioc->init_cmds.callback = NULL;
		init_completion(&mrioc->init_cmds.done);
	}
	pe_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	pe_req.function = MPI3_FUNCTION_PORT_ENABLE;

	retval = mpi3mr_admin_request_post(mrioc, &pe_req, sizeof(pe_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue PortEnable: Admin Post failed\n");
		goto out_unlock;
	}
	if (async) {
		/* Tracker state is released later by the callback */
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}

	wait_for_completion_timeout(&mrioc->init_cmds.done, (pe_timeout * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "port enable timed out\n");
		retval = -1;
		/* Timeout: trigger fault handling/reset for recovery */
		mpi3mr_check_rh_fault_ioc(mrioc, MPI3MR_RESET_FROM_PE_TIMEOUT);
		goto out_unlock;
	}
	/* Synchronous path reuses the async completion logic directly */
	mpi3mr_port_enable_complete(mrioc, &mrioc->init_cmds);

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:
	return retval;
}
3980 
3981 /* Protocol type to name mapper structure */
static const struct {
	u8 protocol;	/* MPI3_IOCFACTS_PROTOCOL_* flag bit */
	char *name;	/* human-readable name used when logging */
} mpi3mr_protocols[] = {
	{ MPI3_IOCFACTS_PROTOCOL_SCSI_INITIATOR, "Initiator" },
	{ MPI3_IOCFACTS_PROTOCOL_SCSI_TARGET, "Target" },
	{ MPI3_IOCFACTS_PROTOCOL_NVME, "NVMe attachment" },
};
3990 
3991 /* Capability to name mapper structure*/
static const struct {
	u32 capability;	/* MPI3_IOCFACTS_CAPABILITY_* flag bit */
	char *name;	/* human-readable name used when logging */
} mpi3mr_capabilities[] = {
	{ MPI3_IOCFACTS_CAPABILITY_RAID_SUPPORTED, "RAID" },
	{ MPI3_IOCFACTS_CAPABILITY_MULTIPATH_SUPPORTED, "MultiPath" },
};
3999 
4000 /**
4001  * mpi3mr_repost_diag_bufs - repost host diag buffers
4002  * @mrioc: Adapter instance reference
4003  *
4004  * repost firmware and trace diag buffers based on global
4005  * trigger flag from driver page 2
4006  *
4007  * Return: 0 on success, non-zero on failures.
4008  */
static int mpi3mr_repost_diag_bufs(struct mpi3mr_ioc *mrioc)
{
	u64 global_trigger;
	union mpi3mr_trigger_data prev_trigger_data;
	struct diag_buffer_desc *trace_hdb = NULL;
	struct diag_buffer_desc *fw_hdb = NULL;
	int retval = 0;
	bool trace_repost_needed = false;
	bool fw_repost_needed = false;
	u8 prev_trigger_type;

	/* Refresh trigger configuration from driver page 2 first */
	retval = mpi3mr_refresh_trigger(mrioc, MPI3_CONFIG_ACTION_READ_CURRENT);
	if (retval)
		return -1;

	trace_hdb = mpi3mr_diag_buffer_for_type(mrioc,
	    MPI3_DIAG_BUFFER_TYPE_TRACE);

	/* Repost only buffers that exist and were not stopped by a
	 * global or element trigger (those must stay frozen for capture) */
	if (trace_hdb &&
	    trace_hdb->status != MPI3MR_HDB_BUFSTATUS_NOT_ALLOCATED &&
	    trace_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_GLOBAL &&
	    trace_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_ELEMENT)
		trace_repost_needed = true;

	fw_hdb = mpi3mr_diag_buffer_for_type(mrioc, MPI3_DIAG_BUFFER_TYPE_FW);

	if (fw_hdb && fw_hdb->status != MPI3MR_HDB_BUFSTATUS_NOT_ALLOCATED &&
	    fw_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_GLOBAL &&
	    fw_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_ELEMENT)
		fw_repost_needed = true;

	/* Driver page 2 global trigger bits can veto either repost */
	if (trace_repost_needed || fw_repost_needed) {
		global_trigger = le64_to_cpu(mrioc->driver_pg2->global_trigger);
		if (global_trigger &
		      MPI3_DRIVER2_GLOBALTRIGGER_POST_DIAG_TRACE_DISABLED)
			trace_repost_needed = false;
		if (global_trigger &
		     MPI3_DRIVER2_GLOBALTRIGGER_POST_DIAG_FW_DISABLED)
			fw_repost_needed = false;
	}

	if (trace_repost_needed) {
		/* Save trigger state so it can be restored if the post fails */
		prev_trigger_type = trace_hdb->trigger_type;
		memcpy(&prev_trigger_data, &trace_hdb->trigger_data,
		    sizeof(trace_hdb->trigger_data));
		retval = mpi3mr_issue_diag_buf_post(mrioc, trace_hdb);
		if (!retval) {
			dprint_init(mrioc, "trace diag buffer reposted");
			mpi3mr_set_trigger_data_in_hdb(trace_hdb,
				    MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN, NULL, 1);
		} else {
			trace_hdb->trigger_type = prev_trigger_type;
			memcpy(&trace_hdb->trigger_data, &prev_trigger_data,
			    sizeof(prev_trigger_data));
			ioc_err(mrioc, "trace diag buffer repost failed");
			return -1;
		}
	}

	if (fw_repost_needed) {
		/* Same save/post/restore sequence for the firmware buffer */
		prev_trigger_type = fw_hdb->trigger_type;
		memcpy(&prev_trigger_data, &fw_hdb->trigger_data,
		    sizeof(fw_hdb->trigger_data));
		retval = mpi3mr_issue_diag_buf_post(mrioc, fw_hdb);
		if (!retval) {
			dprint_init(mrioc, "firmware diag buffer reposted");
			mpi3mr_set_trigger_data_in_hdb(fw_hdb,
				    MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN, NULL, 1);
		} else {
			fw_hdb->trigger_type = prev_trigger_type;
			memcpy(&fw_hdb->trigger_data, &prev_trigger_data,
			    sizeof(prev_trigger_data));
			ioc_err(mrioc, "firmware diag buffer repost failed");
			return -1;
		}
	}
	return retval;
}
4087 
4088 /**
4089  * mpi3mr_read_tsu_interval - Update time stamp interval
4090  * @mrioc: Adapter instance reference
4091  *
4092  * Update time stamp interval if its defined in driver page 1,
4093  * otherwise use default value.
4094  *
4095  * Return: Nothing
4096  */
4097 static void
mpi3mr_read_tsu_interval(struct mpi3mr_ioc * mrioc)4098 mpi3mr_read_tsu_interval(struct mpi3mr_ioc *mrioc)
4099 {
4100 	struct mpi3_driver_page1 driver_pg1;
4101 	u16 pg_sz = sizeof(driver_pg1);
4102 	int retval = 0;
4103 
4104 	mrioc->ts_update_interval = MPI3MR_TSUPDATE_INTERVAL;
4105 
4106 	retval = mpi3mr_cfg_get_driver_pg1(mrioc, &driver_pg1, pg_sz);
4107 	if (!retval && driver_pg1.time_stamp_update)
4108 		mrioc->ts_update_interval = (driver_pg1.time_stamp_update * 60);
4109 }
4110 
4111 /**
4112  * mpi3mr_print_ioc_info - Display controller information
4113  * @mrioc: Adapter instance reference
4114  *
4115  * Display controller personality, capability, supported
4116  * protocols etc.
4117  *
4118  * Return: Nothing
4119  */
4120 static void
mpi3mr_print_ioc_info(struct mpi3mr_ioc * mrioc)4121 mpi3mr_print_ioc_info(struct mpi3mr_ioc *mrioc)
4122 {
4123 	int i = 0, bytes_written = 0;
4124 	const char *personality;
4125 	char protocol[50] = {0};
4126 	char capabilities[100] = {0};
4127 	struct mpi3mr_compimg_ver *fwver = &mrioc->facts.fw_ver;
4128 
4129 	switch (mrioc->facts.personality) {
4130 	case MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA:
4131 		personality = "Enhanced HBA";
4132 		break;
4133 	case MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR:
4134 		personality = "RAID";
4135 		break;
4136 	default:
4137 		personality = "Unknown";
4138 		break;
4139 	}
4140 
4141 	ioc_info(mrioc, "Running in %s Personality", personality);
4142 
4143 	ioc_info(mrioc, "FW version(%d.%d.%d.%d.%d.%d)\n",
4144 	    fwver->gen_major, fwver->gen_minor, fwver->ph_major,
4145 	    fwver->ph_minor, fwver->cust_id, fwver->build_num);
4146 
4147 	for (i = 0; i < ARRAY_SIZE(mpi3mr_protocols); i++) {
4148 		if (mrioc->facts.protocol_flags &
4149 		    mpi3mr_protocols[i].protocol) {
4150 			bytes_written += scnprintf(protocol + bytes_written,
4151 				    sizeof(protocol) - bytes_written, "%s%s",
4152 				    bytes_written ? "," : "",
4153 				    mpi3mr_protocols[i].name);
4154 		}
4155 	}
4156 
4157 	bytes_written = 0;
4158 	for (i = 0; i < ARRAY_SIZE(mpi3mr_capabilities); i++) {
4159 		if (mrioc->facts.protocol_flags &
4160 		    mpi3mr_capabilities[i].capability) {
4161 			bytes_written += scnprintf(capabilities + bytes_written,
4162 				    sizeof(capabilities) - bytes_written, "%s%s",
4163 				    bytes_written ? "," : "",
4164 				    mpi3mr_capabilities[i].name);
4165 		}
4166 	}
4167 
4168 	ioc_info(mrioc, "Protocol=(%s), Capabilities=(%s)\n",
4169 		 protocol, capabilities);
4170 }
4171 
4172 /**
4173  * mpi3mr_cleanup_resources - Free PCI resources
4174  * @mrioc: Adapter instance reference
4175  *
4176  * Unmap PCI device memory and disable PCI device.
4177  *
 * Return: Nothing
4179  */
mpi3mr_cleanup_resources(struct mpi3mr_ioc * mrioc)4180 void mpi3mr_cleanup_resources(struct mpi3mr_ioc *mrioc)
4181 {
4182 	struct pci_dev *pdev = mrioc->pdev;
4183 
4184 	mpi3mr_cleanup_isr(mrioc);
4185 
4186 	if (mrioc->sysif_regs) {
4187 		iounmap((void __iomem *)mrioc->sysif_regs);
4188 		mrioc->sysif_regs = NULL;
4189 	}
4190 
4191 	if (pci_is_enabled(pdev)) {
4192 		if (mrioc->bars)
4193 			pci_release_selected_regions(pdev, mrioc->bars);
4194 		pci_disable_device(pdev);
4195 	}
4196 }
4197 
4198 /**
4199  * mpi3mr_setup_resources - Enable PCI resources
4200  * @mrioc: Adapter instance reference
4201  *
4202  * Enable PCI device memory, MSI-x registers and set DMA mask.
4203  *
4204  * Return: 0 on success and non-zero on failure.
4205  */
int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc)
{
	struct pci_dev *pdev = mrioc->pdev;
	u32 memap_sz = 0;
	int i, retval = 0, capb = 0;
	u16 message_control;
	/* Prefer a previously negotiated mask; otherwise 64-bit DMA when
	 * dma_addr_t is wide enough, else 32-bit */
	u64 dma_mask = mrioc->dma_mask ? mrioc->dma_mask :
	    ((sizeof(dma_addr_t) > 4) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32));

	if (pci_enable_device_mem(pdev)) {
		ioc_err(mrioc, "pci_enable_device_mem: failed\n");
		retval = -ENODEV;
		goto out_failed;
	}

	/* The controller must expose MSI-X; legacy interrupts unsupported */
	capb = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
	if (!capb) {
		ioc_err(mrioc, "Unable to find MSI-X Capabilities\n");
		retval = -ENODEV;
		goto out_failed;
	}
	mrioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);

	if (pci_request_selected_regions(pdev, mrioc->bars,
	    mrioc->driver_name)) {
		ioc_err(mrioc, "pci_request_selected_regions: failed\n");
		retval = -ENODEV;
		goto out_failed;
	}

	/* Map the first memory BAR: the system interface register set */
	for (i = 0; (i < DEVICE_COUNT_RESOURCE); i++) {
		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
			mrioc->sysif_regs_phys = pci_resource_start(pdev, i);
			memap_sz = pci_resource_len(pdev, i);
			mrioc->sysif_regs =
			    ioremap(mrioc->sysif_regs_phys, memap_sz);
			break;
		}
	}

	pci_set_master(pdev);

	/* Try the preferred DMA mask, falling back to 32-bit once */
	retval = dma_set_mask_and_coherent(&pdev->dev, dma_mask);
	if (retval) {
		if (dma_mask != DMA_BIT_MASK(32)) {
			ioc_warn(mrioc, "Setting 64 bit DMA mask failed\n");
			dma_mask = DMA_BIT_MASK(32);
			retval = dma_set_mask_and_coherent(&pdev->dev,
			    dma_mask);
		}
		if (retval) {
			mrioc->dma_mask = 0;
			ioc_err(mrioc, "Setting 32 bit DMA mask also failed\n");
			goto out_failed;
		}
	}
	mrioc->dma_mask = dma_mask;

	if (!mrioc->sysif_regs) {
		ioc_err(mrioc,
		    "Unable to map adapter memory or resource not found\n");
		retval = -EINVAL;
		goto out_failed;
	}

	/* MSI-X Message Control is at capability offset 2; its low bits
	 * hold (table size - 1) */
	pci_read_config_word(pdev, capb + 2, &message_control);
	mrioc->msix_count = (message_control & 0x3FF) + 1;

	pci_save_state(pdev);

	pci_set_drvdata(pdev, mrioc->shost);

	/* Keep interrupts off until the ISR is set up */
	mpi3mr_ioc_disable_intr(mrioc);

	ioc_info(mrioc, "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
	    (unsigned long long)mrioc->sysif_regs_phys,
	    mrioc->sysif_regs, memap_sz);
	ioc_info(mrioc, "Number of MSI-X vectors found in capabilities: (%d)\n",
	    mrioc->msix_count);

	/* Reserve vectors for poll queues (not in kdump), keeping at
	 * least two for admin/default usage */
	if (!reset_devices && poll_queues > 0)
		mrioc->requested_poll_qcount = min_t(int, poll_queues,
				mrioc->msix_count - 2);
	return retval;

out_failed:
	mpi3mr_cleanup_resources(mrioc);
	return retval;
}
4295 
4296 /**
4297  * mpi3mr_enable_events - Enable required events
4298  * @mrioc: Adapter instance reference
4299  *
4300  * This routine unmasks the events required by the driver by
 * sending the appropriate event mask bitmap through an event
4302  * notification request.
4303  *
4304  * Return: 0 on success and non-zero on failure.
4305  */
mpi3mr_enable_events(struct mpi3mr_ioc * mrioc)4306 static int mpi3mr_enable_events(struct mpi3mr_ioc *mrioc)
4307 {
4308 	int retval = 0;
4309 	u32  i;
4310 
4311 	for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
4312 		mrioc->event_masks[i] = -1;
4313 
4314 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_ADDED);
4315 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_INFO_CHANGED);
4316 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_STATUS_CHANGE);
4317 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE);
4318 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_ADDED);
4319 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
4320 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DISCOVERY);
4321 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
4322 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_BROADCAST_PRIMITIVE);
4323 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
4324 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_ENUMERATION);
4325 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_PREPARE_FOR_RESET);
4326 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_CABLE_MGMT);
4327 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENERGY_PACK_CHANGE);
4328 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_DIAGNOSTIC_BUFFER_STATUS_CHANGE);
4329 
4330 	retval = mpi3mr_issue_event_notification(mrioc);
4331 	if (retval)
4332 		ioc_err(mrioc, "failed to issue event notification %d\n",
4333 		    retval);
4334 	return retval;
4335 }
4336 
4337 /**
4338  * mpi3mr_init_ioc - Initialize the controller
4339  * @mrioc: Adapter instance reference
4340  *
4341  * This the controller initialization routine, executed either
4342  * after soft reset or from pci probe callback.
4343  * Setup the required resources, memory map the controller
4344  * registers, create admin and operational reply queue pairs,
4345  * allocate required memory for reply pool, sense buffer pool,
4346  * issue IOC init request to the firmware, unmask the events and
 * issue port enable to discover SAS/SATA/NVMe devices and RAID
4348  * volumes.
4349  *
4350  * Return: 0 on success and non-zero on failure.
4351  */
int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u8 retry = 0;
	struct mpi3_ioc_facts_data facts_data;
	u32 sz;

retry_init:
	/* Phase 1: bring the controller to ready state */
	retval = mpi3mr_bring_ioc_ready(mrioc);
	if (retval) {
		ioc_err(mrioc, "Failed to bring ioc ready: error %d\n",
		    retval);
		goto out_failed_noretry;
	}

	/* Phase 2: minimal (single-vector) ISR for admin traffic */
	retval = mpi3mr_setup_isr(mrioc, 1);
	if (retval) {
		ioc_err(mrioc, "Failed to setup ISR error %d\n",
		    retval);
		goto out_failed_noretry;
	}

	/* Phase 3: read IOC facts and derive driver limits from them */
	retval = mpi3mr_issue_iocfacts(mrioc, &facts_data);
	if (retval) {
		ioc_err(mrioc, "Failed to Issue IOC Facts %d\n",
		    retval);
		goto out_failed;
	}

	/* Reserve some request slots for driver-internal commands */
	mrioc->max_host_ios = mrioc->facts.max_reqs - MPI3MR_INTERNAL_CMDS_RESVD;
	mrioc->shost->max_sectors = mrioc->facts.max_data_length / 512;
	mrioc->num_io_throttle_group = mrioc->facts.max_io_throttle_group;
	atomic_set(&mrioc->pend_large_data_sz, 0);

	/* Keep the footprint small in a kdump kernel */
	if (reset_devices)
		mrioc->max_host_ios = min_t(int, mrioc->max_host_ios,
		    MPI3MR_HOST_IOS_KDUMP);

	/* SAS transport layer is used only when the controller does not
	 * do multipath itself */
	if (!(mrioc->facts.ioc_capabilities &
	    MPI3_IOCFACTS_CAPABILITY_MULTIPATH_SUPPORTED)) {
		mrioc->sas_transport_enabled = 1;
		mrioc->scsi_device_channel = 1;
		mrioc->shost->max_channel = 1;
		mrioc->shost->transportt = mpi3mr_transport_template;
	}

	if (mrioc->facts.max_req_limit)
		mrioc->prevent_reply_qfull = true;

	if (mrioc->facts.ioc_capabilities &
		MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_TRACE_SUPPORTED)
		mrioc->seg_tb_support = true;

	mrioc->reply_sz = mrioc->facts.reply_sz;

	retval = mpi3mr_check_reset_dma_mask(mrioc);
	if (retval) {
		ioc_err(mrioc, "Resetting dma mask failed %d\n",
		    retval);
		goto out_failed_noretry;
	}

	mpi3mr_read_tsu_interval(mrioc);
	mpi3mr_print_ioc_info(mrioc);

	/* Phase 4: diagnostic/ioctl buffers (failures are non-fatal for
	 * the diag buffer post) */
	dprint_init(mrioc, "allocating host diag buffers\n");
	mpi3mr_alloc_diag_bufs(mrioc);

	dprint_init(mrioc, "allocating ioctl dma buffers\n");
	mpi3mr_alloc_ioctl_dma_memory(mrioc);

	dprint_init(mrioc, "posting host diag buffers\n");
	retval = mpi3mr_post_diag_bufs(mrioc);

	if (retval)
		ioc_warn(mrioc, "failed to post host diag buffers\n");

	/* Phase 5: reply/sense and chain buffer pools (skipped when a
	 * previous attempt already allocated them) */
	if (!mrioc->init_cmds.reply) {
		retval = mpi3mr_alloc_reply_sense_bufs(mrioc);
		if (retval) {
			ioc_err(mrioc,
			    "%s :Failed to allocated reply sense buffers %d\n",
			    __func__, retval);
			goto out_failed_noretry;
		}
	}

	if (!mrioc->chain_sgl_list) {
		retval = mpi3mr_alloc_chain_bufs(mrioc);
		if (retval) {
			ioc_err(mrioc, "Failed to allocated chain buffers %d\n",
			    retval);
			goto out_failed_noretry;
		}
	}

	/* Phase 6: IOC init, full ISR, operational queues */
	retval = mpi3mr_issue_iocinit(mrioc);
	if (retval) {
		ioc_err(mrioc, "Failed to Issue IOC Init %d\n",
		    retval);
		goto out_failed;
	}

	retval = mpi3mr_print_pkg_ver(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to get package version\n");
		goto out_failed;
	}

	retval = mpi3mr_setup_isr(mrioc, 0);
	if (retval) {
		ioc_err(mrioc, "Failed to re-setup ISR, error %d\n",
		    retval);
		goto out_failed_noretry;
	}

	retval = mpi3mr_create_op_queues(mrioc);
	if (retval) {
		ioc_err(mrioc, "Failed to create OpQueues error %d\n",
		    retval);
		goto out_failed;
	}

	/* Persistent event log sequence number buffer (DMA-able) */
	if (!mrioc->pel_seqnum_virt) {
		dprint_init(mrioc, "allocating memory for pel_seqnum_virt\n");
		mrioc->pel_seqnum_sz = sizeof(struct mpi3_pel_seq);
		mrioc->pel_seqnum_virt = dma_alloc_coherent(&mrioc->pdev->dev,
		    mrioc->pel_seqnum_sz, &mrioc->pel_seqnum_dma,
		    GFP_KERNEL);
		if (!mrioc->pel_seqnum_virt) {
			retval = -ENOMEM;
			goto out_failed_noretry;
		}
	}

	if (!mrioc->throttle_groups && mrioc->num_io_throttle_group) {
		dprint_init(mrioc, "allocating memory for throttle groups\n");
		sz = sizeof(struct mpi3mr_throttle_group_info);
		mrioc->throttle_groups = kcalloc(mrioc->num_io_throttle_group, sz, GFP_KERNEL);
		if (!mrioc->throttle_groups) {
			retval = -1;
			goto out_failed_noretry;
		}
	}

	/* Phase 7: enable events and refresh trigger configuration */
	retval = mpi3mr_enable_events(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to enable events %d\n",
		    retval);
		goto out_failed;
	}

	retval = mpi3mr_refresh_trigger(mrioc, MPI3_CONFIG_ACTION_READ_CURRENT);
	if (retval) {
		ioc_err(mrioc, "failed to refresh triggers\n");
		goto out_failed;
	}

	ioc_info(mrioc, "controller initialization completed successfully\n");
	return retval;
out_failed:
	/* Retryable failures restart the sequence (up to 2 retries)
	 * after scrubbing the driver's buffers */
	if (retry < 2) {
		retry++;
		ioc_warn(mrioc, "retrying controller initialization, retry_count:%d\n",
		    retry);
		mpi3mr_memset_buffers(mrioc);
		goto retry_init;
	}
	retval = -1;
out_failed_noretry:
	/* Unrecoverable: fault the controller and mark it dead */
	ioc_err(mrioc, "controller initialization failed\n");
	mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
	    MPI3MR_RESET_FROM_CTLR_CLEANUP);
	mrioc->unrecoverable = 1;
	return retval;
}
4528 
/**
 * mpi3mr_reinit_ioc - Re-Initialize the controller
 * @mrioc: Adapter instance reference
 * @is_resume: Called from resume or reset path
 *
 * This is the controller re-initialization routine, executed
 * from the soft reset handler or resume callback. Creates
 * operational reply queue pairs, allocates required memory for
 * reply pool, sense buffer pool, issues IOC init request to the
 * firmware, unmasks the events and issues port enable to
 * discover SAS/SATA/NVMe devices and RAID volumes.
 *
 * On transient failures (goto out_failed) the whole sequence is
 * retried up to two times after resetting driver buffers; fatal
 * failures (goto out_failed_noretry) mark the controller
 * unrecoverable after requesting a diag-fault reset.
 *
 * Return: 0 on success and non-zero on failure.
 */
int mpi3mr_reinit_ioc(struct mpi3mr_ioc *mrioc, u8 is_resume)
{
	int retval = 0;
	u8 retry = 0;
	struct mpi3_ioc_facts_data facts_data;
	u32 pe_timeout, ioc_status;

retry_init:
	/* Port-enable poll budget, in units of the poll interval */
	pe_timeout =
	    (MPI3MR_PORTENABLE_TIMEOUT / MPI3MR_PORTENABLE_POLL_INTERVAL);

	dprint_reset(mrioc, "bringing up the controller to ready state\n");
	retval = mpi3mr_bring_ioc_ready(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to bring to ready state\n");
		goto out_failed_noretry;
	}

	mrioc->io_admin_reset_sync = 0;
	/*
	 * On resume or PCI-error recovery the vectors were torn down, so
	 * re-register a single vector first; otherwise the vectors are
	 * still present and only need to be unmasked.
	 */
	if (is_resume || mrioc->block_on_pci_err) {
		dprint_reset(mrioc, "setting up single ISR\n");
		retval = mpi3mr_setup_isr(mrioc, 1);
		if (retval) {
			ioc_err(mrioc, "failed to setup ISR\n");
			goto out_failed_noretry;
		}
	} else
		mpi3mr_ioc_enable_intr(mrioc);

	dprint_reset(mrioc, "getting ioc_facts\n");
	retval = mpi3mr_issue_iocfacts(mrioc, &facts_data);
	if (retval) {
		ioc_err(mrioc, "failed to get ioc_facts\n");
		goto out_failed;
	}

	/* Facts may have changed across the reset; reject incompatible ones */
	dprint_reset(mrioc, "validating ioc_facts\n");
	retval = mpi3mr_revalidate_factsdata(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to revalidate ioc_facts data\n");
		goto out_failed_noretry;
	}

	mpi3mr_read_tsu_interval(mrioc);
	mpi3mr_print_ioc_info(mrioc);

	/* Diag buffer post failures are non-fatal; only warn */
	if (is_resume) {
		dprint_reset(mrioc, "posting host diag buffers\n");
		retval = mpi3mr_post_diag_bufs(mrioc);
		if (retval)
			ioc_warn(mrioc, "failed to post host diag buffers\n");
	} else {
		retval = mpi3mr_repost_diag_bufs(mrioc);
		if (retval)
			ioc_warn(mrioc, "failed to re post host diag buffers\n");
	}

	dprint_reset(mrioc, "sending ioc_init\n");
	retval = mpi3mr_issue_iocinit(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to send ioc_init\n");
		goto out_failed;
	}

	dprint_reset(mrioc, "getting package version\n");
	retval = mpi3mr_print_pkg_ver(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to get package version\n");
		goto out_failed;
	}

	/* Switch from the single bring-up vector to the full vector set */
	if (is_resume || mrioc->block_on_pci_err) {
		dprint_reset(mrioc, "setting up multiple ISR\n");
		retval = mpi3mr_setup_isr(mrioc, 0);
		if (retval) {
			ioc_err(mrioc, "failed to re-setup ISR\n");
			goto out_failed_noretry;
		}
	}

	dprint_reset(mrioc, "creating operational queue pairs\n");
	retval = mpi3mr_create_op_queues(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to create operational queue pairs\n");
		goto out_failed;
	}

	if (!mrioc->pel_seqnum_virt) {
		dprint_reset(mrioc, "allocating memory for pel_seqnum_virt\n");
		mrioc->pel_seqnum_sz = sizeof(struct mpi3_pel_seq);
		mrioc->pel_seqnum_virt = dma_alloc_coherent(&mrioc->pdev->dev,
		    mrioc->pel_seqnum_sz, &mrioc->pel_seqnum_dma,
		    GFP_KERNEL);
		if (!mrioc->pel_seqnum_virt) {
			retval = -ENOMEM;
			goto out_failed_noretry;
		}
	}

	/*
	 * The SCSI midlayer already sized nr_hw_queues; fewer operational
	 * reply queues than that cannot be handled, so bail out.
	 */
	if (mrioc->shost->nr_hw_queues > mrioc->num_op_reply_q) {
		ioc_err(mrioc,
		    "cannot create minimum number of operational queues expected:%d created:%d\n",
		    mrioc->shost->nr_hw_queues, mrioc->num_op_reply_q);
		retval = -1;
		goto out_failed_noretry;
	}

	dprint_reset(mrioc, "enabling events\n");
	retval = mpi3mr_enable_events(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to enable events\n");
		goto out_failed;
	}

	mrioc->device_refresh_on = 1;
	mpi3mr_add_event_wait_for_device_refresh(mrioc);

	ioc_info(mrioc, "sending port enable\n");
	retval = mpi3mr_issue_port_enable(mrioc, 1);
	if (retval) {
		ioc_err(mrioc, "failed to issue port enable\n");
		goto out_failed;
	}
	/*
	 * Poll for port enable completion (init_cmds back to NOTUSED),
	 * while watching for device removal, unrecoverable state, or a
	 * firmware fault/reset-history indication.
	 */
	do {
		ssleep(MPI3MR_PORTENABLE_POLL_INTERVAL);
		if (mrioc->init_cmds.state == MPI3MR_CMD_NOTUSED)
			break;
		if (!pci_device_is_present(mrioc->pdev))
			mrioc->unrecoverable = 1;
		if (mrioc->unrecoverable) {
			retval = -1;
			goto out_failed_noretry;
		}
		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
		    (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
			mpi3mr_print_fault_info(mrioc);
			/* reclaim the init_cmds tracker before retrying */
			mrioc->init_cmds.is_waiting = 0;
			mrioc->init_cmds.callback = NULL;
			mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
			goto out_failed;
		}
	} while (--pe_timeout);

	if (!pe_timeout) {
		ioc_err(mrioc, "port enable timed out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_PE_TIMEOUT);
		mrioc->init_cmds.is_waiting = 0;
		mrioc->init_cmds.callback = NULL;
		mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
		goto out_failed;
	} else if (mrioc->scan_failed) {
		/* scan failure is reported but does not fail re-init */
		ioc_err(mrioc,
		    "port enable failed with status=0x%04x\n",
		    mrioc->scan_failed);
	} else
		ioc_info(mrioc, "port enable completed successfully\n");

	ioc_info(mrioc, "controller %s completed successfully\n",
	    (is_resume)?"resume":"re-initialization");
	return retval;
out_failed:
	/* transient failure: retry the whole sequence up to two times */
	if (retry < 2) {
		retry++;
		ioc_warn(mrioc, "retrying controller %s, retry_count:%d\n",
		    (is_resume)?"resume":"re-initialization", retry);
		mpi3mr_memset_buffers(mrioc);
		goto retry_init;
	}
	retval = -1;
out_failed_noretry:
	/* fatal: fault the controller and mark it unrecoverable */
	ioc_err(mrioc, "controller %s is failed\n",
	    (is_resume)?"resume":"re-initialization");
	mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
	    MPI3MR_RESET_FROM_CTLR_CLEANUP);
	mrioc->unrecoverable = 1;
	return retval;
}
4722 
4723 /**
4724  * mpi3mr_memset_op_reply_q_buffers - memset the operational reply queue's
4725  *					segments
4726  * @mrioc: Adapter instance reference
4727  * @qidx: Operational reply queue index
4728  *
4729  * Return: Nothing.
4730  */
mpi3mr_memset_op_reply_q_buffers(struct mpi3mr_ioc * mrioc,u16 qidx)4731 static void mpi3mr_memset_op_reply_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx)
4732 {
4733 	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
4734 	struct segments *segments;
4735 	int i, size;
4736 
4737 	if (!op_reply_q->q_segments)
4738 		return;
4739 
4740 	size = op_reply_q->segment_qd * mrioc->op_reply_desc_sz;
4741 	segments = op_reply_q->q_segments;
4742 	for (i = 0; i < op_reply_q->num_segments; i++)
4743 		memset(segments[i].segment, 0, size);
4744 }
4745 
4746 /**
4747  * mpi3mr_memset_op_req_q_buffers - memset the operational request queue's
4748  *					segments
4749  * @mrioc: Adapter instance reference
4750  * @qidx: Operational request queue index
4751  *
4752  * Return: Nothing.
4753  */
mpi3mr_memset_op_req_q_buffers(struct mpi3mr_ioc * mrioc,u16 qidx)4754 static void mpi3mr_memset_op_req_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx)
4755 {
4756 	struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx;
4757 	struct segments *segments;
4758 	int i, size;
4759 
4760 	if (!op_req_q->q_segments)
4761 		return;
4762 
4763 	size = op_req_q->segment_qd * mrioc->facts.op_req_sz;
4764 	segments = op_req_q->q_segments;
4765 	for (i = 0; i < op_req_q->num_segments; i++)
4766 		memset(segments[i].segment, 0, size);
4767 }
4768 
4769 /**
4770  * mpi3mr_memset_buffers - memset memory for a controller
4771  * @mrioc: Adapter instance reference
4772  *
4773  * clear all the memory allocated for a controller, typically
4774  * called post reset to reuse the memory allocated during the
4775  * controller init.
4776  *
4777  * Return: Nothing.
4778  */
mpi3mr_memset_buffers(struct mpi3mr_ioc * mrioc)4779 void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
4780 {
4781 	u16 i;
4782 	struct mpi3mr_throttle_group_info *tg;
4783 
4784 	mrioc->change_count = 0;
4785 	mrioc->active_poll_qcount = 0;
4786 	mrioc->default_qcount = 0;
4787 	if (mrioc->admin_req_base)
4788 		memset(mrioc->admin_req_base, 0, mrioc->admin_req_q_sz);
4789 	if (mrioc->admin_reply_base)
4790 		memset(mrioc->admin_reply_base, 0, mrioc->admin_reply_q_sz);
4791 	atomic_set(&mrioc->admin_reply_q_in_use, 0);
4792 	atomic_set(&mrioc->admin_pend_isr, 0);
4793 
4794 	if (mrioc->init_cmds.reply) {
4795 		memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply));
4796 		memset(mrioc->bsg_cmds.reply, 0,
4797 		    sizeof(*mrioc->bsg_cmds.reply));
4798 		memset(mrioc->host_tm_cmds.reply, 0,
4799 		    sizeof(*mrioc->host_tm_cmds.reply));
4800 		memset(mrioc->pel_cmds.reply, 0,
4801 		    sizeof(*mrioc->pel_cmds.reply));
4802 		memset(mrioc->pel_abort_cmd.reply, 0,
4803 		    sizeof(*mrioc->pel_abort_cmd.reply));
4804 		memset(mrioc->transport_cmds.reply, 0,
4805 		    sizeof(*mrioc->transport_cmds.reply));
4806 		for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
4807 			memset(mrioc->dev_rmhs_cmds[i].reply, 0,
4808 			    sizeof(*mrioc->dev_rmhs_cmds[i].reply));
4809 		for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++)
4810 			memset(mrioc->evtack_cmds[i].reply, 0,
4811 			    sizeof(*mrioc->evtack_cmds[i].reply));
4812 		bitmap_clear(mrioc->removepend_bitmap, 0,
4813 			     mrioc->dev_handle_bitmap_bits);
4814 		bitmap_clear(mrioc->devrem_bitmap, 0, MPI3MR_NUM_DEVRMCMD);
4815 		bitmap_clear(mrioc->evtack_cmds_bitmap, 0,
4816 			     MPI3MR_NUM_EVTACKCMD);
4817 	}
4818 
4819 	for (i = 0; i < mrioc->num_queues; i++) {
4820 		if (mrioc->op_reply_qinfo) {
4821 			mrioc->op_reply_qinfo[i].qid = 0;
4822 			mrioc->op_reply_qinfo[i].ci = 0;
4823 			mrioc->op_reply_qinfo[i].num_replies = 0;
4824 			mrioc->op_reply_qinfo[i].ephase = 0;
4825 			atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
4826 			atomic_set(&mrioc->op_reply_qinfo[i].in_use, 0);
4827 			mpi3mr_memset_op_reply_q_buffers(mrioc, i);
4828 		}
4829 
4830 		if (mrioc->req_qinfo) {
4831 			mrioc->req_qinfo[i].ci = 0;
4832 			mrioc->req_qinfo[i].pi = 0;
4833 			mrioc->req_qinfo[i].num_requests = 0;
4834 			mrioc->req_qinfo[i].qid = 0;
4835 			mrioc->req_qinfo[i].reply_qid = 0;
4836 			spin_lock_init(&mrioc->req_qinfo[i].q_lock);
4837 			mpi3mr_memset_op_req_q_buffers(mrioc, i);
4838 		}
4839 	}
4840 
4841 	atomic_set(&mrioc->pend_large_data_sz, 0);
4842 	if (mrioc->throttle_groups) {
4843 		tg = mrioc->throttle_groups;
4844 		for (i = 0; i < mrioc->num_io_throttle_group; i++, tg++) {
4845 			tg->id = 0;
4846 			tg->fw_qd = 0;
4847 			tg->modified_qd = 0;
4848 			tg->io_divert = 0;
4849 			tg->need_qd_reduction = 0;
4850 			tg->high = 0;
4851 			tg->low = 0;
4852 			tg->qd_reduction = 0;
4853 			atomic_set(&tg->pend_large_data_sz, 0);
4854 		}
4855 	}
4856 }
4857 
4858 /**
4859  * mpi3mr_free_mem - Free memory allocated for a controller
4860  * @mrioc: Adapter instance reference
4861  *
4862  * Free all the memory allocated for a controller.
4863  *
4864  * Return: Nothing.
4865  */
mpi3mr_free_mem(struct mpi3mr_ioc * mrioc)4866 void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
4867 {
4868 	u16 i, j;
4869 	struct mpi3mr_intr_info *intr_info;
4870 	struct diag_buffer_desc *diag_buffer;
4871 
4872 	mpi3mr_free_enclosure_list(mrioc);
4873 	mpi3mr_free_ioctl_dma_memory(mrioc);
4874 
4875 	if (mrioc->sense_buf_pool) {
4876 		if (mrioc->sense_buf)
4877 			dma_pool_free(mrioc->sense_buf_pool, mrioc->sense_buf,
4878 			    mrioc->sense_buf_dma);
4879 		dma_pool_destroy(mrioc->sense_buf_pool);
4880 		mrioc->sense_buf = NULL;
4881 		mrioc->sense_buf_pool = NULL;
4882 	}
4883 	if (mrioc->sense_buf_q_pool) {
4884 		if (mrioc->sense_buf_q)
4885 			dma_pool_free(mrioc->sense_buf_q_pool,
4886 			    mrioc->sense_buf_q, mrioc->sense_buf_q_dma);
4887 		dma_pool_destroy(mrioc->sense_buf_q_pool);
4888 		mrioc->sense_buf_q = NULL;
4889 		mrioc->sense_buf_q_pool = NULL;
4890 	}
4891 
4892 	if (mrioc->reply_buf_pool) {
4893 		if (mrioc->reply_buf)
4894 			dma_pool_free(mrioc->reply_buf_pool, mrioc->reply_buf,
4895 			    mrioc->reply_buf_dma);
4896 		dma_pool_destroy(mrioc->reply_buf_pool);
4897 		mrioc->reply_buf = NULL;
4898 		mrioc->reply_buf_pool = NULL;
4899 	}
4900 	if (mrioc->reply_free_q_pool) {
4901 		if (mrioc->reply_free_q)
4902 			dma_pool_free(mrioc->reply_free_q_pool,
4903 			    mrioc->reply_free_q, mrioc->reply_free_q_dma);
4904 		dma_pool_destroy(mrioc->reply_free_q_pool);
4905 		mrioc->reply_free_q = NULL;
4906 		mrioc->reply_free_q_pool = NULL;
4907 	}
4908 
4909 	for (i = 0; i < mrioc->num_op_req_q; i++)
4910 		mpi3mr_free_op_req_q_segments(mrioc, i);
4911 
4912 	for (i = 0; i < mrioc->num_op_reply_q; i++)
4913 		mpi3mr_free_op_reply_q_segments(mrioc, i);
4914 
4915 	for (i = 0; i < mrioc->intr_info_count; i++) {
4916 		intr_info = mrioc->intr_info + i;
4917 		intr_info->op_reply_q = NULL;
4918 	}
4919 
4920 	kfree(mrioc->req_qinfo);
4921 	mrioc->req_qinfo = NULL;
4922 	mrioc->num_op_req_q = 0;
4923 
4924 	kfree(mrioc->op_reply_qinfo);
4925 	mrioc->op_reply_qinfo = NULL;
4926 	mrioc->num_op_reply_q = 0;
4927 
4928 	kfree(mrioc->init_cmds.reply);
4929 	mrioc->init_cmds.reply = NULL;
4930 
4931 	kfree(mrioc->bsg_cmds.reply);
4932 	mrioc->bsg_cmds.reply = NULL;
4933 
4934 	kfree(mrioc->host_tm_cmds.reply);
4935 	mrioc->host_tm_cmds.reply = NULL;
4936 
4937 	kfree(mrioc->pel_cmds.reply);
4938 	mrioc->pel_cmds.reply = NULL;
4939 
4940 	kfree(mrioc->pel_abort_cmd.reply);
4941 	mrioc->pel_abort_cmd.reply = NULL;
4942 
4943 	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
4944 		kfree(mrioc->evtack_cmds[i].reply);
4945 		mrioc->evtack_cmds[i].reply = NULL;
4946 	}
4947 
4948 	bitmap_free(mrioc->removepend_bitmap);
4949 	mrioc->removepend_bitmap = NULL;
4950 
4951 	bitmap_free(mrioc->devrem_bitmap);
4952 	mrioc->devrem_bitmap = NULL;
4953 
4954 	bitmap_free(mrioc->evtack_cmds_bitmap);
4955 	mrioc->evtack_cmds_bitmap = NULL;
4956 
4957 	bitmap_free(mrioc->chain_bitmap);
4958 	mrioc->chain_bitmap = NULL;
4959 
4960 	kfree(mrioc->transport_cmds.reply);
4961 	mrioc->transport_cmds.reply = NULL;
4962 
4963 	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
4964 		kfree(mrioc->dev_rmhs_cmds[i].reply);
4965 		mrioc->dev_rmhs_cmds[i].reply = NULL;
4966 	}
4967 
4968 	if (mrioc->chain_buf_pool) {
4969 		for (i = 0; i < mrioc->chain_buf_count; i++) {
4970 			if (mrioc->chain_sgl_list[i].addr) {
4971 				dma_pool_free(mrioc->chain_buf_pool,
4972 				    mrioc->chain_sgl_list[i].addr,
4973 				    mrioc->chain_sgl_list[i].dma_addr);
4974 				mrioc->chain_sgl_list[i].addr = NULL;
4975 			}
4976 		}
4977 		dma_pool_destroy(mrioc->chain_buf_pool);
4978 		mrioc->chain_buf_pool = NULL;
4979 	}
4980 
4981 	kfree(mrioc->chain_sgl_list);
4982 	mrioc->chain_sgl_list = NULL;
4983 
4984 	if (mrioc->admin_reply_base) {
4985 		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
4986 		    mrioc->admin_reply_base, mrioc->admin_reply_dma);
4987 		mrioc->admin_reply_base = NULL;
4988 	}
4989 	if (mrioc->admin_req_base) {
4990 		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
4991 		    mrioc->admin_req_base, mrioc->admin_req_dma);
4992 		mrioc->admin_req_base = NULL;
4993 	}
4994 
4995 	if (mrioc->pel_seqnum_virt) {
4996 		dma_free_coherent(&mrioc->pdev->dev, mrioc->pel_seqnum_sz,
4997 		    mrioc->pel_seqnum_virt, mrioc->pel_seqnum_dma);
4998 		mrioc->pel_seqnum_virt = NULL;
4999 	}
5000 
5001 	for (i = 0; i < MPI3MR_MAX_NUM_HDB; i++) {
5002 		diag_buffer = &mrioc->diag_buffers[i];
5003 		if ((i == 0) && mrioc->seg_tb_support) {
5004 			if (mrioc->trace_buf_pool) {
5005 				for (j = 0; j < mrioc->num_tb_segs; j++) {
5006 					if (mrioc->trace_buf[j].segment) {
5007 						dma_pool_free(mrioc->trace_buf_pool,
5008 						    mrioc->trace_buf[j].segment,
5009 						    mrioc->trace_buf[j].segment_dma);
5010 						mrioc->trace_buf[j].segment = NULL;
5011 					}
5012 
5013 					mrioc->trace_buf[j].segment = NULL;
5014 				}
5015 				dma_pool_destroy(mrioc->trace_buf_pool);
5016 				mrioc->trace_buf_pool = NULL;
5017 			}
5018 
5019 			kfree(mrioc->trace_buf);
5020 			mrioc->trace_buf = NULL;
5021 			diag_buffer->size = sizeof(u64) * mrioc->num_tb_segs;
5022 		}
5023 		if (diag_buffer->addr) {
5024 			dma_free_coherent(&mrioc->pdev->dev,
5025 			    diag_buffer->size, diag_buffer->addr,
5026 			    diag_buffer->dma_addr);
5027 			diag_buffer->addr = NULL;
5028 			diag_buffer->size = 0;
5029 			diag_buffer->type = 0;
5030 			diag_buffer->status = 0;
5031 		}
5032 	}
5033 
5034 	kfree(mrioc->throttle_groups);
5035 	mrioc->throttle_groups = NULL;
5036 
5037 	kfree(mrioc->logdata_buf);
5038 	mrioc->logdata_buf = NULL;
5039 
5040 }
5041 
/**
 * mpi3mr_issue_ioc_shutdown - shutdown controller
 * @mrioc: Adapter instance reference
 *
 * Send shutdown notification to the controller and wait for the
 * shutdown_timeout for it to be completed.
 *
 * Return: Nothing.
 */
static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
{
	u32 ioc_config, ioc_status;
	u8 retval = 1;	/* 1 = shutdown not yet observed complete */
	/* timeout is in 100ms units (poll sleeps 100ms per iteration) */
	u32 timeout = MPI3MR_DEFAULT_SHUTDOWN_TIME * 10;

	ioc_info(mrioc, "Issuing shutdown Notification\n");
	if (mrioc->unrecoverable) {
		ioc_warn(mrioc,
		    "IOC is unrecoverable shutdown is not issued\n");
		return;
	}
	/* don't re-issue if a shutdown is already being processed */
	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
	    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS) {
		ioc_info(mrioc, "shutdown already in progress\n");
		return;
	}

	/* request a normal device shutdown via the IOC configuration reg */
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL;
	ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ;

	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);

	/* prefer the firmware-reported shutdown timeout when available */
	if (mrioc->facts.shutdown_timeout)
		timeout = mrioc->facts.shutdown_timeout * 10;

	do {
		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_COMPLETE) {
			retval = 0;
			break;
		}
		msleep(100);
	} while (--timeout);

	/* re-read final state for the summary log below */
	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);

	if (retval) {
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS)
			ioc_warn(mrioc,
			    "shutdown still in progress after timeout\n");
	}

	ioc_info(mrioc,
	    "Base IOC Sts/Config after %s shutdown is (0x%08x)/(0x%08x)\n",
	    (!retval) ? "successful" : "failed", ioc_status,
	    ioc_config);
}
5104 
5105 /**
5106  * mpi3mr_cleanup_ioc - Cleanup controller
5107  * @mrioc: Adapter instance reference
5108  *
5109  * controller cleanup handler, Message unit reset or soft reset
5110  * and shutdown notification is issued to the controller.
5111  *
5112  * Return: Nothing.
5113  */
mpi3mr_cleanup_ioc(struct mpi3mr_ioc * mrioc)5114 void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc)
5115 {
5116 	enum mpi3mr_iocstate ioc_state;
5117 
5118 	dprint_exit(mrioc, "cleaning up the controller\n");
5119 	mpi3mr_ioc_disable_intr(mrioc);
5120 
5121 	ioc_state = mpi3mr_get_iocstate(mrioc);
5122 
5123 	if (!mrioc->unrecoverable && !mrioc->reset_in_progress &&
5124 	    !mrioc->pci_err_recovery &&
5125 	    (ioc_state == MRIOC_STATE_READY)) {
5126 		if (mpi3mr_issue_and_process_mur(mrioc,
5127 		    MPI3MR_RESET_FROM_CTLR_CLEANUP))
5128 			mpi3mr_issue_reset(mrioc,
5129 			    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
5130 			    MPI3MR_RESET_FROM_MUR_FAILURE);
5131 		mpi3mr_issue_ioc_shutdown(mrioc);
5132 	}
5133 	dprint_exit(mrioc, "controller cleanup completed\n");
5134 }
5135 
5136 /**
5137  * mpi3mr_drv_cmd_comp_reset - Flush a internal driver command
5138  * @mrioc: Adapter instance reference
5139  * @cmdptr: Internal command tracker
5140  *
5141  * Complete an internal driver commands with state indicating it
5142  * is completed due to reset.
5143  *
5144  * Return: Nothing.
5145  */
mpi3mr_drv_cmd_comp_reset(struct mpi3mr_ioc * mrioc,struct mpi3mr_drv_cmd * cmdptr)5146 static inline void mpi3mr_drv_cmd_comp_reset(struct mpi3mr_ioc *mrioc,
5147 	struct mpi3mr_drv_cmd *cmdptr)
5148 {
5149 	if (cmdptr->state & MPI3MR_CMD_PENDING) {
5150 		cmdptr->state |= MPI3MR_CMD_RESET;
5151 		cmdptr->state &= ~MPI3MR_CMD_PENDING;
5152 		if (cmdptr->is_waiting) {
5153 			complete(&cmdptr->done);
5154 			cmdptr->is_waiting = 0;
5155 		} else if (cmdptr->callback)
5156 			cmdptr->callback(mrioc, cmdptr);
5157 	}
5158 }
5159 
5160 /**
5161  * mpi3mr_flush_drv_cmds - Flush internaldriver commands
5162  * @mrioc: Adapter instance reference
5163  *
5164  * Flush all internal driver commands post reset
5165  *
5166  * Return: Nothing.
5167  */
mpi3mr_flush_drv_cmds(struct mpi3mr_ioc * mrioc)5168 void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc)
5169 {
5170 	struct mpi3mr_drv_cmd *cmdptr;
5171 	u8 i;
5172 
5173 	cmdptr = &mrioc->init_cmds;
5174 	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5175 
5176 	cmdptr = &mrioc->cfg_cmds;
5177 	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5178 
5179 	cmdptr = &mrioc->bsg_cmds;
5180 	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5181 	cmdptr = &mrioc->host_tm_cmds;
5182 	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5183 
5184 	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
5185 		cmdptr = &mrioc->dev_rmhs_cmds[i];
5186 		mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5187 	}
5188 
5189 	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
5190 		cmdptr = &mrioc->evtack_cmds[i];
5191 		mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5192 	}
5193 
5194 	cmdptr = &mrioc->pel_cmds;
5195 	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5196 
5197 	cmdptr = &mrioc->pel_abort_cmd;
5198 	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5199 
5200 	cmdptr = &mrioc->transport_cmds;
5201 	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5202 }
5203 
5204 /**
5205  * mpi3mr_pel_wait_post - Issue PEL Wait
5206  * @mrioc: Adapter instance reference
5207  * @drv_cmd: Internal command tracker
5208  *
5209  * Issue PEL Wait MPI request through admin queue and return.
5210  *
5211  * Return: Nothing.
5212  */
mpi3mr_pel_wait_post(struct mpi3mr_ioc * mrioc,struct mpi3mr_drv_cmd * drv_cmd)5213 static void mpi3mr_pel_wait_post(struct mpi3mr_ioc *mrioc,
5214 	struct mpi3mr_drv_cmd *drv_cmd)
5215 {
5216 	struct mpi3_pel_req_action_wait pel_wait;
5217 
5218 	mrioc->pel_abort_requested = false;
5219 
5220 	memset(&pel_wait, 0, sizeof(pel_wait));
5221 	drv_cmd->state = MPI3MR_CMD_PENDING;
5222 	drv_cmd->is_waiting = 0;
5223 	drv_cmd->callback = mpi3mr_pel_wait_complete;
5224 	drv_cmd->ioc_status = 0;
5225 	drv_cmd->ioc_loginfo = 0;
5226 	pel_wait.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);
5227 	pel_wait.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
5228 	pel_wait.action = MPI3_PEL_ACTION_WAIT;
5229 	pel_wait.starting_sequence_number = cpu_to_le32(mrioc->pel_newest_seqnum);
5230 	pel_wait.locale = cpu_to_le16(mrioc->pel_locale);
5231 	pel_wait.class = cpu_to_le16(mrioc->pel_class);
5232 	pel_wait.wait_time = MPI3_PEL_WAITTIME_INFINITE_WAIT;
5233 	dprint_bsg_info(mrioc, "sending pel_wait seqnum(%d), class(%d), locale(0x%08x)\n",
5234 	    mrioc->pel_newest_seqnum, mrioc->pel_class, mrioc->pel_locale);
5235 
5236 	if (mpi3mr_admin_request_post(mrioc, &pel_wait, sizeof(pel_wait), 0)) {
5237 		dprint_bsg_err(mrioc,
5238 			    "Issuing PELWait: Admin post failed\n");
5239 		drv_cmd->state = MPI3MR_CMD_NOTUSED;
5240 		drv_cmd->callback = NULL;
5241 		drv_cmd->retry_count = 0;
5242 		mrioc->pel_enabled = false;
5243 	}
5244 }
5245 
5246 /**
5247  * mpi3mr_pel_get_seqnum_post - Issue PEL Get Sequence number
5248  * @mrioc: Adapter instance reference
5249  * @drv_cmd: Internal command tracker
5250  *
5251  * Issue PEL get sequence number MPI request through admin queue
5252  * and return.
5253  *
5254  * Return: 0 on success, non-zero on failure.
5255  */
mpi3mr_pel_get_seqnum_post(struct mpi3mr_ioc * mrioc,struct mpi3mr_drv_cmd * drv_cmd)5256 int mpi3mr_pel_get_seqnum_post(struct mpi3mr_ioc *mrioc,
5257 	struct mpi3mr_drv_cmd *drv_cmd)
5258 {
5259 	struct mpi3_pel_req_action_get_sequence_numbers pel_getseq_req;
5260 	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
5261 	int retval = 0;
5262 
5263 	memset(&pel_getseq_req, 0, sizeof(pel_getseq_req));
5264 	mrioc->pel_cmds.state = MPI3MR_CMD_PENDING;
5265 	mrioc->pel_cmds.is_waiting = 0;
5266 	mrioc->pel_cmds.ioc_status = 0;
5267 	mrioc->pel_cmds.ioc_loginfo = 0;
5268 	mrioc->pel_cmds.callback = mpi3mr_pel_get_seqnum_complete;
5269 	pel_getseq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);
5270 	pel_getseq_req.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
5271 	pel_getseq_req.action = MPI3_PEL_ACTION_GET_SEQNUM;
5272 	mpi3mr_add_sg_single(&pel_getseq_req.sgl, sgl_flags,
5273 	    mrioc->pel_seqnum_sz, mrioc->pel_seqnum_dma);
5274 
5275 	retval = mpi3mr_admin_request_post(mrioc, &pel_getseq_req,
5276 			sizeof(pel_getseq_req), 0);
5277 	if (retval) {
5278 		if (drv_cmd) {
5279 			drv_cmd->state = MPI3MR_CMD_NOTUSED;
5280 			drv_cmd->callback = NULL;
5281 			drv_cmd->retry_count = 0;
5282 		}
5283 		mrioc->pel_enabled = false;
5284 	}
5285 
5286 	return retval;
5287 }
5288 
/**
 * mpi3mr_pel_wait_complete - PELWait Completion callback
 * @mrioc: Adapter instance reference
 * @drv_cmd: Internal command tracker
 *
 * This is a callback handler for the PELWait request and
 * firmware completes a PELWait request when it is aborted or a
 * new PEL entry is available. This sends AEN to the application
 * and if the PELwait completion is not due to PELAbort then
 * this will send a request for new PEL Sequence number
 *
 * Return: Nothing.
 */
static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd)
{
	struct mpi3_pel_reply *pel_reply = NULL;
	u16 ioc_status, pe_log_status;
	bool do_retry = false;

	/* flushed by a reset: just release the tracker, no retry */
	if (drv_cmd->state & MPI3MR_CMD_RESET)
		goto cleanup_drv_cmd;

	ioc_status = drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "%s: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
			__func__, ioc_status, drv_cmd->ioc_loginfo);
		dprint_bsg_err(mrioc,
		    "pel_wait: failed with ioc_status(0x%04x), log_info(0x%08x)\n",
		    ioc_status, drv_cmd->ioc_loginfo);
		do_retry = true;
	}

	if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
		pel_reply = (struct mpi3_pel_reply *)drv_cmd->reply;

	/* no reply payload means PEL cannot continue; disable it */
	if (!pel_reply) {
		dprint_bsg_err(mrioc,
		    "pel_wait: failed due to no reply\n");
		goto out_failed;
	}

	/* ABORTED is an expected completion (PELAbort), not an error */
	pe_log_status = le16_to_cpu(pel_reply->pe_log_status);
	if ((pe_log_status != MPI3_PEL_STATUS_SUCCESS) &&
	    (pe_log_status != MPI3_PEL_STATUS_ABORTED)) {
		ioc_err(mrioc, "%s: Failed pe_log_status(0x%04x)\n",
			__func__, pe_log_status);
		dprint_bsg_err(mrioc,
		    "pel_wait: failed due to pel_log_status(0x%04x)\n",
		    pe_log_status);
		do_retry = true;
	}

	if (do_retry) {
		/* re-post the PELWait up to MPI3MR_PEL_RETRY_COUNT times */
		if (drv_cmd->retry_count < MPI3MR_PEL_RETRY_COUNT) {
			drv_cmd->retry_count++;
			dprint_bsg_err(mrioc, "pel_wait: retrying(%d)\n",
			    drv_cmd->retry_count);
			mpi3mr_pel_wait_post(mrioc, drv_cmd);
			return;
		}
		dprint_bsg_err(mrioc,
		    "pel_wait: failed after all retries(%d)\n",
		    drv_cmd->retry_count);
		goto out_failed;
	}
	/* signal applications that a new PEL event arrived */
	atomic64_inc(&event_counter);
	if (!mrioc->pel_abort_requested) {
		/* not aborted: fetch the new sequence number and re-arm */
		mrioc->pel_cmds.retry_count = 0;
		mpi3mr_pel_get_seqnum_post(mrioc, &mrioc->pel_cmds);
	}

	return;
out_failed:
	mrioc->pel_enabled = false;
cleanup_drv_cmd:
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->retry_count = 0;
}
5369 
/**
 * mpi3mr_pel_get_seqnum_complete - PELGetSeqNum Completion callback
 * @mrioc: Adapter instance reference
 * @drv_cmd: Internal command tracker
 *
 * This is a callback handler for the PEL get sequence number
 * request and a new PEL wait request will be issued to the
 * firmware from this
 *
 * Return: Nothing.
 */
void mpi3mr_pel_get_seqnum_complete(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd)
{
	struct mpi3_pel_reply *pel_reply = NULL;
	struct mpi3_pel_seq *pel_seqnum_virt;
	u16 ioc_status;
	bool do_retry = false;

	/* DMA buffer the firmware filled with the sequence numbers */
	pel_seqnum_virt = (struct mpi3_pel_seq *)mrioc->pel_seqnum_virt;

	/* flushed by a reset: just release the tracker, no retry */
	if (drv_cmd->state & MPI3MR_CMD_RESET)
		goto cleanup_drv_cmd;

	ioc_status = drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		dprint_bsg_err(mrioc,
		    "pel_get_seqnum: failed with ioc_status(0x%04x), log_info(0x%08x)\n",
		    ioc_status, drv_cmd->ioc_loginfo);
		do_retry = true;
	}

	if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
		pel_reply = (struct mpi3_pel_reply *)drv_cmd->reply;
	/* no reply payload means PEL cannot continue; disable it */
	if (!pel_reply) {
		dprint_bsg_err(mrioc,
		    "pel_get_seqnum: failed due to no reply\n");
		goto out_failed;
	}

	if (le16_to_cpu(pel_reply->pe_log_status) != MPI3_PEL_STATUS_SUCCESS) {
		dprint_bsg_err(mrioc,
		    "pel_get_seqnum: failed due to pel_log_status(0x%04x)\n",
		    le16_to_cpu(pel_reply->pe_log_status));
		do_retry = true;
	}

	if (do_retry) {
		/* re-post the request up to MPI3MR_PEL_RETRY_COUNT times */
		if (drv_cmd->retry_count < MPI3MR_PEL_RETRY_COUNT) {
			drv_cmd->retry_count++;
			dprint_bsg_err(mrioc,
			    "pel_get_seqnum: retrying(%d)\n",
			    drv_cmd->retry_count);
			mpi3mr_pel_get_seqnum_post(mrioc, drv_cmd);
			return;
		}

		dprint_bsg_err(mrioc,
		    "pel_get_seqnum: failed after all retries(%d)\n",
		    drv_cmd->retry_count);
		goto out_failed;
	}
	/* next PELWait starts one past the newest logged entry */
	mrioc->pel_newest_seqnum = le32_to_cpu(pel_seqnum_virt->newest) + 1;
	drv_cmd->retry_count = 0;
	mpi3mr_pel_wait_post(mrioc, drv_cmd);

	return;
out_failed:
	mrioc->pel_enabled = false;
cleanup_drv_cmd:
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->retry_count = 0;
}
5444 
5445 /**
5446  * mpi3mr_check_op_admin_proc -
5447  * @mrioc: Adapter instance reference
5448  *
5449  * Check if any of the operation reply queues
5450  * or the admin reply queue are currently in use.
5451  * If any queue is in use, this function waits for
5452  * a maximum of 10 seconds for them to become available.
5453  *
5454  * Return: 0 on success, non-zero on failure.
5455  */
mpi3mr_check_op_admin_proc(struct mpi3mr_ioc * mrioc)5456 static int mpi3mr_check_op_admin_proc(struct mpi3mr_ioc *mrioc)
5457 {
5458 
5459 	u16 timeout = 10 * 10;
5460 	u16 elapsed_time = 0;
5461 	bool op_admin_in_use = false;
5462 
5463 	do {
5464 		op_admin_in_use = false;
5465 
5466 		/* Check admin_reply queue first to exit early */
5467 		if (atomic_read(&mrioc->admin_reply_q_in_use) == 1)
5468 			op_admin_in_use = true;
5469 		else {
5470 			/* Check op_reply queues */
5471 			int i;
5472 
5473 			for (i = 0; i < mrioc->num_queues; i++) {
5474 				if (atomic_read(&mrioc->op_reply_qinfo[i].in_use) == 1) {
5475 					op_admin_in_use = true;
5476 					break;
5477 				}
5478 			}
5479 		}
5480 
5481 		if (!op_admin_in_use)
5482 			break;
5483 
5484 		msleep(100);
5485 
5486 	} while (++elapsed_time < timeout);
5487 
5488 	if (op_admin_in_use)
5489 		return 1;
5490 
5491 	return 0;
5492 }
5493 
/**
 * mpi3mr_soft_reset_handler - Reset the controller
 * @mrioc: Adapter instance reference
 * @reset_reason: Reset reason code
 * @snapdump: Flag to generate snapdump in firmware or not
 *
 * This is an handler for recovering controller by issuing soft
 * reset are diag fault reset.  This is a blocking function and
 * when one reset is executed if any other resets they will be
 * blocked. All BSG requests will be blocked during the reset. If
 * controller reset is successful then the controller will be
 * reinitalized, otherwise the controller will be marked as not
 * recoverable
 *
 * In snapdump bit is set, the controller is issued with diag
 * fault reset so that the firmware can create a snap dump and
 * post that the firmware will result in F000 fault and the
 * driver will issue soft reset to recover from that.
 *
 * Return: 0 on success, non-zero on failure.
 */
int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
	u16 reset_reason, u8 snapdump)
{
	int retval = 0, i;
	unsigned long flags;
	enum mpi3mr_iocstate ioc_state;
	u32 host_diagnostic, timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
	union mpi3mr_trigger_data trigger_data;

	/* Block the reset handler until diag save in progress*/
	dprint_reset(mrioc,
	    "soft_reset_handler: check and block on diagsave_timeout(%d)\n",
	    mrioc->diagsave_timeout);
	while (mrioc->diagsave_timeout)
		ssleep(1);
	/*
	 * Block new resets until the currently executing one is finished and
	 * return the status of the existing reset for all blocked resets
	 */
	dprint_reset(mrioc, "soft_reset_handler: acquiring reset_mutex\n");
	if (!mutex_trylock(&mrioc->reset_mutex)) {
		ioc_info(mrioc,
		    "controller reset triggered by %s is blocked due to another reset in progress\n",
		    mpi3mr_reset_rc_name(reset_reason));
		do {
			ssleep(1);
		} while (mrioc->reset_in_progress == 1);
		ioc_info(mrioc,
		    "returning previous reset result(%d) for the reset triggered by %s\n",
		    mrioc->prev_reset_result,
		    mpi3mr_reset_rc_name(reset_reason));
		return mrioc->prev_reset_result;
	}
	ioc_info(mrioc, "controller reset is triggered by %s\n",
	    mpi3mr_reset_rc_name(reset_reason));

	/* Quiesce the host side: block SCSI requests and BSG traffic */
	mrioc->device_refresh_on = 0;
	scsi_block_requests(mrioc->shost);
	mrioc->reset_in_progress = 1;
	mrioc->stop_bsgs = 1;
	mrioc->prev_reset_result = -1;
	memset(&trigger_data, 0, sizeof(trigger_data));

	/*
	 * For host-initiated (non-fault, non-snapdump) resets, release the
	 * host diagnostic buffers and mask all event notifications before
	 * resetting the controller.
	 */
	if ((!snapdump) && (reset_reason != MPI3MR_RESET_FROM_FAULT_WATCH) &&
	    (reset_reason != MPI3MR_RESET_FROM_FIRMWARE) &&
	    (reset_reason != MPI3MR_RESET_FROM_CIACTIV_FAULT)) {
		mpi3mr_set_trigger_data_in_all_hdb(mrioc,
		    MPI3MR_HDB_TRIGGER_TYPE_SOFT_RESET, NULL, 0);
		dprint_reset(mrioc,
		    "soft_reset_handler: releasing host diagnostic buffers\n");
		mpi3mr_release_diag_bufs(mrioc, 0);
		for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
			mrioc->event_masks[i] = -1;

		dprint_reset(mrioc, "soft_reset_handler: masking events\n");
		mpi3mr_issue_event_notification(mrioc);
	}

	mpi3mr_wait_for_host_io(mrioc, MPI3MR_RESET_HOST_IOWAIT_TIMEOUT);

	mpi3mr_ioc_disable_intr(mrioc);
	mrioc->io_admin_reset_sync = 1;

	/*
	 * Snapdump requested: trigger a diag fault so firmware captures a
	 * snap dump, then poll until the diag-save-in-progress bit clears
	 * (bounded by MPI3_SYSIF_DIAG_SAVE_TIMEOUT).
	 */
	if (snapdump) {
		retval = mpi3mr_issue_reset(mrioc,
		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
		if (!retval) {
			trigger_data.fault = (readl(&mrioc->sysif_regs->fault) &
				      MPI3_SYSIF_FAULT_CODE_MASK);
			do {
				host_diagnostic =
				    readl(&mrioc->sysif_regs->host_diagnostic);
				if (!(host_diagnostic &
				    MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
					break;
				msleep(100);
			} while (--timeout);

			mpi3mr_save_fault_info(mrioc);
			mpi3mr_fault_uevent_emit(mrioc);
			mrioc->fwfault_counter++;
			mpi3mr_set_trigger_data_in_all_hdb(mrioc,
			    MPI3MR_HDB_TRIGGER_TYPE_FAULT, &trigger_data, 0);
		}
	}

	retval = mpi3mr_issue_reset(mrioc,
	    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, reset_reason);
	if (retval) {
		ioc_err(mrioc, "Failed to issue soft reset to the ioc\n");
		goto out;
	}

	/* Ensure no reply-processing context is still active post reset */
	retval = mpi3mr_check_op_admin_proc(mrioc);
	if (retval) {
		ioc_err(mrioc, "Soft reset failed due to an Admin or I/O queue polling\n"
				"thread still processing replies even after a 10 second\n"
				"timeout. Marking the controller as unrecoverable!\n");

		goto out;
	}

	/* Firmware must not shrink/grow throttle groups across a reset */
	if (mrioc->num_io_throttle_group !=
	    mrioc->facts.max_io_throttle_group) {
		ioc_err(mrioc,
		    "max io throttle group doesn't match old(%d), new(%d)\n",
		    mrioc->num_io_throttle_group,
		    mrioc->facts.max_io_throttle_group);
		retval = -EPERM;
		goto out;
	}

	/* Flush all outstanding driver and host I/O state from before reset */
	mpi3mr_flush_delayed_cmd_lists(mrioc);
	mpi3mr_flush_drv_cmds(mrioc);
	bitmap_clear(mrioc->devrem_bitmap, 0, MPI3MR_NUM_DEVRMCMD);
	bitmap_clear(mrioc->removepend_bitmap, 0,
		     mrioc->dev_handle_bitmap_bits);
	bitmap_clear(mrioc->evtack_cmds_bitmap, 0, MPI3MR_NUM_EVTACKCMD);
	mpi3mr_flush_host_io(mrioc);
	mpi3mr_cleanup_fwevt_list(mrioc);
	mpi3mr_invalidate_devhandles(mrioc);
	mpi3mr_free_enclosure_list(mrioc);

	if (mrioc->prepare_for_reset) {
		mrioc->prepare_for_reset = 0;
		mrioc->prepare_for_reset_timeout_counter = 0;
	}
	mpi3mr_memset_buffers(mrioc);
	mpi3mr_release_diag_bufs(mrioc, 1);
	mrioc->fw_release_trigger_active = false;
	mrioc->trace_release_trigger_active = false;
	mrioc->snapdump_trigger_active = false;
	mpi3mr_set_trigger_data_in_all_hdb(mrioc,
	    MPI3MR_HDB_TRIGGER_TYPE_SOFT_RESET, NULL, 0);

	dprint_reset(mrioc,
	    "soft_reset_handler: reinitializing the controller\n");
	retval = mpi3mr_reinit_ioc(mrioc, 0);
	if (retval) {
		pr_err(IOCNAME "reinit after soft reset failed: reason %d\n",
		    mrioc->name, reset_reason);
		goto out;
	}
	/* Let the topology settle before allowing new discovery/I/O */
	ssleep(MPI3MR_RESET_TOPOLOGY_SETTLE_TIME);

out:
	if (!retval) {
		/* Success path: unblock I/O, restart PEL and the watchdog */
		mrioc->diagsave_timeout = 0;
		mrioc->reset_in_progress = 0;
		scsi_unblock_requests(mrioc->shost);
		mrioc->pel_abort_requested = 0;
		if (mrioc->pel_enabled) {
			mrioc->pel_cmds.retry_count = 0;
			mpi3mr_pel_wait_post(mrioc, &mrioc->pel_cmds);
		}

		mrioc->device_refresh_on = 0;

		mrioc->ts_update_counter = 0;
		spin_lock_irqsave(&mrioc->watchdog_lock, flags);
		if (mrioc->watchdog_work_q)
			queue_delayed_work(mrioc->watchdog_work_q,
			    &mrioc->watchdog_work,
			    msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
		spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
		mrioc->stop_bsgs = 0;
		if (mrioc->pel_enabled)
			atomic64_inc(&event_counter);
	} else {
		/* Failure path: fault the IOC and mark it unrecoverable */
		dprint_reset(mrioc,
			"soft_reset_handler failed, marking controller as unrecoverable\n");
		ioc_state = mpi3mr_get_iocstate(mrioc);

		if (ioc_state != MRIOC_STATE_FAULT)
			mpi3mr_issue_reset(mrioc,
				MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
		mrioc->device_refresh_on = 0;
		mrioc->unrecoverable = 1;
		mrioc->reset_in_progress = 0;
		scsi_unblock_requests(mrioc->shost);
		mrioc->stop_bsgs = 0;
		retval = -1;
		mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
	}
	/* Record the result for any resets that were blocked on the mutex */
	mrioc->prev_reset_result = retval;
	mutex_unlock(&mrioc->reset_mutex);
	ioc_info(mrioc, "controller reset is %s\n",
	    ((retval == 0) ? "successful" : "failed"));
	return retval;
}
5705 
/**
 * mpi3mr_post_cfg_req - Issue config requests and wait
 * @mrioc: Adapter instance reference
 * @cfg_req: Configuration request
 * @timeout: Timeout in seconds
 * @ioc_status: Pointer to return ioc status
 *
 * A generic function for posting MPI3 configuration request to
 * the firmware. This blocks for the completion of request for
 * timeout seconds and if the request times out this function
 * faults the controller with proper reason code.
 *
 * On successful completion of the request this function returns
 * appropriate ioc status from the firmware back to the caller.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_post_cfg_req(struct mpi3mr_ioc *mrioc,
	struct mpi3_config_request *cfg_req, int timeout, u16 *ioc_status)
{
	int retval = 0;

	/* cfg_cmds is a single shared tracker; serialize callers on its mutex */
	mutex_lock(&mrioc->cfg_cmds.mutex);
	if (mrioc->cfg_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "sending config request failed due to command in use\n");
		mutex_unlock(&mrioc->cfg_cmds.mutex);
		goto out;
	}
	/* Arm the tracker for a synchronous (is_waiting) request */
	mrioc->cfg_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->cfg_cmds.is_waiting = 1;
	mrioc->cfg_cmds.callback = NULL;
	mrioc->cfg_cmds.ioc_status = 0;
	mrioc->cfg_cmds.ioc_loginfo = 0;

	cfg_req->host_tag = cpu_to_le16(MPI3MR_HOSTTAG_CFG_CMDS);
	cfg_req->function = MPI3_FUNCTION_CONFIG;

	init_completion(&mrioc->cfg_cmds.done);
	dprint_cfg_info(mrioc, "posting config request\n");
	if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
		dprint_dump(cfg_req, sizeof(struct mpi3_config_request),
		    "mpi3_cfg_req");
	retval = mpi3mr_admin_request_post(mrioc, cfg_req, sizeof(*cfg_req), 1);
	if (retval) {
		ioc_err(mrioc, "posting config request failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->cfg_cmds.done, (timeout * HZ));
	if (!(mrioc->cfg_cmds.state & MPI3MR_CMD_COMPLETE)) {
		/* Timed out: fault the controller so recovery can kick in */
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT);
		ioc_err(mrioc, "config request timed out\n");
		retval = -1;
		goto out_unlock;
	}
	*ioc_status = mrioc->cfg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
	if ((*ioc_status) != MPI3_IOCSTATUS_SUCCESS)
		dprint_cfg_err(mrioc,
		    "cfg_page request returned with ioc_status(0x%04x), log_info(0x%08x)\n",
		    *ioc_status, mrioc->cfg_cmds.ioc_loginfo);

out_unlock:
	/* Release the tracker under the mutex before unlocking */
	mrioc->cfg_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->cfg_cmds.mutex);

out:
	return retval;
}
5775 
/**
 * mpi3mr_process_cfg_req - config page request processor
 * @mrioc: Adapter instance reference
 * @cfg_req: Configuration request
 * @cfg_hdr: Configuration page header
 * @timeout: Timeout in seconds
 * @ioc_status: Pointer to return ioc status
 * @cfg_buf: Memory pointer to copy config page or header
 * @cfg_buf_sz: Size of the memory to get config page or header
 *
 * This is handler for config page read, write and config page
 * header read operations.
 *
 * This function expects the cfg_req to be populated with page
 * type, page number, action for the header read and with page
 * address for all other operations.
 *
 * The cfg_hdr can be passed as null for reading required header
 * details for read/write pages the cfg_hdr should point valid
 * configuration page header.
 *
 * This allocates dmaable memory based on the size of the config
 * buffer and set the SGE of the cfg_req.
 *
 * For write actions, the config page data has to be passed in
 * the cfg_buf and size of the data has to be mentioned in the
 * cfg_buf_sz.
 *
 * For read/header actions, on successful completion of the
 * request with successful ioc_status the data will be copied
 * into the cfg_buf limited to a minimum of actual page size and
 * cfg_buf_sz
 *
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_process_cfg_req(struct mpi3mr_ioc *mrioc,
	struct mpi3_config_request *cfg_req,
	struct mpi3_config_page_header *cfg_hdr, int timeout, u16 *ioc_status,
	void *cfg_buf, u32 cfg_buf_sz)
{
	struct dma_memory_desc mem_desc;
	int retval = -1;
	u8 invalid_action = 0;
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;

	memset(&mem_desc, 0, sizeof(struct dma_memory_desc));

	if (cfg_req->action == MPI3_CONFIG_ACTION_PAGE_HEADER)
		mem_desc.size = sizeof(struct mpi3_config_page_header);
	else {
		/* Non-header actions need a header to size/validate the page */
		if (!cfg_hdr) {
			ioc_err(mrioc, "null config header passed for config action(%d), page_type(0x%02x), page_num(%d)\n",
			    cfg_req->action, cfg_req->page_type,
			    cfg_req->page_number);
			goto out;
		}
		/* Reject actions incompatible with the page's attribute */
		switch (cfg_hdr->page_attribute & MPI3_CONFIG_PAGEATTR_MASK) {
		case MPI3_CONFIG_PAGEATTR_READ_ONLY:
			if (cfg_req->action
			    != MPI3_CONFIG_ACTION_READ_CURRENT)
				invalid_action = 1;
			break;
		case MPI3_CONFIG_PAGEATTR_CHANGEABLE:
			if ((cfg_req->action ==
			     MPI3_CONFIG_ACTION_READ_PERSISTENT) ||
			    (cfg_req->action ==
			     MPI3_CONFIG_ACTION_WRITE_PERSISTENT))
				invalid_action = 1;
			break;
		case MPI3_CONFIG_PAGEATTR_PERSISTENT:
		default:
			break;
		}
		if (invalid_action) {
			ioc_err(mrioc,
			    "config action(%d) is not allowed for page_type(0x%02x), page_num(%d) with page_attribute(0x%02x)\n",
			    cfg_req->action, cfg_req->page_type,
			    cfg_req->page_number, cfg_hdr->page_attribute);
			goto out;
		}
		/* page_length is in units of 4 bytes */
		mem_desc.size = le16_to_cpu(cfg_hdr->page_length) * 4;
		cfg_req->page_length = cfg_hdr->page_length;
		cfg_req->page_version = cfg_hdr->page_version;
	}

	mem_desc.addr = dma_alloc_coherent(&mrioc->pdev->dev,
		mem_desc.size, &mem_desc.dma_addr, GFP_KERNEL);

	if (!mem_desc.addr)
		return retval;

	mpi3mr_add_sg_single(&cfg_req->sgl, sgl_flags, mem_desc.size,
	    mem_desc.dma_addr);

	/* Writes: stage caller's data into the DMA buffer before posting */
	if ((cfg_req->action == MPI3_CONFIG_ACTION_WRITE_PERSISTENT) ||
	    (cfg_req->action == MPI3_CONFIG_ACTION_WRITE_CURRENT)) {
		/*
		 * NOTE(review): min_t(u16, ...) truncates both operands to
		 * 16 bits; presumably page sizes never exceed 64K - 1 here,
		 * but confirm against the MPI3 page-length limits.
		 */
		memcpy(mem_desc.addr, cfg_buf, min_t(u16, mem_desc.size,
		    cfg_buf_sz));
		dprint_cfg_info(mrioc, "config buffer to be written\n");
		if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
			dprint_dump(mem_desc.addr, mem_desc.size, "cfg_buf");
	}

	if (mpi3mr_post_cfg_req(mrioc, cfg_req, timeout, ioc_status))
		goto out;

	retval = 0;
	/* Reads/header: copy firmware data back out on successful ioc_status */
	if ((*ioc_status == MPI3_IOCSTATUS_SUCCESS) &&
	    (cfg_req->action != MPI3_CONFIG_ACTION_WRITE_PERSISTENT) &&
	    (cfg_req->action != MPI3_CONFIG_ACTION_WRITE_CURRENT)) {
		memcpy(cfg_buf, mem_desc.addr, min_t(u16, mem_desc.size,
		    cfg_buf_sz));
		dprint_cfg_info(mrioc, "config buffer read\n");
		if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
			dprint_dump(mem_desc.addr, mem_desc.size, "cfg_buf");
	}

out:
	/* DMA buffer is released on all exit paths that allocated it */
	if (mem_desc.addr) {
		dma_free_coherent(&mrioc->pdev->dev, mem_desc.size,
			mem_desc.addr, mem_desc.dma_addr);
		mem_desc.addr = NULL;
	}

	return retval;
}
5903 
5904 /**
5905  * mpi3mr_cfg_get_dev_pg0 - Read current device page0
5906  * @mrioc: Adapter instance reference
5907  * @ioc_status: Pointer to return ioc status
5908  * @dev_pg0: Pointer to return device page 0
5909  * @pg_sz: Size of the memory allocated to the page pointer
5910  * @form: The form to be used for addressing the page
5911  * @form_spec: Form specific information like device handle
5912  *
5913  * This is handler for config page read for a specific device
5914  * page0. The ioc_status has the controller returned ioc_status.
5915  * This routine doesn't check ioc_status to decide whether the
5916  * page read is success or not and it is the callers
5917  * responsibility.
5918  *
5919  * Return: 0 on success, non-zero on failure.
5920  */
mpi3mr_cfg_get_dev_pg0(struct mpi3mr_ioc * mrioc,u16 * ioc_status,struct mpi3_device_page0 * dev_pg0,u16 pg_sz,u32 form,u32 form_spec)5921 int mpi3mr_cfg_get_dev_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
5922 	struct mpi3_device_page0 *dev_pg0, u16 pg_sz, u32 form, u32 form_spec)
5923 {
5924 	struct mpi3_config_page_header cfg_hdr;
5925 	struct mpi3_config_request cfg_req;
5926 	u32 page_address;
5927 
5928 	memset(dev_pg0, 0, pg_sz);
5929 	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
5930 	memset(&cfg_req, 0, sizeof(cfg_req));
5931 
5932 	cfg_req.function = MPI3_FUNCTION_CONFIG;
5933 	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
5934 	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DEVICE;
5935 	cfg_req.page_number = 0;
5936 	cfg_req.page_address = 0;
5937 
5938 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
5939 	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
5940 		ioc_err(mrioc, "device page0 header read failed\n");
5941 		goto out_failed;
5942 	}
5943 	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
5944 		ioc_err(mrioc, "device page0 header read failed with ioc_status(0x%04x)\n",
5945 		    *ioc_status);
5946 		goto out_failed;
5947 	}
5948 	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
5949 	page_address = ((form & MPI3_DEVICE_PGAD_FORM_MASK) |
5950 	    (form_spec & MPI3_DEVICE_PGAD_HANDLE_MASK));
5951 	cfg_req.page_address = cpu_to_le32(page_address);
5952 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
5953 	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, dev_pg0, pg_sz)) {
5954 		ioc_err(mrioc, "device page0 read failed\n");
5955 		goto out_failed;
5956 	}
5957 	return 0;
5958 out_failed:
5959 	return -1;
5960 }
5961 
5962 
5963 /**
5964  * mpi3mr_cfg_get_sas_phy_pg0 - Read current SAS Phy page0
5965  * @mrioc: Adapter instance reference
5966  * @ioc_status: Pointer to return ioc status
5967  * @phy_pg0: Pointer to return SAS Phy page 0
5968  * @pg_sz: Size of the memory allocated to the page pointer
5969  * @form: The form to be used for addressing the page
5970  * @form_spec: Form specific information like phy number
5971  *
5972  * This is handler for config page read for a specific SAS Phy
5973  * page0. The ioc_status has the controller returned ioc_status.
5974  * This routine doesn't check ioc_status to decide whether the
5975  * page read is success or not and it is the callers
5976  * responsibility.
5977  *
5978  * Return: 0 on success, non-zero on failure.
5979  */
mpi3mr_cfg_get_sas_phy_pg0(struct mpi3mr_ioc * mrioc,u16 * ioc_status,struct mpi3_sas_phy_page0 * phy_pg0,u16 pg_sz,u32 form,u32 form_spec)5980 int mpi3mr_cfg_get_sas_phy_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
5981 	struct mpi3_sas_phy_page0 *phy_pg0, u16 pg_sz, u32 form,
5982 	u32 form_spec)
5983 {
5984 	struct mpi3_config_page_header cfg_hdr;
5985 	struct mpi3_config_request cfg_req;
5986 	u32 page_address;
5987 
5988 	memset(phy_pg0, 0, pg_sz);
5989 	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
5990 	memset(&cfg_req, 0, sizeof(cfg_req));
5991 
5992 	cfg_req.function = MPI3_FUNCTION_CONFIG;
5993 	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
5994 	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_PHY;
5995 	cfg_req.page_number = 0;
5996 	cfg_req.page_address = 0;
5997 
5998 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
5999 	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
6000 		ioc_err(mrioc, "sas phy page0 header read failed\n");
6001 		goto out_failed;
6002 	}
6003 	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6004 		ioc_err(mrioc, "sas phy page0 header read failed with ioc_status(0x%04x)\n",
6005 		    *ioc_status);
6006 		goto out_failed;
6007 	}
6008 	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
6009 	page_address = ((form & MPI3_SAS_PHY_PGAD_FORM_MASK) |
6010 	    (form_spec & MPI3_SAS_PHY_PGAD_PHY_NUMBER_MASK));
6011 	cfg_req.page_address = cpu_to_le32(page_address);
6012 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6013 	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, phy_pg0, pg_sz)) {
6014 		ioc_err(mrioc, "sas phy page0 read failed\n");
6015 		goto out_failed;
6016 	}
6017 	return 0;
6018 out_failed:
6019 	return -1;
6020 }
6021 
6022 /**
6023  * mpi3mr_cfg_get_sas_phy_pg1 - Read current SAS Phy page1
6024  * @mrioc: Adapter instance reference
6025  * @ioc_status: Pointer to return ioc status
6026  * @phy_pg1: Pointer to return SAS Phy page 1
6027  * @pg_sz: Size of the memory allocated to the page pointer
6028  * @form: The form to be used for addressing the page
6029  * @form_spec: Form specific information like phy number
6030  *
6031  * This is handler for config page read for a specific SAS Phy
6032  * page1. The ioc_status has the controller returned ioc_status.
6033  * This routine doesn't check ioc_status to decide whether the
6034  * page read is success or not and it is the callers
6035  * responsibility.
6036  *
6037  * Return: 0 on success, non-zero on failure.
6038  */
mpi3mr_cfg_get_sas_phy_pg1(struct mpi3mr_ioc * mrioc,u16 * ioc_status,struct mpi3_sas_phy_page1 * phy_pg1,u16 pg_sz,u32 form,u32 form_spec)6039 int mpi3mr_cfg_get_sas_phy_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
6040 	struct mpi3_sas_phy_page1 *phy_pg1, u16 pg_sz, u32 form,
6041 	u32 form_spec)
6042 {
6043 	struct mpi3_config_page_header cfg_hdr;
6044 	struct mpi3_config_request cfg_req;
6045 	u32 page_address;
6046 
6047 	memset(phy_pg1, 0, pg_sz);
6048 	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
6049 	memset(&cfg_req, 0, sizeof(cfg_req));
6050 
6051 	cfg_req.function = MPI3_FUNCTION_CONFIG;
6052 	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
6053 	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_PHY;
6054 	cfg_req.page_number = 1;
6055 	cfg_req.page_address = 0;
6056 
6057 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
6058 	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
6059 		ioc_err(mrioc, "sas phy page1 header read failed\n");
6060 		goto out_failed;
6061 	}
6062 	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6063 		ioc_err(mrioc, "sas phy page1 header read failed with ioc_status(0x%04x)\n",
6064 		    *ioc_status);
6065 		goto out_failed;
6066 	}
6067 	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
6068 	page_address = ((form & MPI3_SAS_PHY_PGAD_FORM_MASK) |
6069 	    (form_spec & MPI3_SAS_PHY_PGAD_PHY_NUMBER_MASK));
6070 	cfg_req.page_address = cpu_to_le32(page_address);
6071 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6072 	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, phy_pg1, pg_sz)) {
6073 		ioc_err(mrioc, "sas phy page1 read failed\n");
6074 		goto out_failed;
6075 	}
6076 	return 0;
6077 out_failed:
6078 	return -1;
6079 }
6080 
6081 
6082 /**
6083  * mpi3mr_cfg_get_sas_exp_pg0 - Read current SAS Expander page0
6084  * @mrioc: Adapter instance reference
6085  * @ioc_status: Pointer to return ioc status
6086  * @exp_pg0: Pointer to return SAS Expander page 0
6087  * @pg_sz: Size of the memory allocated to the page pointer
6088  * @form: The form to be used for addressing the page
6089  * @form_spec: Form specific information like device handle
6090  *
6091  * This is handler for config page read for a specific SAS
6092  * Expander page0. The ioc_status has the controller returned
6093  * ioc_status. This routine doesn't check ioc_status to decide
6094  * whether the page read is success or not and it is the callers
6095  * responsibility.
6096  *
6097  * Return: 0 on success, non-zero on failure.
6098  */
mpi3mr_cfg_get_sas_exp_pg0(struct mpi3mr_ioc * mrioc,u16 * ioc_status,struct mpi3_sas_expander_page0 * exp_pg0,u16 pg_sz,u32 form,u32 form_spec)6099 int mpi3mr_cfg_get_sas_exp_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
6100 	struct mpi3_sas_expander_page0 *exp_pg0, u16 pg_sz, u32 form,
6101 	u32 form_spec)
6102 {
6103 	struct mpi3_config_page_header cfg_hdr;
6104 	struct mpi3_config_request cfg_req;
6105 	u32 page_address;
6106 
6107 	memset(exp_pg0, 0, pg_sz);
6108 	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
6109 	memset(&cfg_req, 0, sizeof(cfg_req));
6110 
6111 	cfg_req.function = MPI3_FUNCTION_CONFIG;
6112 	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
6113 	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_EXPANDER;
6114 	cfg_req.page_number = 0;
6115 	cfg_req.page_address = 0;
6116 
6117 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
6118 	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
6119 		ioc_err(mrioc, "expander page0 header read failed\n");
6120 		goto out_failed;
6121 	}
6122 	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6123 		ioc_err(mrioc, "expander page0 header read failed with ioc_status(0x%04x)\n",
6124 		    *ioc_status);
6125 		goto out_failed;
6126 	}
6127 	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
6128 	page_address = ((form & MPI3_SAS_EXPAND_PGAD_FORM_MASK) |
6129 	    (form_spec & (MPI3_SAS_EXPAND_PGAD_PHYNUM_MASK |
6130 	    MPI3_SAS_EXPAND_PGAD_HANDLE_MASK)));
6131 	cfg_req.page_address = cpu_to_le32(page_address);
6132 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6133 	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, exp_pg0, pg_sz)) {
6134 		ioc_err(mrioc, "expander page0 read failed\n");
6135 		goto out_failed;
6136 	}
6137 	return 0;
6138 out_failed:
6139 	return -1;
6140 }
6141 
6142 /**
6143  * mpi3mr_cfg_get_sas_exp_pg1 - Read current SAS Expander page1
6144  * @mrioc: Adapter instance reference
6145  * @ioc_status: Pointer to return ioc status
6146  * @exp_pg1: Pointer to return SAS Expander page 1
6147  * @pg_sz: Size of the memory allocated to the page pointer
6148  * @form: The form to be used for addressing the page
6149  * @form_spec: Form specific information like phy number
6150  *
6151  * This is handler for config page read for a specific SAS
6152  * Expander page1. The ioc_status has the controller returned
6153  * ioc_status. This routine doesn't check ioc_status to decide
6154  * whether the page read is success or not and it is the callers
6155  * responsibility.
6156  *
6157  * Return: 0 on success, non-zero on failure.
6158  */
mpi3mr_cfg_get_sas_exp_pg1(struct mpi3mr_ioc * mrioc,u16 * ioc_status,struct mpi3_sas_expander_page1 * exp_pg1,u16 pg_sz,u32 form,u32 form_spec)6159 int mpi3mr_cfg_get_sas_exp_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
6160 	struct mpi3_sas_expander_page1 *exp_pg1, u16 pg_sz, u32 form,
6161 	u32 form_spec)
6162 {
6163 	struct mpi3_config_page_header cfg_hdr;
6164 	struct mpi3_config_request cfg_req;
6165 	u32 page_address;
6166 
6167 	memset(exp_pg1, 0, pg_sz);
6168 	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
6169 	memset(&cfg_req, 0, sizeof(cfg_req));
6170 
6171 	cfg_req.function = MPI3_FUNCTION_CONFIG;
6172 	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
6173 	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_EXPANDER;
6174 	cfg_req.page_number = 1;
6175 	cfg_req.page_address = 0;
6176 
6177 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
6178 	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
6179 		ioc_err(mrioc, "expander page1 header read failed\n");
6180 		goto out_failed;
6181 	}
6182 	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6183 		ioc_err(mrioc, "expander page1 header read failed with ioc_status(0x%04x)\n",
6184 		    *ioc_status);
6185 		goto out_failed;
6186 	}
6187 	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
6188 	page_address = ((form & MPI3_SAS_EXPAND_PGAD_FORM_MASK) |
6189 	    (form_spec & (MPI3_SAS_EXPAND_PGAD_PHYNUM_MASK |
6190 	    MPI3_SAS_EXPAND_PGAD_HANDLE_MASK)));
6191 	cfg_req.page_address = cpu_to_le32(page_address);
6192 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6193 	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, exp_pg1, pg_sz)) {
6194 		ioc_err(mrioc, "expander page1 read failed\n");
6195 		goto out_failed;
6196 	}
6197 	return 0;
6198 out_failed:
6199 	return -1;
6200 }
6201 
6202 /**
6203  * mpi3mr_cfg_get_enclosure_pg0 - Read current Enclosure page0
6204  * @mrioc: Adapter instance reference
6205  * @ioc_status: Pointer to return ioc status
6206  * @encl_pg0: Pointer to return Enclosure page 0
6207  * @pg_sz: Size of the memory allocated to the page pointer
6208  * @form: The form to be used for addressing the page
6209  * @form_spec: Form specific information like device handle
6210  *
6211  * This is handler for config page read for a specific Enclosure
6212  * page0. The ioc_status has the controller returned ioc_status.
6213  * This routine doesn't check ioc_status to decide whether the
6214  * page read is success or not and it is the callers
6215  * responsibility.
6216  *
6217  * Return: 0 on success, non-zero on failure.
6218  */
mpi3mr_cfg_get_enclosure_pg0(struct mpi3mr_ioc * mrioc,u16 * ioc_status,struct mpi3_enclosure_page0 * encl_pg0,u16 pg_sz,u32 form,u32 form_spec)6219 int mpi3mr_cfg_get_enclosure_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
6220 	struct mpi3_enclosure_page0 *encl_pg0, u16 pg_sz, u32 form,
6221 	u32 form_spec)
6222 {
6223 	struct mpi3_config_page_header cfg_hdr;
6224 	struct mpi3_config_request cfg_req;
6225 	u32 page_address;
6226 
6227 	memset(encl_pg0, 0, pg_sz);
6228 	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
6229 	memset(&cfg_req, 0, sizeof(cfg_req));
6230 
6231 	cfg_req.function = MPI3_FUNCTION_CONFIG;
6232 	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
6233 	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_ENCLOSURE;
6234 	cfg_req.page_number = 0;
6235 	cfg_req.page_address = 0;
6236 
6237 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
6238 	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
6239 		ioc_err(mrioc, "enclosure page0 header read failed\n");
6240 		goto out_failed;
6241 	}
6242 	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6243 		ioc_err(mrioc, "enclosure page0 header read failed with ioc_status(0x%04x)\n",
6244 		    *ioc_status);
6245 		goto out_failed;
6246 	}
6247 	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
6248 	page_address = ((form & MPI3_ENCLOS_PGAD_FORM_MASK) |
6249 	    (form_spec & MPI3_ENCLOS_PGAD_HANDLE_MASK));
6250 	cfg_req.page_address = cpu_to_le32(page_address);
6251 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6252 	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, encl_pg0, pg_sz)) {
6253 		ioc_err(mrioc, "enclosure page0 read failed\n");
6254 		goto out_failed;
6255 	}
6256 	return 0;
6257 out_failed:
6258 	return -1;
6259 }
6260 
6261 
6262 /**
6263  * mpi3mr_cfg_get_sas_io_unit_pg0 - Read current SASIOUnit page0
6264  * @mrioc: Adapter instance reference
6265  * @sas_io_unit_pg0: Pointer to return SAS IO Unit page 0
6266  * @pg_sz: Size of the memory allocated to the page pointer
6267  *
6268  * This is handler for config page read for the SAS IO Unit
6269  * page0. This routine checks ioc_status to decide whether the
6270  * page read is success or not.
6271  *
6272  * Return: 0 on success, non-zero on failure.
6273  */
mpi3mr_cfg_get_sas_io_unit_pg0(struct mpi3mr_ioc * mrioc,struct mpi3_sas_io_unit_page0 * sas_io_unit_pg0,u16 pg_sz)6274 int mpi3mr_cfg_get_sas_io_unit_pg0(struct mpi3mr_ioc *mrioc,
6275 	struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0, u16 pg_sz)
6276 {
6277 	struct mpi3_config_page_header cfg_hdr;
6278 	struct mpi3_config_request cfg_req;
6279 	u16 ioc_status = 0;
6280 
6281 	memset(sas_io_unit_pg0, 0, pg_sz);
6282 	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
6283 	memset(&cfg_req, 0, sizeof(cfg_req));
6284 
6285 	cfg_req.function = MPI3_FUNCTION_CONFIG;
6286 	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
6287 	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT;
6288 	cfg_req.page_number = 0;
6289 	cfg_req.page_address = 0;
6290 
6291 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
6292 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
6293 		ioc_err(mrioc, "sas io unit page0 header read failed\n");
6294 		goto out_failed;
6295 	}
6296 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6297 		ioc_err(mrioc, "sas io unit page0 header read failed with ioc_status(0x%04x)\n",
6298 		    ioc_status);
6299 		goto out_failed;
6300 	}
6301 	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
6302 
6303 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6304 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg0, pg_sz)) {
6305 		ioc_err(mrioc, "sas io unit page0 read failed\n");
6306 		goto out_failed;
6307 	}
6308 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6309 		ioc_err(mrioc, "sas io unit page0 read failed with ioc_status(0x%04x)\n",
6310 		    ioc_status);
6311 		goto out_failed;
6312 	}
6313 	return 0;
6314 out_failed:
6315 	return -1;
6316 }
6317 
6318 /**
6319  * mpi3mr_cfg_get_sas_io_unit_pg1 - Read current SASIOUnit page1
6320  * @mrioc: Adapter instance reference
6321  * @sas_io_unit_pg1: Pointer to return SAS IO Unit page 1
6322  * @pg_sz: Size of the memory allocated to the page pointer
6323  *
6324  * This is handler for config page read for the SAS IO Unit
6325  * page1. This routine checks ioc_status to decide whether the
6326  * page read is success or not.
6327  *
6328  * Return: 0 on success, non-zero on failure.
6329  */
mpi3mr_cfg_get_sas_io_unit_pg1(struct mpi3mr_ioc * mrioc,struct mpi3_sas_io_unit_page1 * sas_io_unit_pg1,u16 pg_sz)6330 int mpi3mr_cfg_get_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc,
6331 	struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz)
6332 {
6333 	struct mpi3_config_page_header cfg_hdr;
6334 	struct mpi3_config_request cfg_req;
6335 	u16 ioc_status = 0;
6336 
6337 	memset(sas_io_unit_pg1, 0, pg_sz);
6338 	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
6339 	memset(&cfg_req, 0, sizeof(cfg_req));
6340 
6341 	cfg_req.function = MPI3_FUNCTION_CONFIG;
6342 	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
6343 	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT;
6344 	cfg_req.page_number = 1;
6345 	cfg_req.page_address = 0;
6346 
6347 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
6348 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
6349 		ioc_err(mrioc, "sas io unit page1 header read failed\n");
6350 		goto out_failed;
6351 	}
6352 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6353 		ioc_err(mrioc, "sas io unit page1 header read failed with ioc_status(0x%04x)\n",
6354 		    ioc_status);
6355 		goto out_failed;
6356 	}
6357 	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
6358 
6359 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6360 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) {
6361 		ioc_err(mrioc, "sas io unit page1 read failed\n");
6362 		goto out_failed;
6363 	}
6364 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6365 		ioc_err(mrioc, "sas io unit page1 read failed with ioc_status(0x%04x)\n",
6366 		    ioc_status);
6367 		goto out_failed;
6368 	}
6369 	return 0;
6370 out_failed:
6371 	return -1;
6372 }
6373 
6374 /**
6375  * mpi3mr_cfg_set_sas_io_unit_pg1 - Write SASIOUnit page1
6376  * @mrioc: Adapter instance reference
6377  * @sas_io_unit_pg1: Pointer to the SAS IO Unit page 1 to write
6378  * @pg_sz: Size of the memory allocated to the page pointer
6379  *
6380  * This is handler for config page write for the SAS IO Unit
6381  * page1. This routine checks ioc_status to decide whether the
6382  * page read is success or not. This will modify both current
6383  * and persistent page.
6384  *
6385  * Return: 0 on success, non-zero on failure.
6386  */
mpi3mr_cfg_set_sas_io_unit_pg1(struct mpi3mr_ioc * mrioc,struct mpi3_sas_io_unit_page1 * sas_io_unit_pg1,u16 pg_sz)6387 int mpi3mr_cfg_set_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc,
6388 	struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz)
6389 {
6390 	struct mpi3_config_page_header cfg_hdr;
6391 	struct mpi3_config_request cfg_req;
6392 	u16 ioc_status = 0;
6393 
6394 	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
6395 	memset(&cfg_req, 0, sizeof(cfg_req));
6396 
6397 	cfg_req.function = MPI3_FUNCTION_CONFIG;
6398 	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
6399 	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT;
6400 	cfg_req.page_number = 1;
6401 	cfg_req.page_address = 0;
6402 
6403 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
6404 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
6405 		ioc_err(mrioc, "sas io unit page1 header read failed\n");
6406 		goto out_failed;
6407 	}
6408 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6409 		ioc_err(mrioc, "sas io unit page1 header read failed with ioc_status(0x%04x)\n",
6410 		    ioc_status);
6411 		goto out_failed;
6412 	}
6413 	cfg_req.action = MPI3_CONFIG_ACTION_WRITE_CURRENT;
6414 
6415 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6416 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) {
6417 		ioc_err(mrioc, "sas io unit page1 write current failed\n");
6418 		goto out_failed;
6419 	}
6420 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6421 		ioc_err(mrioc, "sas io unit page1 write current failed with ioc_status(0x%04x)\n",
6422 		    ioc_status);
6423 		goto out_failed;
6424 	}
6425 
6426 	cfg_req.action = MPI3_CONFIG_ACTION_WRITE_PERSISTENT;
6427 
6428 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6429 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) {
6430 		ioc_err(mrioc, "sas io unit page1 write persistent failed\n");
6431 		goto out_failed;
6432 	}
6433 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6434 		ioc_err(mrioc, "sas io unit page1 write persistent failed with ioc_status(0x%04x)\n",
6435 		    ioc_status);
6436 		goto out_failed;
6437 	}
6438 	return 0;
6439 out_failed:
6440 	return -1;
6441 }
6442 
6443 /**
6444  * mpi3mr_cfg_get_driver_pg1 - Read current Driver page1
6445  * @mrioc: Adapter instance reference
6446  * @driver_pg1: Pointer to return Driver page 1
6447  * @pg_sz: Size of the memory allocated to the page pointer
6448  *
6449  * This is handler for config page read for the Driver page1.
6450  * This routine checks ioc_status to decide whether the page
6451  * read is success or not.
6452  *
6453  * Return: 0 on success, non-zero on failure.
6454  */
mpi3mr_cfg_get_driver_pg1(struct mpi3mr_ioc * mrioc,struct mpi3_driver_page1 * driver_pg1,u16 pg_sz)6455 int mpi3mr_cfg_get_driver_pg1(struct mpi3mr_ioc *mrioc,
6456 	struct mpi3_driver_page1 *driver_pg1, u16 pg_sz)
6457 {
6458 	struct mpi3_config_page_header cfg_hdr;
6459 	struct mpi3_config_request cfg_req;
6460 	u16 ioc_status = 0;
6461 
6462 	memset(driver_pg1, 0, pg_sz);
6463 	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
6464 	memset(&cfg_req, 0, sizeof(cfg_req));
6465 
6466 	cfg_req.function = MPI3_FUNCTION_CONFIG;
6467 	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
6468 	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DRIVER;
6469 	cfg_req.page_number = 1;
6470 	cfg_req.page_address = 0;
6471 
6472 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
6473 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
6474 		ioc_err(mrioc, "driver page1 header read failed\n");
6475 		goto out_failed;
6476 	}
6477 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6478 		ioc_err(mrioc, "driver page1 header read failed with ioc_status(0x%04x)\n",
6479 		    ioc_status);
6480 		goto out_failed;
6481 	}
6482 	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
6483 
6484 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6485 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, driver_pg1, pg_sz)) {
6486 		ioc_err(mrioc, "driver page1 read failed\n");
6487 		goto out_failed;
6488 	}
6489 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6490 		ioc_err(mrioc, "driver page1 read failed with ioc_status(0x%04x)\n",
6491 		    ioc_status);
6492 		goto out_failed;
6493 	}
6494 	return 0;
6495 out_failed:
6496 	return -1;
6497 }
6498 
6499 /**
6500  * mpi3mr_cfg_get_driver_pg2 - Read current driver page2
6501  * @mrioc: Adapter instance reference
6502  * @driver_pg2: Pointer to return driver page 2
6503  * @pg_sz: Size of the memory allocated to the page pointer
6504  * @page_action: Page action
6505  *
6506  * This is handler for config page read for the driver page2.
6507  * This routine checks ioc_status to decide whether the page
6508  * read is success or not.
6509  *
6510  * Return: 0 on success, non-zero on failure.
6511  */
mpi3mr_cfg_get_driver_pg2(struct mpi3mr_ioc * mrioc,struct mpi3_driver_page2 * driver_pg2,u16 pg_sz,u8 page_action)6512 int mpi3mr_cfg_get_driver_pg2(struct mpi3mr_ioc *mrioc,
6513 	struct mpi3_driver_page2 *driver_pg2, u16 pg_sz, u8 page_action)
6514 {
6515 	struct mpi3_config_page_header cfg_hdr;
6516 	struct mpi3_config_request cfg_req;
6517 	u16 ioc_status = 0;
6518 
6519 	memset(driver_pg2, 0, pg_sz);
6520 	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
6521 	memset(&cfg_req, 0, sizeof(cfg_req));
6522 
6523 	cfg_req.function = MPI3_FUNCTION_CONFIG;
6524 	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
6525 	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DRIVER;
6526 	cfg_req.page_number = 2;
6527 	cfg_req.page_address = 0;
6528 	cfg_req.page_version = MPI3_DRIVER2_PAGEVERSION;
6529 
6530 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
6531 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
6532 		ioc_err(mrioc, "driver page2 header read failed\n");
6533 		goto out_failed;
6534 	}
6535 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6536 		ioc_err(mrioc, "driver page2 header read failed with\n"
6537 			       "ioc_status(0x%04x)\n",
6538 		    ioc_status);
6539 		goto out_failed;
6540 	}
6541 	cfg_req.action = page_action;
6542 
6543 	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
6544 	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, driver_pg2, pg_sz)) {
6545 		ioc_err(mrioc, "driver page2 read failed\n");
6546 		goto out_failed;
6547 	}
6548 	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
6549 		ioc_err(mrioc, "driver page2 read failed with\n"
6550 			       "ioc_status(0x%04x)\n",
6551 		    ioc_status);
6552 		goto out_failed;
6553 	}
6554 	return 0;
6555 out_failed:
6556 	return -1;
6557 }
6558 
6559